namespace factor {
-template<typename Block> struct forwarder {
- mark_bits<Block> *forwarding_map;
-
- explicit forwarder(mark_bits<Block> *forwarding_map_) :
- forwarding_map(forwarding_map_) {}
-
- Block *operator()(Block *block)
- {
- return forwarding_map->forward_block(block);
- }
-};
-
-static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
-{
- /* The tuple layout may or may not have been forwarded already. Tricky. */
- object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
- tuple_layout *layout;
-
- if(layout_obj < obj)
- {
- /* It's already been moved up; dereference through forwarding
- map to get the size */
- layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
- }
- else
- {
- /* It hasn't been moved up yet; dereference directly */
- layout = (tuple_layout *)layout_obj;
- }
-
- return tuple_size(layout);
-}
-
-struct compaction_sizer {
- mark_bits<object> *forwarding_map;
-
- explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
- forwarding_map(forwarding_map_) {}
-
- cell operator()(object *obj)
- {
- if(!forwarding_map->marked_p(obj))
- return forwarding_map->unmarked_block_size(obj);
- else if(obj->type() == TUPLE_TYPE)
- return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
- else
- return obj->size();
- }
+struct compaction_fixup {
+ static const bool translated_code_block_map = false;
+
+ mark_bits* data_forwarding_map;
+ mark_bits* code_forwarding_map;
+ const object** data_finger;
+ const code_block** code_finger;
+
+ compaction_fixup(mark_bits* data_forwarding_map,
+ mark_bits* code_forwarding_map,
+ const object** data_finger,
+ const code_block** code_finger)
+ : data_forwarding_map(data_forwarding_map),
+ code_forwarding_map(code_forwarding_map),
+ data_finger(data_finger),
+ code_finger(code_finger) {}
+
+ object* fixup_data(object* obj) {
+ return (object*)data_forwarding_map->forward_block((cell)obj);
+ }
+
+ code_block* fixup_code(code_block* compiled) {
+ return (code_block*)code_forwarding_map->forward_block((cell)compiled);
+ }
+
+ object* translate_data(const object* obj) {
+ if (obj < *data_finger)
+ return fixup_data((object*)obj);
+ return (object*)obj;
+ }
+
+ code_block* translate_code(const code_block* compiled) {
+ if (compiled < *code_finger)
+ return fixup_code((code_block*)compiled);
+ return (code_block*)compiled;
+ }
+
+ cell size(object* obj) {
+ if (data_forwarding_map->marked_p((cell)obj))
+ return obj->size(*this);
+ return data_forwarding_map->unmarked_block_size((cell)obj);
+ }
+
+ cell size(code_block* compiled) {
+ if (code_forwarding_map->marked_p((cell)compiled))
+ return compiled->size(*this);
+ return code_forwarding_map->unmarked_block_size((cell)compiled);
+ }
};
-struct object_compaction_updater {
- factor_vm *parent;
- slot_visitor<forwarder<object> > slot_forwarder;
- code_block_visitor<forwarder<code_block> > code_forwarder;
- mark_bits<object> *data_forwarding_map;
- object_start_map *starts;
-
- explicit object_compaction_updater(factor_vm *parent_,
- slot_visitor<forwarder<object> > slot_forwarder_,
- code_block_visitor<forwarder<code_block> > code_forwarder_,
- mark_bits<object> *data_forwarding_map_) :
- parent(parent_),
- slot_forwarder(slot_forwarder_),
- code_forwarder(code_forwarder_),
- data_forwarding_map(data_forwarding_map_),
- starts(&parent->data->tenured->starts) {}
-
- void operator()(object *old_address, object *new_address, cell size)
- {
- cell payload_start;
- if(old_address->type() == TUPLE_TYPE)
- payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
- else
- payload_start = old_address->binary_payload_start();
-
- memmove(new_address,old_address,size);
-
- slot_forwarder.visit_slots(new_address,payload_start);
- code_forwarder.visit_object_code_block(new_address);
- starts->record_object_start_offset(new_address);
- }
-};
-
-template<typename SlotForwarder> struct code_block_compaction_updater {
- factor_vm *parent;
- SlotForwarder slot_forwarder;
-
- explicit code_block_compaction_updater(factor_vm *parent_, SlotForwarder slot_forwarder_) :
- parent(parent_), slot_forwarder(slot_forwarder_) {}
-
- void operator()(code_block *old_address, code_block *new_address, cell size)
- {
- memmove(new_address,old_address,size);
- slot_forwarder.visit_literal_references(new_address);
- parent->relocate_code_block(new_address);
- }
-};
+// After a compaction, invalidate any code heap roots which are not
+// marked, and also slide the valid roots up so that call sites can be updated
+// correctly in case an inline cache compilation triggered compaction.
+void factor_vm::update_code_roots_for_compaction() {
-/* Compact data and code heaps */
-void factor_vm::collect_compact_impl(bool trace_contexts_p)
-{
- current_gc->event->started_compaction();
+ mark_bits* state = &code->allocator->state;
- tenured_space *tenured = data->tenured;
- mark_bits<object> *data_forwarding_map = &tenured->state;
- mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
+ FACTOR_FOR_EACH(code_roots) {
+ code_root* root = *iter;
+ cell block = root->value & (~data_alignment + 1);
- /* Figure out where blocks are going to go */
- data_forwarding_map->compute_forwarding();
- code_forwarding_map->compute_forwarding();
+    // Offset of return address within the data_alignment-sized allocation line
+ cell offset = root->value - block;
- slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
- code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));
-
- /* Object start offsets get recomputed by the object_compaction_updater */
- data->tenured->starts.clear_object_start_offsets();
-
- /* Slide everything in tenured space up, and update data and code heap
- pointers inside objects. */
- object_compaction_updater object_updater(this,slot_forwarder,code_forwarder,data_forwarding_map);
- compaction_sizer object_sizer(data_forwarding_map);
- tenured->compact(object_updater,object_sizer);
-
- /* Slide everything in the code heap up, and update data and code heap
- pointers inside code blocks. */
- code_block_compaction_updater<slot_visitor<forwarder<object> > > code_block_updater(this,slot_forwarder);
- standard_sizer<code_block> code_block_sizer;
- code->allocator->compact(code_block_updater,code_block_sizer);
-
- slot_forwarder.visit_roots();
- if(trace_contexts_p)
- {
- slot_forwarder.visit_contexts();
- code_forwarder.visit_context_code_blocks();
- code_forwarder.visit_callback_code_blocks();
- }
-
- update_code_roots_for_compaction();
-
- current_gc->event->ended_compaction();
+ if (root->valid && state->marked_p(block)) {
+ block = state->forward_block(block);
+ root->value = block + offset;
+ } else
+ root->valid = false;
+ }
}
-struct object_code_block_updater {
- code_block_visitor<forwarder<code_block> > *visitor;
+// Compact data and code heaps
+void factor_vm::collect_compact_impl() {
+ gc_event* event = current_gc->event;
+
+#ifdef FACTOR_DEBUG
+ code->verify_all_blocks_set();
+#endif
+
+ if (event)
+ event->reset_timer();
+
+ tenured_space* tenured = data->tenured;
+ mark_bits* data_forwarding_map = &tenured->state;
+ mark_bits* code_forwarding_map = &code->allocator->state;
+
+ // Figure out where blocks are going to go
+ data_forwarding_map->compute_forwarding();
+ code_forwarding_map->compute_forwarding();
+
+ const object* data_finger = (object*)tenured->start;
+ const code_block* code_finger = (code_block*)code->allocator->start;
+
+ {
+ compaction_fixup fixup(data_forwarding_map, code_forwarding_map,
+ &data_finger, &code_finger);
+ slot_visitor<compaction_fixup> forwarder(this, fixup);
+
+ forwarder.visit_uninitialized_code_blocks();
+
+    // Object start offsets get recomputed by the compact_object_func lambda below
+ data->tenured->starts.clear_object_start_offsets();
+
+ // Slide everything in tenured space up, and update data and code heap
+ // pointers inside objects.
+ auto compact_object_func = [&](object* old_addr, object* new_addr, cell size) {
+ (void)old_addr;
+ (void)size;
+ forwarder.visit_slots(new_addr);
+ forwarder.visit_object_code_block(new_addr);
+ tenured->starts.record_object_start_offset(new_addr);
+ };
+ tenured->compact(compact_object_func, fixup, &data_finger);
+
+ // Slide everything in the code heap up, and update data and code heap
+ // pointers inside code blocks.
+ auto compact_code_func = [&](code_block* old_addr,
+ code_block* new_addr,
+ cell size) {
+ (void)size;
+ forwarder.visit_code_block_objects(new_addr);
+ cell old_entry_point = old_addr->entry_point();
+ forwarder.visit_instruction_operands(new_addr, old_entry_point);
+ };
+ code->allocator->compact(compact_code_func, fixup, &code_finger);
+
+ forwarder.visit_all_roots();
+ forwarder.visit_context_code_blocks();
+ }
+
+ update_code_roots_for_compaction();
+
+ // Each callback has a relocation with a pointer to a code block in
+ // the code heap. Since the code heap has now been compacted, those
+ // pointers are invalid and we need to update them.
+ auto callback_updater = [&](code_block* stub, cell size) {
+ (void)size;
+ callbacks->update(stub);
+ };
+ callbacks->allocator->iterate(callback_updater, no_fixup());
+
+ code->initialize_all_blocks_set();
+
+ if (event)
+ event->ended_phase(PHASE_DATA_COMPACTION);
+}
- explicit object_code_block_updater(code_block_visitor<forwarder<code_block> > *visitor_) :
- visitor(visitor_) {}
+void factor_vm::collect_compact() {
+ collect_mark_impl();
+ collect_compact_impl();
- void operator()(object *obj)
- {
- visitor->visit_object_code_block(obj);
- }
-};
+ // Compaction did not free up enough memory. Grow the data heap.
+ if (data->high_fragmentation_p()) {
+ set_current_gc_op(COLLECT_GROWING_DATA_HEAP_OP);
+ collect_growing_data_heap(0);
+ }
-struct dummy_slot_forwarder {
- void visit_literal_references(code_block *compiled) {}
-};
+ code->flush_icache();
+}
-/* Compact just the code heap */
-void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
-{
- /* Figure out where blocks are going to go */
- mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
- code_forwarding_map->compute_forwarding();
- code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));
-
- if(trace_contexts_p)
- {
- code_forwarder.visit_context_code_blocks();
- code_forwarder.visit_callback_code_blocks();
- }
-
- /* Update code heap references in data heap */
- object_code_block_updater updater(&code_forwarder);
- each_object(updater);
-
- /* Slide everything in the code heap up, and update code heap
- pointers inside code blocks. */
- dummy_slot_forwarder slot_forwarder;
- code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder);
- standard_sizer<code_block> code_block_sizer;
- code->allocator->compact(code_block_updater,code_block_sizer);
-
- update_code_roots_for_compaction();
+void factor_vm::collect_growing_data_heap(cell requested_size) {
+ // Grow the data heap and copy all live objects to the new heap.
+ data_heap* old = data;
+ set_data_heap(data->grow(&nursery, requested_size));
+ collect_mark_impl();
+ collect_compact_impl();
+ code->flush_icache();
+ delete old;
}
}