5 template<typename Block> struct forwarder {
6 mark_bits<Block> *forwarding_map;
8 explicit forwarder(mark_bits<Block> *forwarding_map_) :
9 forwarding_map(forwarding_map_) {}
11 Block *operator()(Block *block)
13 return forwarding_map->forward_block(block);
17 static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
19 /* The tuple layout may or may not have been forwarded already. Tricky. */
20 object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
25 /* It's already been moved up; dereference through forwarding
26 map to get the size */
27 layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
31 /* It hasn't been moved up yet; dereference directly */
32 layout = (tuple_layout *)layout_obj;
35 return tuple_size(layout);
38 struct compaction_sizer {
39 mark_bits<object> *forwarding_map;
41 explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
42 forwarding_map(forwarding_map_) {}
44 cell operator()(object *obj)
46 if(!forwarding_map->marked_p(obj))
47 return forwarding_map->unmarked_block_size(obj);
48 else if(obj->type() == TUPLE_TYPE)
49 return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
55 struct object_compaction_updater {
57 mark_bits<code_block> *code_forwarding_map;
58 mark_bits<object> *data_forwarding_map;
59 object_start_map *starts;
61 explicit object_compaction_updater(factor_vm *parent_,
62 mark_bits<object> *data_forwarding_map_,
63 mark_bits<code_block> *code_forwarding_map_) :
65 code_forwarding_map(code_forwarding_map_),
66 data_forwarding_map(data_forwarding_map_),
67 starts(&parent->data->tenured->starts) {}
69 void operator()(object *old_address, object *new_address, cell size)
72 if(old_address->type() == TUPLE_TYPE)
73 payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
75 payload_start = old_address->binary_payload_start();
77 memmove(new_address,old_address,size);
79 slot_visitor<forwarder<object> > slot_forwarder(parent,forwarder<object>(data_forwarding_map));
80 slot_forwarder.visit_slots(new_address,payload_start);
82 code_block_visitor<forwarder<code_block> > code_forwarder(parent,forwarder<code_block>(code_forwarding_map));
83 code_forwarder.visit_object_code_block(new_address);
85 starts->record_object_start_offset(new_address);
89 template<typename SlotForwarder>
90 struct code_block_compaction_relocation_visitor {
92 code_block *old_address;
93 slot_visitor<SlotForwarder> slot_forwarder;
94 code_block_visitor<forwarder<code_block> > code_forwarder;
96 explicit code_block_compaction_relocation_visitor(factor_vm *parent_,
97 code_block *old_address_,
98 slot_visitor<SlotForwarder> slot_forwarder_,
99 code_block_visitor<forwarder<code_block> > code_forwarder_) :
101 old_address(old_address_),
102 slot_forwarder(slot_forwarder_),
103 code_forwarder(code_forwarder_) {}
105 void operator()(instruction_operand op)
107 cell old_offset = op.rel_offset() + (cell)old_address->xt();
109 switch(op.rel_type())
112 op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset)));
117 op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset)));
120 op.store_value(op.load_value(old_offset) - (cell)old_address + (cell)op.parent_code_block());
123 case RT_CARDS_OFFSET:
124 case RT_DECKS_OFFSET:
125 parent->store_external_address(op);
128 op.store_value(op.load_value(old_offset));
134 template<typename SlotForwarder>
135 struct code_block_compaction_updater {
137 slot_visitor<SlotForwarder> slot_forwarder;
138 code_block_visitor<forwarder<code_block> > code_forwarder;
140 explicit code_block_compaction_updater(factor_vm *parent_,
141 slot_visitor<SlotForwarder> slot_forwarder_,
142 code_block_visitor<forwarder<code_block> > code_forwarder_) :
144 slot_forwarder(slot_forwarder_),
145 code_forwarder(code_forwarder_) {}
147 void operator()(code_block *old_address, code_block *new_address, cell size)
149 memmove(new_address,old_address,size);
151 slot_forwarder.visit_code_block_objects(new_address);
153 code_block_compaction_relocation_visitor<SlotForwarder> visitor(parent,old_address,slot_forwarder,code_forwarder);
154 new_address->each_instruction_operand(visitor);
158 /* Compact data and code heaps */
159 void factor_vm::collect_compact_impl(bool trace_contexts_p)
161 current_gc->event->started_compaction();
163 tenured_space *tenured = data->tenured;
164 mark_bits<object> *data_forwarding_map = &tenured->state;
165 mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
167 /* Figure out where blocks are going to go */
168 data_forwarding_map->compute_forwarding();
169 code_forwarding_map->compute_forwarding();
171 slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
172 code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));
174 code_forwarder.visit_uninitialized_code_blocks();
176 /* Object start offsets get recomputed by the object_compaction_updater */
177 data->tenured->starts.clear_object_start_offsets();
179 /* Slide everything in tenured space up, and update data and code heap
180 pointers inside objects. */
181 object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map);
182 compaction_sizer object_sizer(data_forwarding_map);
183 tenured->compact(object_updater,object_sizer);
185 /* Slide everything in the code heap up, and update data and code heap
186 pointers inside code blocks. */
187 code_block_compaction_updater<forwarder<object> > code_block_updater(this,slot_forwarder,code_forwarder);
188 standard_sizer<code_block> code_block_sizer;
189 code->allocator->compact(code_block_updater,code_block_sizer);
191 slot_forwarder.visit_roots();
194 slot_forwarder.visit_contexts();
195 code_forwarder.visit_context_code_blocks();
198 update_code_roots_for_compaction();
201 current_gc->event->ended_compaction();
204 struct object_grow_heap_updater {
205 code_block_visitor<forwarder<code_block> > code_forwarder;
207 explicit object_grow_heap_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
208 code_forwarder(code_forwarder_) {}
210 void operator()(object *obj)
212 code_forwarder.visit_object_code_block(obj);
216 struct dummy_slot_forwarder {
217 object *operator()(object *obj) { return obj; }
220 /* Compact just the code heap, after growing the data heap */
221 void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
223 /* Figure out where blocks are going to go */
224 mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
225 code_forwarding_map->compute_forwarding();
227 slot_visitor<dummy_slot_forwarder> slot_forwarder(this,dummy_slot_forwarder());
228 code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));
230 code_forwarder.visit_uninitialized_code_blocks();
233 code_forwarder.visit_context_code_blocks();
235 /* Update code heap references in data heap */
236 object_grow_heap_updater updater(code_forwarder);
237 each_object(updater);
239 /* Slide everything in the code heap up, and update code heap
240 pointers inside code blocks. */
241 code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder,code_forwarder);
242 standard_sizer<code_block> code_block_sizer;
243 code->allocator->compact(code_block_updater,code_block_sizer);
245 update_code_roots_for_compaction();
249 void factor_vm::collect_compact(bool trace_contexts_p)
251 collect_mark_impl(trace_contexts_p);
252 collect_compact_impl(trace_contexts_p);
253 code->flush_icache();
/* Grow the data heap, then mark (which copies live objects into the
new, larger data heap) and compact only the code heap. */
256 void factor_vm::collect_growing_heap(cell requested_bytes, bool trace_contexts_p)
258 /* Grow the data heap and copy all live objects to the new heap. */
259 data_heap *old = data;
260 set_data_heap(data->grow(requested_bytes));
261 collect_mark_impl(trace_contexts_p);
262 collect_compact_code_impl(trace_contexts_p);
/* NOTE(review): `old` is saved but not released within this view --
presumably the old heap is deleted after this point; confirm in the
remainder of the function. */
263 code->flush_icache();