5 template<typename Block> struct forwarder {
6 mark_bits<Block> *forwarding_map;
8 explicit forwarder(mark_bits<Block> *forwarding_map_) :
9 forwarding_map(forwarding_map_) {}
11 Block *operator()(Block *block)
13 return forwarding_map->forward_block(block);
/* Compute the size of a tuple during compaction. A tuple's size is stored in
its layout object, but mid-compaction the layout object may itself already
have been slid to its new location -- so we may need to translate the layout
pointer through the forwarding map before dereferencing it. */
17 static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
19 /* The tuple layout may or may not have been forwarded already. Tricky. */
20 object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
/* NOTE(review): the declaration of `layout` and the branch condition choosing
between the two cases below are not visible in this chunk -- presumably the
test is whether layout_obj precedes obj in the heap (i.e. has already been
compacted past). Confirm against the full source. */
25 /* It's already been moved up; dereference through forwarding
26 map to get the size */
27 layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
31 /* It hasn't been moved up yet; dereference directly */
32 layout = (tuple_layout *)layout_obj;
/* With the (possibly forwarded) layout in hand, read the tuple's size */
35 return tuple_size(layout);
/* Function object computing the size of an arbitrary block in tenured space
during compaction; passed to the allocator's compact() routine so it can walk
the heap. Handles dead (unmarked) runs, tuples with possibly-forwarded
layouts, and (presumably, in the non-visible fallthrough) ordinary objects. */
38 struct compaction_sizer {
39 mark_bits<object> *forwarding_map;
41 explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
42 forwarding_map(forwarding_map_) {}
44 cell operator()(object *obj)
/* Dead object: the mark bitmap reports how far the unmarked run extends */
46 if(!forwarding_map->marked_p(obj))
47 return forwarding_map->unmarked_block_size(obj);
/* Live tuple: its layout may already have moved, so use the helper that
consults the forwarding map; result is padded to the heap's alignment */
48 else if(obj->type() == TUPLE_TYPE)
49 return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
/* NOTE(review): the final case for other live objects (presumably
`return obj->size();`) is not visible in this chunk of the file. */
/* Function object invoked by tenured-space compaction for each live object:
copies the object from old_address to new_address, then rewrites all data-heap
and code-heap pointers inside the copy through the forwarding maps, and
finally re-registers the object's start offset (the start map was cleared
before compaction began). */
55 struct object_compaction_updater {
57 mark_bits<code_block> *code_forwarding_map;
58 mark_bits<object> *data_forwarding_map;
59 object_start_map *starts;
/* NOTE(review): a `factor_vm *parent;` member and its initializer are not
visible in this chunk, but `parent` is referenced below -- confirm against
the full source. */
61 explicit object_compaction_updater(factor_vm *parent_,
62 mark_bits<object> *data_forwarding_map_,
63 mark_bits<code_block> *code_forwarding_map_) :
65 code_forwarding_map(code_forwarding_map_),
66 data_forwarding_map(data_forwarding_map_),
67 starts(&parent->data->tenured->starts) {}
69 void operator()(object *old_address, object *new_address, cell size)
/* Determine where the object's non-pointer payload begins: tuples need the
forwarding-aware size helper since their layout may already have moved.
The `payload_start` declaration itself is not visible in this chunk. */
72 if(old_address->type() == TUPLE_TYPE)
73 payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
75 payload_start = old_address->binary_payload_start();
/* Regions may overlap when sliding objects down, hence memmove not memcpy */
77 memmove(new_address,old_address,size);
/* Rewrite data-heap pointers in the copied object's slots */
79 slot_visitor<forwarder<object> > slot_forwarder(parent,forwarder<object>(data_forwarding_map));
80 slot_forwarder.visit_slots(new_address,payload_start);
/* Rewrite the object's code-block reference (if any) */
82 code_block_visitor<forwarder<code_block> > code_forwarder(parent,forwarder<code_block>(code_forwarding_map));
83 code_forwarder.visit_object_code_block(new_address);
/* Record the object's new position in the card-marking start map */
85 starts->record_object_start_offset(new_address);
/* Visitor applied to every relocation entry (instruction operand) of a code
block that has just been moved by compaction. Each operand's stored value is
re-read relative to the block's OLD address and re-stored adjusted for the
new world: literals go through the slot forwarder, code-block references go
through the code forwarder, self-relative values are rebased, and external
addresses are recomputed by the VM. */
89 template<typename SlotForwarder>
90 struct code_block_compaction_relocation_visitor {
92 code_block *old_address;
93 slot_visitor<SlotForwarder> slot_forwarder;
94 code_block_visitor<forwarder<code_block> > code_forwarder;
/* NOTE(review): a `factor_vm *parent;` member and its initializer are not
visible in this chunk, but `parent` is used below -- confirm. */
96 explicit code_block_compaction_relocation_visitor(factor_vm *parent_,
97 code_block *old_address_,
98 slot_visitor<SlotForwarder> slot_forwarder_,
99 code_block_visitor<forwarder<code_block> > code_forwarder_) :
101 old_address(old_address_),
102 slot_forwarder(slot_forwarder_),
103 code_forwarder(code_forwarder_) {}
105 void operator()(instruction_operand op)
/* Operand values must be loaded relative to where the code block USED to
live, since the relocation entry's offset refers to the old image */
107 cell old_offset = op.rel_offset() + (cell)old_address->entry_point();
/* NOTE(review): several case labels and `break` statements of this switch
(e.g. a literal case before the first store, RT_ENTRY_POINT before the PIC
cases, a self-relative case, and the default label) were lost in this
extraction -- the groupings below are inferred; confirm against the full
source. */
109 switch(op.rel_type())
/* Data-heap literal embedded in the code: forward it like any slot */
112 op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset)));
115 case RT_ENTRY_POINT_PIC:
116 case RT_ENTRY_POINT_PIC_TAIL:
/* Reference to another code block: forward through the code heap map */
117 op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset)));
/* Self-relative value: rebase from the old block address to the new one */
120 op.store_value(op.load_value(old_offset) - (cell)old_address + (cell)op.parent_code_block());
123 case RT_CARDS_OFFSET:
124 case RT_DECKS_OFFSET:
/* VM-global addresses (card/deck tables): recomputed by the VM itself */
125 parent->store_external_address(op);
/* Default: value is position-independent; just re-store it unchanged */
128 op.store_value(op.load_value(old_offset));
/* Function object invoked by code-heap compaction for each live code block:
copies the block to its new address, forwards the data-heap objects the block
references (owner word, literals, etc.), then fixes up every embedded
relocation via code_block_compaction_relocation_visitor. Parameterized on the
slot forwarder so the same updater works for full compaction (real forwarder)
and code-only compaction (dummy forwarder). */
134 template<typename SlotForwarder>
135 struct code_block_compaction_updater {
137 slot_visitor<SlotForwarder> slot_forwarder;
138 code_block_visitor<forwarder<code_block> > code_forwarder;
/* NOTE(review): a `factor_vm *parent;` member and its initializer are not
visible in this chunk, but `parent` is passed to the visitor below. */
140 explicit code_block_compaction_updater(factor_vm *parent_,
141 slot_visitor<SlotForwarder> slot_forwarder_,
142 code_block_visitor<forwarder<code_block> > code_forwarder_) :
144 slot_forwarder(slot_forwarder_),
145 code_forwarder(code_forwarder_) {}
147 void operator()(code_block *old_address, code_block *new_address, cell size)
/* Source and destination can overlap when sliding blocks down */
149 memmove(new_address,old_address,size);
/* Forward the data-heap references stored in the block's header */
151 slot_forwarder.visit_code_block_objects(new_address);
/* Patch every relocation in the block's machine code for the move */
153 code_block_compaction_relocation_visitor<SlotForwarder> visitor(parent,old_address,slot_forwarder,code_forwarder);
154 new_address->each_instruction_operand(visitor);
158 /* After a compaction, invalidate any code heap roots which are not
159 marked, and also slide the valid roots up so that call sites can be updated
160 correctly in case an inline cache compilation triggered compaction. */
161 void factor_vm::update_code_roots_for_compaction()
163 std::vector<code_root *>::const_iterator iter = code_roots.begin();
164 std::vector<code_root *>::const_iterator end = code_roots.end();
/* The code allocator's mark bitmap doubles as the forwarding map */
166 mark_bits<code_block> *state = &code->allocator->state;
168 for(; iter < end; iter++)
170 code_root *root = *iter;
/* ~data_alignment + 1 == -data_alignment: masks off the low bits, rounding
the root's address down to the alignment boundary where the block starts */
171 code_block *block = (code_block *)(root->value & (~data_alignment + 1));
173 /* Offset of return address within 16-byte allocation line */
174 cell offset = root->value - (cell)block;
/* Live root: move it to the block's forwarded address, preserving the
intra-block offset. NOTE(review): the else-branch that presumably clears
root->valid for dead roots is not visible in this chunk -- confirm. */
176 if(root->valid && state->marked_p(block))
178 block = state->forward_block(block);
179 root->value = (cell)block + offset;
186 /* Compact data and code heaps */
189 current_gc->event->started_compaction();
/* Both heaps' mark bitmaps double as their forwarding maps */
191 tenured_space *tenured = data->tenured;
192 mark_bits<object> *data_forwarding_map = &tenured->state;
193 mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
195 /* Figure out where blocks are going to go */
196 data_forwarding_map->compute_forwarding();
197 code_forwarding_map->compute_forwarding();
/* Forwarding visitors used for roots/contexts after the heaps move */
199 slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
200 code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));
202 code_forwarder.visit_uninitialized_code_blocks();
204 /* Object start offsets get recomputed by the object_compaction_updater */
205 data->tenured->starts.clear_object_start_offsets();
207 /* Slide everything in tenured space up, and update data and code heap
208 pointers inside objects. */
209 object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map);
210 compaction_sizer object_sizer(data_forwarding_map);
211 tenured->compact(object_updater,object_sizer);
213 /* Slide everything in the code heap up, and update data and code heap
214 pointers inside code blocks. */
215 code_block_compaction_updater<forwarder<object> > code_block_updater(this,slot_forwarder,code_forwarder);
216 standard_sizer<code_block> code_block_sizer;
217 code->allocator->compact(code_block_updater,code_block_sizer);
/* Forward GC roots to the objects' new addresses */
219 slot_forwarder.visit_roots();
/* NOTE(review): an `if(trace_contexts_p)` guard around the two context
visits below is not visible in this chunk (the parameter is otherwise
unused here) -- confirm against the full source. */
222 slot_forwarder.visit_contexts();
223 code_forwarder.visit_context_code_blocks();
/* Fix up (or invalidate) saved code roots from inline-cache compilation */
226 update_code_roots_for_compaction();
229 current_gc->event->ended_compaction();
232 struct object_grow_heap_updater {
233 code_block_visitor<forwarder<code_block> > code_forwarder;
235 explicit object_grow_heap_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
236 code_forwarder(code_forwarder_) {}
238 void operator()(object *obj)
240 code_forwarder.visit_object_code_block(obj);
244 struct dummy_slot_forwarder {
245 object *operator()(object *obj) { return obj; }
248 /* Compact just the code heap, after growing the data heap */
249 void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
251 /* Figure out where blocks are going to go */
252 mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
253 code_forwarding_map->compute_forwarding();
/* Data heap is not moving, so data-heap pointers pass through the dummy
(identity) forwarder; only code-block references get rewritten */
255 slot_visitor<dummy_slot_forwarder> slot_forwarder(this,dummy_slot_forwarder());
256 code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));
258 code_forwarder.visit_uninitialized_code_blocks();
/* NOTE(review): an `if(trace_contexts_p)` guard around the context visit
below is not visible in this chunk (the parameter is otherwise unused
here) -- confirm against the full source. */
261 code_forwarder.visit_context_code_blocks();
263 /* Update code heap references in data heap */
264 object_grow_heap_updater updater(code_forwarder);
265 each_object(updater);
267 /* Slide everything in the code heap up, and update code heap
268 pointers inside code blocks. */
269 code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder,code_forwarder);
270 standard_sizer<code_block> code_block_sizer;
271 code->allocator->compact(code_block_updater,code_block_sizer);
/* Fix up (or invalidate) saved code roots from inline-cache compilation */
273 update_code_roots_for_compaction();
277 void factor_vm::collect_compact(bool trace_contexts_p)
279 collect_mark_impl(trace_contexts_p);
280 collect_compact_impl(trace_contexts_p);
281 code->flush_icache();
/* Collection that also enlarges the data heap: install a grown copy of the
data heap, mark, then compact only the code heap (data objects were already
relocated by the grow/copy step). */
284 void factor_vm::collect_growing_heap(cell requested_bytes, bool trace_contexts_p)
286 /* Grow the data heap and copy all live objects to the new heap. */
/* Keep a handle on the old heap so it can be released once live objects
have been copied out. NOTE(review): the corresponding cleanup (presumably
`delete old;`) lies beyond this view -- confirm it is not leaked. */
287 data_heap *old = data;
288 set_data_heap(data->grow(requested_bytes));
289 collect_mark_impl(trace_contexts_p);
290 collect_compact_code_impl(trace_contexts_p);
/* Code blocks were rewritten in place/moved; invalidate the i-cache */
291 code->flush_icache();