#include "master.hpp"

namespace factor {

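/* Compaction support. After a mark phase, live data objects and code
blocks slide toward the start of their respective heaps, in ascending
address order. The mark_bits structures double as forwarding maps:
compute_forwarding() assigns each marked block its post-compaction
address, and forward_block() translates an old address into a new one.
The visitors below use those maps to rewrite every pointer in the data
heap, the code heap, and the VM roots. */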
template<typename Block> struct forwarder {
        mark_bits<Block> *forwarding_map;

        explicit forwarder(mark_bits<Block> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        Block *operator()(Block *block)
        {
                return forwarding_map->forward_block(block);
        }
};

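/* A tuple's size is computed from its layout object, which may itself
have already been moved by the time the tuple is visited. Since blocks
slide toward lower addresses in ascending address order, comparing the
layout's (old) address with the tuple's address tells us which copy is
current. */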
static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
{
        /* The tuple layout may or may not have been forwarded already. Tricky. */
        object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
        tuple_layout *layout;

        if(layout_obj < obj)
        {
                /* It's already been moved up; dereference through forwarding
                map to get the size */
                layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
        }
        else
        {
                /* It hasn't been moved up yet; dereference directly */
                layout = (tuple_layout *)layout_obj;
        }

        return tuple_size(layout);
}

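/* Sizer used when walking tenured space during compaction. Dead blocks
are measured via the mark bitmap (unmarked_block_size() presumably spans
the whole unmarked run rather than trusting dead object headers); live
tuples need the forwarding-aware layout lookup above; all other live
objects report their own size. */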
struct compaction_sizer {
        mark_bits<object> *forwarding_map;

        explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        cell operator()(object *obj)
        {
                if(!forwarding_map->marked_p(obj))
                        return forwarding_map->unmarked_block_size(obj);
                else if(obj->type() == TUPLE_TYPE)
                        return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
                else
                        return obj->size();
        }
};

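/* Applied to each live object as tenured space is compacted. The
boundary between pointer slots and binary payload is computed from the
old copy first, since for tuples it depends on a layout object that may
already have moved. The object is then copied with memmove (the old and
new extents can overlap when sliding), its slots and any attached code
block pointer are forwarded, and its new start offset is recorded in the
object start map. */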
struct object_compaction_updater {
        factor_vm *parent;
        mark_bits<code_block> *code_forwarding_map;
        mark_bits<object> *data_forwarding_map;
        object_start_map *starts;

        explicit object_compaction_updater(factor_vm *parent_,
                mark_bits<object> *data_forwarding_map_,
                mark_bits<code_block> *code_forwarding_map_) :
                parent(parent_),
                code_forwarding_map(code_forwarding_map_),
                data_forwarding_map(data_forwarding_map_),
                starts(&parent->data->tenured->starts) {}

        void operator()(object *old_address, object *new_address, cell size)
        {
                cell payload_start;
                if(old_address->type() == TUPLE_TYPE)
                        payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
                else
                        payload_start = old_address->binary_payload_start();

                memmove(new_address,old_address,size);

                slot_visitor<forwarder<object> > slot_forwarder(parent,forwarder<object>(data_forwarding_map));
                slot_forwarder.visit_slots(new_address,payload_start);

                code_block_visitor<forwarder<code_block> > code_forwarder(parent,forwarder<code_block>(code_forwarding_map));
                code_forwarder.visit_object_code_block(new_address);

                starts->record_object_start_offset(new_address);
        }
};

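/* Fixes up each relocation in a code block that has just been moved.
Operand values are loaded relative to the block's old entry point,
because the instruction bytes were copied verbatim and still encode
addresses computed against the old location; storing a value back
presumably re-encodes it against the operand's new position. Literals are
forwarded through the data heap map, entry points through the code heap
map, RT_HERE self-pointers are rebased into the moved block, and
VM-relative addresses (RT_THIS, cards/decks offsets) are recomputed from
scratch. */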
template<typename SlotForwarder>
struct code_block_compaction_relocation_visitor {
        factor_vm *parent;
        code_block *old_address;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_relocation_visitor(factor_vm *parent_,
                code_block *old_address_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                old_address(old_address_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(instruction_operand op)
        {
                cell old_offset = op.rel_offset() + (cell)old_address->entry_point();

                switch(op.rel_type())
                {
                case RT_LITERAL:
                        op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset)));
                        break;
                case RT_ENTRY_POINT:
                case RT_ENTRY_POINT_PIC:
                case RT_ENTRY_POINT_PIC_TAIL:
                        op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset)));
                        break;
                case RT_HERE:
                        op.store_value(op.load_value(old_offset) - (cell)old_address + (cell)op.parent_code_block());
                        break;
                case RT_THIS:
                case RT_CARDS_OFFSET:
                case RT_DECKS_OFFSET:
                        parent->store_external_address(op);
                        break;
                default:
                        op.store_value(op.load_value(old_offset));
                        break;
                }
        }
};

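/* Applied to each live code block as the code heap is compacted: copy
the block, forward the data-heap objects referenced from its header, then
fix up its instruction operands with the relocation visitor above. */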
template<typename SlotForwarder>
struct code_block_compaction_updater {
        factor_vm *parent;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_updater(factor_vm *parent_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(code_block *old_address, code_block *new_address, cell size)
        {
                memmove(new_address,old_address,size);

                slot_forwarder.visit_code_block_objects(new_address);

                code_block_compaction_relocation_visitor<SlotForwarder> visitor(parent,old_address,slot_forwarder,code_forwarder);
                new_address->each_instruction_operand(visitor);
        }
};

/* After a compaction, invalidate any code heap roots whose code blocks
were not marked, and slide the valid roots up so that call sites can be
updated correctly in case an inline cache compilation triggered the
compaction. */
void factor_vm::update_code_roots_for_compaction()
{
        std::vector<code_root *>::const_iterator iter = code_roots.begin();
        std::vector<code_root *>::const_iterator end = code_roots.end();

        mark_bits<code_block> *state = &code->allocator->state;

        for(; iter < end; iter++)
        {
                code_root *root = *iter;
                /* (~data_alignment + 1) == -data_alignment == ~(data_alignment - 1),
                so this masks off the low bits of the root, recovering the
                aligned start of the code block it points into */
                code_block *block = (code_block *)(root->value & (~data_alignment + 1));

                /* Offset of return address within 16-byte allocation line */
                cell offset = root->value - (cell)block;

                if(root->valid && state->marked_p(block))
                {
                        block = state->forward_block(block);
                        root->value = (cell)block + offset;
                }
                else
                        root->valid = false;
        }
}

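/* Forwarding for both heaps is computed up front because objects hold
pointers into the code heap and code blocks hold pointers into the data
heap, so each updater needs both maps while blocks move. */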
/* Compact data and code heaps */
void factor_vm::collect_compact_impl(bool trace_contexts_p)
{
        current_gc->event->started_compaction();

        tenured_space *tenured = data->tenured;
        mark_bits<object> *data_forwarding_map = &tenured->state;
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;

        /* Figure out where blocks are going to go */
        data_forwarding_map->compute_forwarding();
        code_forwarding_map->compute_forwarding();

        slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        code_forwarder.visit_uninitialized_code_blocks();

        /* Object start offsets get recomputed by the object_compaction_updater */
        data->tenured->starts.clear_object_start_offsets();

        /* Slide everything in tenured space up, and update data and code heap
        pointers inside objects. */
        object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map);
        compaction_sizer object_sizer(data_forwarding_map);
        tenured->compact(object_updater,object_sizer);

        /* Slide everything in the code heap up, and update data and code heap
        pointers inside code blocks. */
        code_block_compaction_updater<forwarder<object> > code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        slot_forwarder.visit_roots();
        if(trace_contexts_p)
        {
                slot_forwarder.visit_contexts();
                code_forwarder.visit_context_code_blocks();
        }

        update_code_roots_for_compaction();
        callbacks->update();

        current_gc->event->ended_compaction();
}

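/* When only the code heap is compacted, data-heap objects stay where
they are but their code block pointers still need forwarding; this
updater is applied to every object in the data heap. */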
struct object_grow_heap_updater {
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit object_grow_heap_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
                code_forwarder(code_forwarder_) {}

        void operator()(object *obj)
        {
                code_forwarder.visit_object_code_block(obj);
        }
};

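/* Identity forwarder for data-heap slots, used when the data heap is not
being compacted and only code blocks move. */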
struct dummy_slot_forwarder {
        object *operator()(object *obj) { return obj; }
};

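/* By this point the data heap has already been grown and its live
objects copied into the new heap (see collect_growing_heap below), so
data-heap pointers do not change here; only code block references are
rewritten. */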
/* Compact just the code heap, after growing the data heap */
void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
{
        /* Figure out where blocks are going to go */
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
        code_forwarding_map->compute_forwarding();

        slot_visitor<dummy_slot_forwarder> slot_forwarder(this,dummy_slot_forwarder());
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        code_forwarder.visit_uninitialized_code_blocks();

        if(trace_contexts_p)
                code_forwarder.visit_context_code_blocks();

        /* Update code heap references in data heap */
        object_grow_heap_updater updater(code_forwarder);
        each_object(updater);

        /* Slide everything in the code heap up, and update code heap
        pointers inside code blocks. */
        code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        update_code_roots_for_compaction();
        callbacks->update();
}

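/* Full compacting collection: mark, compact both heaps, then flush the
instruction cache, since code blocks have moved. */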
void factor_vm::collect_compact(bool trace_contexts_p)
{
        collect_mark_impl(trace_contexts_p);
        collect_compact_impl(trace_contexts_p);
        code->flush_icache();
}

void factor_vm::collect_growing_heap(cell requested_bytes, bool trace_contexts_p)
{
        /* Grow the data heap and copy all live objects to the new heap. */
        data_heap *old = data;
        set_data_heap(data->grow(requested_bytes));
        collect_mark_impl(trace_contexts_p);
        collect_compact_code_impl(trace_contexts_p);
        code->flush_icache();
        delete old;
}

}