#include "master.hpp"

namespace factor {

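/* Maps a block's old address to the address it will occupy after
compaction, using the forwarding information computed in the mark bitmap. */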
template<typename Block> struct forwarder {
        mark_bits<Block> *forwarding_map;

        explicit forwarder(mark_bits<Block> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        Block *operator()(Block *block)
        {
                return forwarding_map->forward_block(block);
        }
};

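/* Computing a tuple's size requires its layout object, which lives in the
same heap and may itself have been forwarded already. Compaction slides
blocks towards the start of the heap, so anything at a lower address than
the tuple has already been moved. */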
static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
{
        /* The tuple layout may or may not have been forwarded already. Tricky. */
        object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
        tuple_layout *layout;

        if(layout_obj < obj)
        {
                /* It's already been moved up; dereference through forwarding
                map to get the size */
                layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
        }
        else
        {
                /* It hasn't been moved up yet; dereference directly */
                layout = (tuple_layout *)layout_obj;
        }

        return tuple_size(layout);
}

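/* Size functor for the compaction loop. Unmarked (dead) blocks are sized
via the mark bitmap, live tuples need the forwarding-aware computation
above, and every other object knows its own size. */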
struct compaction_sizer {
        mark_bits<object> *forwarding_map;

        explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        cell operator()(object *obj)
        {
                if(!forwarding_map->marked_p(obj))
                        return forwarding_map->unmarked_block_size(obj);
                else if(obj->type() == TUPLE_TYPE)
                        return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
                else
                        return obj->size();
        }
};

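/* Called for each live object as it is moved. Copies the object to its new
address, forwards the data heap pointers in its slots and any attached code
block pointer, and records the new object start offset for card scanning. */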
struct object_compaction_updater {
        factor_vm *parent;
        mark_bits<code_block> *code_forwarding_map;
        mark_bits<object> *data_forwarding_map;
        object_start_map *starts;

        explicit object_compaction_updater(factor_vm *parent_,
                mark_bits<object> *data_forwarding_map_,
                mark_bits<code_block> *code_forwarding_map_) :
                parent(parent_),
                code_forwarding_map(code_forwarding_map_),
                data_forwarding_map(data_forwarding_map_),
                starts(&parent->data->tenured->starts) {}

        void operator()(object *old_address, object *new_address, cell size)
        {
                cell payload_start;
                if(old_address->type() == TUPLE_TYPE)
                        payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
                else
                        payload_start = old_address->binary_payload_start();

                memmove(new_address,old_address,size);

                slot_visitor<forwarder<object> > slot_forwarder(parent,forwarder<object>(data_forwarding_map));
                slot_forwarder.visit_slots(new_address,payload_start);

                code_block_visitor<forwarder<code_block> > code_forwarder(parent,forwarder<code_block>(code_forwarding_map));
                code_forwarder.visit_object_code_block(new_address);

                starts->record_object_start_offset(new_address);
        }
};

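/* Re-resolves each relocation entry in a code block that has just been
moved. The instructions were copied verbatim, so operand values must be
decoded relative to the old address before being rewritten. */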
template<typename SlotForwarder>
struct code_block_compaction_relocation_visitor {
        factor_vm *parent;
        code_block *old_address;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_relocation_visitor(factor_vm *parent_,
                code_block *old_address_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                old_address(old_address_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(instruction_operand op)
        {
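                /* The operand lives in the copied block, but its stored
                value still refers to the old location; recover the
                operand's absolute offset in the old block */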
                cell old_offset = op.rel_offset() + (cell)old_address->entry_point();

                switch(op.rel_type())
                {
                case RT_LITERAL:
                        op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset)));
                        break;
                case RT_FLOAT:
                        op.store_float(slot_forwarder.visit_pointer(op.load_float(old_offset)));
                        break;
                case RT_ENTRY_POINT:
                case RT_ENTRY_POINT_PIC:
                case RT_ENTRY_POINT_PIC_TAIL:
                        op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset)));
                        break;
                case RT_HERE:
                        op.store_value(op.load_value(old_offset) - (cell)old_address + (cell)op.parent_code_block());
                        break;
                case RT_THIS:
                case RT_CARDS_OFFSET:
                case RT_DECKS_OFFSET:
                        parent->store_external_address(op);
                        break;
                default:
                        op.store_value(op.load_value(old_offset));
                        break;
                }
        }
};

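/* Called for each live code block as it is moved. Copies the block,
forwards the objects it references, then fixes up embedded addresses with
the relocation visitor above. */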
template<typename SlotForwarder>
struct code_block_compaction_updater {
        factor_vm *parent;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_updater(factor_vm *parent_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(code_block *old_address, code_block *new_address, cell size)
        {
                memmove(new_address,old_address,size);

                slot_forwarder.visit_code_block_objects(new_address);

                code_block_compaction_relocation_visitor<SlotForwarder> visitor(parent,old_address,slot_forwarder,code_forwarder);
                new_address->each_instruction_operand(visitor);
        }
};

/* After a compaction, invalidate any code heap roots which are not
marked, and also slide the valid roots up so that call sites can be updated
correctly in case an inline cache compilation triggered compaction. */
void factor_vm::update_code_roots_for_compaction()
{
        std::vector<code_root *>::const_iterator iter = code_roots.begin();
        std::vector<code_root *>::const_iterator end = code_roots.end();

        mark_bits<code_block> *state = &code->allocator->state;

        for(; iter < end; iter++)
        {
                code_root *root = *iter;
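                /* ~data_alignment + 1 is -data_alignment, so the mask
                rounds the root's address down to the code block's aligned
                start address */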
                code_block *block = (code_block *)(root->value & (~data_alignment + 1));

                /* Offset of return address within 16-byte allocation line */
                cell offset = root->value - (cell)block;

                if(root->valid && state->marked_p(block))
                {
                        block = state->forward_block(block);
                        root->value = (cell)block + offset;
                }
                else
                        root->valid = false;
        }
}

/* Compact data and code heaps */
void factor_vm::collect_compact_impl(bool trace_contexts_p)
{
        current_gc->event->started_compaction();

        tenured_space *tenured = data->tenured;
        mark_bits<object> *data_forwarding_map = &tenured->state;
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;

        /* Figure out where blocks are going to go */
        data_forwarding_map->compute_forwarding();
        code_forwarding_map->compute_forwarding();

        slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

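        /* Also forward references to code blocks which have been allocated
        but not yet fully initialized (a compilation may be in progress) */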
        code_forwarder.visit_uninitialized_code_blocks();

        /* Object start offsets get recomputed by the object_compaction_updater */
        data->tenured->starts.clear_object_start_offsets();

        /* Slide everything in tenured space up, and update data and code heap
        pointers inside objects. */
        object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map);
        compaction_sizer object_sizer(data_forwarding_map);
        tenured->compact(object_updater,object_sizer);

        /* Slide everything in the code heap up, and update data and code heap
        pointers inside code blocks. */
        code_block_compaction_updater<forwarder<object> > code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        slot_forwarder.visit_roots();
        if(trace_contexts_p)
        {
                slot_forwarder.visit_contexts();
                code_forwarder.visit_context_code_blocks();
        }

        update_code_roots_for_compaction();
        callbacks->update();

        current_gc->event->ended_compaction();
}

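/* After the data heap is grown, objects keep their addresses but the code
heap is compacted; this updater forwards the code block pointer inside each
data heap object. */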
struct object_grow_heap_updater {
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit object_grow_heap_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
                code_forwarder(code_forwarder_) {}

        void operator()(object *obj)
        {
                code_forwarder.visit_object_code_block(obj);
        }
};

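/* Identity forwarder used when only the code heap moves; data heap
pointers are left untouched. */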
struct dummy_slot_forwarder {
        object *operator()(object *obj) { return obj; }
};

/* Compact just the code heap, after growing the data heap */
void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
{
        /* Figure out where blocks are going to go */
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
        code_forwarding_map->compute_forwarding();

        slot_visitor<dummy_slot_forwarder> slot_forwarder(this,dummy_slot_forwarder());
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        code_forwarder.visit_uninitialized_code_blocks();

        if(trace_contexts_p)
                code_forwarder.visit_context_code_blocks();

        /* Update code heap references in data heap */
        object_grow_heap_updater updater(code_forwarder);
        each_object(updater);

        /* Slide everything in the code heap up, and update code heap
        pointers inside code blocks. */
        code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        update_code_roots_for_compaction();
        callbacks->update();
}

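/* Full collection: mark both heaps, slide everything live towards the
start of each, then flush the instruction cache since code has moved. */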
void factor_vm::collect_compact(bool trace_contexts_p)
{
        collect_mark_impl(trace_contexts_p);
        collect_compact_impl(trace_contexts_p);
        code->flush_icache();
}

void factor_vm::collect_growing_heap(cell requested_bytes, bool trace_contexts_p)
{
        /* Grow the data heap and copy all live objects to the new heap. */
        data_heap *old = data;
        set_data_heap(data->grow(requested_bytes));
        collect_mark_impl(trace_contexts_p);
        collect_compact_code_impl(trace_contexts_p);
        code->flush_icache();
        delete old;
}

}