#include "master.hpp"

namespace factor {

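/* Code blocks in the needs_fixup set are keyed by address, so when
compaction moves them, every entry must be re-keyed to the block's
forwarded address. */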
void factor_vm::update_fixup_set_for_compaction(mark_bits<code_block> *forwarding_map)
{
        std::set<code_block *>::const_iterator iter = code->needs_fixup.begin();
        std::set<code_block *>::const_iterator end = code->needs_fixup.end();

        std::set<code_block *> new_needs_fixup;
        for(; iter != end; iter++)
                new_needs_fixup.insert(forwarding_map->forward_block(*iter));

        code->needs_fixup = new_needs_fixup;
}

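/* Unary functor that maps a block to its address after compaction, using
the forwarding information computed from the mark bitmap. */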
template<typename Block> struct forwarder {
        mark_bits<Block> *forwarding_map;

        explicit forwarder(mark_bits<Block> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        Block *operator()(Block *block)
        {
                return forwarding_map->forward_block(block);
        }
};

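/* Tuples are sized by reading their layout object, and the layout may
itself have already been forwarded, so obj->size() cannot be used directly
during compaction. */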
static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
{
        /* The tuple layout may or may not have been forwarded already. Tricky. */
        object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
        tuple_layout *layout;

        if(layout_obj < obj)
        {
                /* It's already been moved up; dereference through forwarding
                map to get the size */
                layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
        }
        else
        {
                /* It hasn't been moved up yet; dereference directly */
                layout = (tuple_layout *)layout_obj;
        }

        return tuple_size(layout);
}

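/* Sizing functor used while walking tenured space during compaction;
unmarked (dead) blocks are measured via the mark bitmap, and live tuples
go through the forwarding-aware size computation above. */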
struct compaction_sizer {
        mark_bits<object> *forwarding_map;

        explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        cell operator()(object *obj)
        {
                if(!forwarding_map->marked_p(obj))
                        return forwarding_map->unmarked_block_size(obj);
                else if(obj->type() == TUPLE_TYPE)
                        return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
                else
                        return obj->size();
        }
};

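/* Applied to each live object as it is moved to its final location:
copies the object, forwards the data and code heap pointers inside it,
and records the object's new start offset in the object start map. */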
struct object_compaction_updater {
        factor_vm *parent;
        mark_bits<code_block> *code_forwarding_map;
        mark_bits<object> *data_forwarding_map;
        object_start_map *starts;

        explicit object_compaction_updater(factor_vm *parent_,
                mark_bits<object> *data_forwarding_map_,
                mark_bits<code_block> *code_forwarding_map_) :
                parent(parent_),
                code_forwarding_map(code_forwarding_map_),
                data_forwarding_map(data_forwarding_map_),
                starts(&parent->data->tenured->starts) {}

        void operator()(object *old_address, object *new_address, cell size)
        {
                cell payload_start;
                if(old_address->type() == TUPLE_TYPE)
                        payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
                else
                        payload_start = old_address->binary_payload_start();

                memmove(new_address,old_address,size);

                slot_visitor<forwarder<object> > slot_forwarder(parent,forwarder<object>(data_forwarding_map));
                slot_forwarder.visit_slots(new_address,payload_start);

                code_block_visitor<forwarder<code_block> > code_forwarder(parent,forwarder<code_block>(code_forwarding_map));
                code_forwarder.visit_object_code_block(new_address);

                starts->record_object_start_offset(new_address);
        }
};

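/* Applied to each instruction operand of a moved code block. Relocations
are resolved against the block's old address: literal values and referenced
code blocks are forwarded, while location-dependent addresses are
recomputed for the new location. */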
template<typename SlotForwarder>
struct code_block_compaction_relocation_visitor {
        factor_vm *parent;
        code_block *old_address;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_relocation_visitor(factor_vm *parent_,
                code_block *old_address_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                old_address(old_address_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(instruction_operand op)
        {
                cell old_offset = op.rel_offset() + (cell)old_address->xt();

                switch(op.rel_type())
                {
                case RT_IMMEDIATE:
                        op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset)));
                        break;
                case RT_XT:
                case RT_XT_PIC:
                case RT_XT_PIC_TAIL:
                        op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset)));
                        break;
                case RT_HERE:
                case RT_THIS:
                case RT_CARDS_OFFSET:
                case RT_DECKS_OFFSET:
                        parent->store_external_address(op);
                        break;
                default:
                        op.store_value(op.load_value(old_offset));
                        break;
                }
        }
};

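/* Applied to each live code block as it is moved to its final location:
copies the block, forwards the objects it references, then rewrites its
relocation entries with the visitor above. */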
template<typename SlotForwarder>
struct code_block_compaction_updater {
        factor_vm *parent;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_updater(factor_vm *parent_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(code_block *old_address, code_block *new_address, cell size)
        {
                memmove(new_address,old_address,size);

                slot_forwarder.visit_code_block_objects(new_address);

                code_block_compaction_relocation_visitor<SlotForwarder> visitor(parent,old_address,slot_forwarder,code_forwarder);
                new_address->each_instruction_operand(visitor);
        }
};

/* Compact data and code heaps */
void factor_vm::collect_compact_impl(bool trace_contexts_p)
{
        current_gc->event->started_compaction();

        tenured_space *tenured = data->tenured;
        mark_bits<object> *data_forwarding_map = &tenured->state;
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;

        /* Figure out where blocks are going to go */
        data_forwarding_map->compute_forwarding();
        code_forwarding_map->compute_forwarding();

        update_fixup_set_for_compaction(code_forwarding_map);

        slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        /* Object start offsets get recomputed by the object_compaction_updater */
        data->tenured->starts.clear_object_start_offsets();

        /* Slide everything in tenured space up, and update data and code heap
        pointers inside objects. */
        object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map);
        compaction_sizer object_sizer(data_forwarding_map);
        tenured->compact(object_updater,object_sizer);

        /* Slide everything in the code heap up, and update data and code heap
        pointers inside code blocks. */
        code_block_compaction_updater<forwarder<object> > code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        slot_forwarder.visit_roots();
        if(trace_contexts_p)
        {
                slot_forwarder.visit_contexts();
                code_forwarder.visit_context_code_blocks();
        }

        update_code_roots_for_compaction();

        /* Update callback stub machine code, since the code blocks they
        jump to may have moved */
        callbacks->update();

        current_gc->event->ended_compaction();
}

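/* Forwards the code block pointer inside a single data heap object;
applied to every object when only the code heap is compacted. */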
struct object_grow_heap_updater {
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit object_grow_heap_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
                code_forwarder(code_forwarder_) {}

        void operator()(object *obj)
        {
                code_forwarder.visit_object_code_block(obj);
        }
};

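/* Identity forwarder for data heap pointers, used when the data heap does
not move. */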
struct dummy_slot_forwarder {
        object *operator()(object *obj) { return obj; }
};

/* Compact just the code heap, after growing the data heap */
void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
{
        /* Figure out where blocks are going to go */
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
        code_forwarding_map->compute_forwarding();

        update_fixup_set_for_compaction(code_forwarding_map);

        slot_visitor<dummy_slot_forwarder> slot_forwarder(this,dummy_slot_forwarder());
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        if(trace_contexts_p)
                code_forwarder.visit_context_code_blocks();

        /* Update code heap references in data heap */
        object_grow_heap_updater updater(code_forwarder);
        each_object(updater);

        /* Slide everything in the code heap up, and update code heap
        pointers inside code blocks. */
        code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        update_code_roots_for_compaction();

        /* Update callback stub machine code, since the code blocks they
        jump to may have moved */
        callbacks->update();
}

void factor_vm::collect_compact(bool trace_contexts_p)
{
        collect_mark_impl(trace_contexts_p);
        collect_compact_impl(trace_contexts_p);
        code->flush_icache();
}

void factor_vm::collect_growing_heap(cell requested_bytes, bool trace_contexts_p)
{
        /* Grow the data heap and copy all live objects to the new heap. */
        data_heap *old = data;
        set_data_heap(data->grow(requested_bytes));
        collect_mark_impl(trace_contexts_p);
        collect_compact_code_impl(trace_contexts_p);
        code->flush_icache();
        delete old;
}

}