#include "master.hpp"

namespace factor {

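/* After compaction, code blocks registered in the needs_fixup set will
have moved, so rebuild the set with each block's forwarded address */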
void factor_vm::update_fixup_set_for_compaction(mark_bits<code_block> *forwarding_map)
{
        std::set<code_block *>::const_iterator iter = code->needs_fixup.begin();
        std::set<code_block *>::const_iterator end = code->needs_fixup.end();

        std::set<code_block *> new_needs_fixup;
        for(; iter != end; ++iter)
                new_needs_fixup.insert(forwarding_map->forward_block(*iter));

        code->needs_fixup = new_needs_fixup;
}

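/* Function object mapping a block to its post-compaction address; used as
the fixup policy for slot and code block visitors during compaction */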
template<typename Block> struct forwarder {
        mark_bits<Block> *forwarding_map;

        explicit forwarder(mark_bits<Block> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        Block *operator()(Block *block)
        {
                return forwarding_map->forward_block(block);
        }
};

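/* A tuple's size lives in its layout object, which is itself a heap
object. Objects are slid towards the start of the heap, so a layout at a
lower address than the tuple has already been moved by the time the tuple
is sized */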
static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
{
        /* The tuple layout may or may not have been forwarded already. Tricky. */
        object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
        tuple_layout *layout;

        if(layout_obj < obj)
        {
                /* It's already been moved up; dereference through forwarding
                map to get the size */
                layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
        }
        else
        {
                /* It hasn't been moved up yet; dereference directly */
                layout = (tuple_layout *)layout_obj;
        }

        return tuple_size(layout);
}

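/* Sizing policy used while sliding the data heap: dead objects are sized
via the mark bitmap so entire unmarked runs can be skipped, and tuples
need the forwarding-aware size computation above */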
struct compaction_sizer {
        mark_bits<object> *forwarding_map;

        explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        cell operator()(object *obj)
        {
                if(!forwarding_map->marked_p(obj))
                        return forwarding_map->unmarked_block_size(obj);
                else if(obj->type() == TUPLE_TYPE)
                        return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
                else
                        return obj->size();
        }
};

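/* Called on each live object as it is slid to its new address: the object
is copied, its data and code heap references are forwarded, and its new
start offset is recorded in the object start map */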
struct object_compaction_updater {
        factor_vm *parent;
        mark_bits<code_block> *code_forwarding_map;
        mark_bits<object> *data_forwarding_map;
        object_start_map *starts;

        explicit object_compaction_updater(factor_vm *parent_,
                mark_bits<object> *data_forwarding_map_,
                mark_bits<code_block> *code_forwarding_map_) :
                parent(parent_),
                code_forwarding_map(code_forwarding_map_),
                data_forwarding_map(data_forwarding_map_),
                starts(&parent->data->tenured->starts) {}

        void operator()(object *old_address, object *new_address, cell size)
        {
                cell payload_start;
                if(old_address->type() == TUPLE_TYPE)
                        payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
                else
                        payload_start = old_address->binary_payload_start();

                memmove(new_address,old_address,size);

                slot_visitor<forwarder<object> > slot_forwarder(parent,forwarder<object>(data_forwarding_map));
                slot_forwarder.visit_slots(new_address,payload_start);

                code_block_visitor<forwarder<code_block> > code_forwarder(parent,forwarder<code_block>(code_forwarding_map));
                code_forwarder.visit_object_code_block(new_address);

                starts->record_object_start_offset(new_address);
        }
};

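/* Re-applies every relocation in a moved code block. Operands are decoded
relative to the block's old address and re-encoded relative to the new
one */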
template<typename SlotForwarder>
struct code_block_compaction_relocation_visitor {
        factor_vm *parent;
        code_block *old_address;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_relocation_visitor(factor_vm *parent_,
                code_block *old_address_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                old_address(old_address_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(relocation_entry rel, cell index, code_block *compiled)
        {
                relocation_type type = rel.rel_type();
                instruction_operand op(rel.rel_class(),rel.rel_offset() + (cell)compiled->xt());

                array *literals = (parent->to_boolean(compiled->literals)
                        ? untag<array>(compiled->literals) : NULL);

                cell old_offset = rel.rel_offset() + (cell)old_address->xt();

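                /* Data and code heap references are forwarded to their new
                addresses; block-relative values are recomputed against the
                new block address, VM-global offsets are refreshed, and
                anything else is re-encoded unchanged */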
                switch(type)
                {
                case RT_IMMEDIATE:
                        op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset)));
                        break;
                case RT_XT:
                case RT_XT_PIC:
                case RT_XT_PIC_TAIL:
                        op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset)));
                        break;
                case RT_HERE:
                        op.store_value(parent->compute_here_relocation(array_nth(literals,index),rel.rel_offset(),compiled));
                        break;
                case RT_THIS:
                        op.store_value((cell)compiled->xt());
                        break;
                case RT_CARDS_OFFSET:
                        op.store_value(parent->cards_offset);
                        break;
                case RT_DECKS_OFFSET:
                        op.store_value(parent->decks_offset);
                        break;
                default:
                        op.store_value(op.load_value(old_offset));
                        break;
                }
        }
};

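/* Called on each live code block as it is slid to its new address: the
block is copied, then its object references and relocation entries are
updated to point into the compacted heaps */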
template<typename SlotForwarder>
struct code_block_compaction_updater {
        factor_vm *parent;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_updater(factor_vm *parent_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(code_block *old_address, code_block *new_address, cell size)
        {
                memmove(new_address,old_address,size);

                slot_forwarder.visit_code_block_objects(new_address);

                code_block_compaction_relocation_visitor<SlotForwarder> visitor(parent,old_address,slot_forwarder,code_forwarder);
                parent->iterate_relocations(new_address,visitor);
        }
};

/* Compact data and code heaps */
void factor_vm::collect_compact_impl(bool trace_contexts_p)
{
        current_gc->event->started_compaction();

        tenured_space *tenured = data->tenured;
        mark_bits<object> *data_forwarding_map = &tenured->state;
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;

        /* Figure out where blocks are going to go */
        data_forwarding_map->compute_forwarding();
        code_forwarding_map->compute_forwarding();

        update_fixup_set_for_compaction(code_forwarding_map);

        slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        /* Object start offsets get recomputed by the object_compaction_updater */
        data->tenured->starts.clear_object_start_offsets();

        /* Slide everything in tenured space up, and update data and code heap
        pointers inside objects. */
        object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map);
        compaction_sizer object_sizer(data_forwarding_map);
        tenured->compact(object_updater,object_sizer);

        /* Slide everything in the code heap up, and update data and code heap
        pointers inside code blocks. */
        code_block_compaction_updater<forwarder<object> > code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

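        /* Forward GC roots, and context and callback references if requested */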
        slot_forwarder.visit_roots();
        if(trace_contexts_p)
        {
                slot_forwarder.visit_contexts();
                code_forwarder.visit_context_code_blocks();
                code_forwarder.visit_callback_code_blocks();
        }

        update_code_roots_for_compaction();

        current_gc->event->ended_compaction();
}

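/* Updates an object's code block reference. After the data heap is grown,
data heap objects do not move, but their pointers into the compacted code
heap must still be forwarded */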
struct object_grow_heap_updater {
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit object_grow_heap_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
                code_forwarder(code_forwarder_) {}

        void operator()(object *obj)
        {
                code_forwarder.visit_object_code_block(obj);
        }
};

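/* Identity forwarder: data heap objects stay in place when only the code
heap is being compacted */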
struct dummy_slot_forwarder {
        object *operator()(object *obj) { return obj; }
};

/* Compact just the code heap, after growing the data heap */
void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
{
        /* Figure out where blocks are going to go */
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
        code_forwarding_map->compute_forwarding();

        update_fixup_set_for_compaction(code_forwarding_map);

        slot_visitor<dummy_slot_forwarder> slot_forwarder(this,dummy_slot_forwarder());
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        if(trace_contexts_p)
        {
                code_forwarder.visit_context_code_blocks();
                code_forwarder.visit_callback_code_blocks();
        }

        /* Update code heap references in data heap */
        object_grow_heap_updater updater(code_forwarder);
        each_object(updater);

        /* Slide everything in the code heap up, and update code heap
        pointers inside code blocks. */
        code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        update_code_roots_for_compaction();
}

}