#include "master.hpp"

namespace factor {

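/* A function object that maps a block's pre-compaction address to its
post-compaction address by querying the forwarding map. Instances are plugged
into slot_visitor and code_block_visitor below to rewrite pointers. */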
template<typename Block> struct forwarder {
        mark_bits<Block> *forwarding_map;

        explicit forwarder(mark_bits<Block> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        Block *operator()(Block *block)
        {
                return forwarding_map->forward_block(block);
        }
};

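/* Live tuples are sized through their layout object, which may itself have
been compacted already. Compaction slides blocks toward the heap start in
ascending address order, so a layout at a lower address than the tuple has
already been moved (and its old storage possibly overwritten) and must be
looked up through the forwarding map; a layout at a higher address is still
in place. */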
static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
{
        /* The tuple layout may or may not have been forwarded already. Tricky. */
        object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
        tuple_layout *layout;

        if(layout_obj < obj)
        {
                /* It's already been moved up; dereference through forwarding
                map to get the size */
                layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
        }
        else
        {
                /* It hasn't been moved up yet; dereference directly */
                layout = (tuple_layout *)layout_obj;
        }

        return tuple_size(layout);
}

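/* Size function for the compaction pass over tenured space. An unmarked
block reports the size of the whole free run it starts, letting the pass
skip dead space in one step; a live tuple is sized through the forwarding
map as above; any other object knows its own size. */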
struct compaction_sizer {
        mark_bits<object> *forwarding_map;

        explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        cell operator()(object *obj)
        {
                if(!forwarding_map->marked_p(obj))
                        return forwarding_map->unmarked_block_size(obj);
                else if(obj->type() == TUPLE_TYPE)
                        return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
                else
                        return obj->size();
        }
};

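/* Called for each live object as tenured space is compacted. The object is
copied to its new address with memmove, since the old and new extents can
overlap when sliding. Then, working at the new address, data heap pointers
in its slots and its code block pointer (if any) are forwarded, and its
start offset is re-recorded in the object start map used by the card
marking write barrier. The tuple case computes the extent of its slots
through the forwarding map, for the same reason as above. */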
struct object_compaction_updater {
        factor_vm *parent;
        mark_bits<code_block> *code_forwarding_map;
        mark_bits<object> *data_forwarding_map;
        object_start_map *starts;

        explicit object_compaction_updater(factor_vm *parent_,
                mark_bits<object> *data_forwarding_map_,
                mark_bits<code_block> *code_forwarding_map_) :
                parent(parent_),
                code_forwarding_map(code_forwarding_map_),
                data_forwarding_map(data_forwarding_map_),
                starts(&parent->data->tenured->starts) {}

        void operator()(object *old_address, object *new_address, cell size)
        {
                cell payload_start;
                if(old_address->type() == TUPLE_TYPE)
                        payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
                else
                        payload_start = old_address->binary_payload_start();

                memmove(new_address,old_address,size);

                slot_visitor<forwarder<object> > slot_forwarder(parent,forwarder<object>(data_forwarding_map));
                slot_forwarder.visit_slots(new_address,payload_start);

                code_block_visitor<forwarder<code_block> > code_forwarder(parent,forwarder<code_block>(code_forwarding_map));
                code_forwarder.visit_object_code_block(new_address);

                starts->record_object_start_offset(new_address);
        }
};

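/* Fixes up one relocation entry in a compacted code block. The operand's
bits have already been copied to the new block, but they were encoded at
the block's old address, so each load passes old_offset to decode the
original absolute value; storing re-encodes it against the new location:

   RT_LITERAL: a pointer into the data heap; forward it.
   RT_XT, RT_XT_PIC, RT_XT_PIC_TAIL: a pointer to a code block; forward it.
   RT_HERE: an address inside this block; rebase it from the old block to
   the new one.
   RT_THIS, RT_CARDS_OFFSET, RT_DECKS_OFFSET: recomputed from the VM's
   current state.
   Anything else: reload and re-store the same absolute value, so that
   relative encodings are corrected for the move. */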
template<typename SlotForwarder>
struct code_block_compaction_relocation_visitor {
        factor_vm *parent;
        code_block *old_address;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_relocation_visitor(factor_vm *parent_,
                code_block *old_address_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                old_address(old_address_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(instruction_operand op)
        {
                cell old_offset = op.rel_offset() + (cell)old_address->xt();

                switch(op.rel_type())
                {
                case RT_LITERAL:
                        op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset)));
                        break;
                case RT_XT:
                case RT_XT_PIC:
                case RT_XT_PIC_TAIL:
                        op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset)));
                        break;
                case RT_HERE:
                        op.store_value(op.load_value(old_offset) - (cell)old_address + (cell)op.parent_code_block());
                        break;
                case RT_THIS:
                case RT_CARDS_OFFSET:
                case RT_DECKS_OFFSET:
                        parent->store_external_address(op);
                        break;
                default:
                        op.store_value(op.load_value(old_offset));
                        break;
                }
        }
};

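/* Called for each live code block as the code heap is compacted: copy the
block to its new address, forward the code block's header fields that point
into the data heap, then fix up each relocation entry with the visitor
above. */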
template<typename SlotForwarder>
struct code_block_compaction_updater {
        factor_vm *parent;
        slot_visitor<SlotForwarder> slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit code_block_compaction_updater(factor_vm *parent_,
                slot_visitor<SlotForwarder> slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_) :
                parent(parent_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_) {}

        void operator()(code_block *old_address, code_block *new_address, cell size)
        {
                memmove(new_address,old_address,size);

                slot_forwarder.visit_code_block_objects(new_address);

                code_block_compaction_relocation_visitor<SlotForwarder> visitor(parent,old_address,slot_forwarder,code_forwarder);
                new_address->each_instruction_operand(visitor);
        }
};

/* Compact data and code heaps */
void factor_vm::collect_compact_impl(bool trace_contexts_p)
{
        current_gc->event->started_compaction();

        tenured_space *tenured = data->tenured;
        mark_bits<object> *data_forwarding_map = &tenured->state;
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;

        /* Figure out where blocks are going to go */
        data_forwarding_map->compute_forwarding();
        code_forwarding_map->compute_forwarding();

        slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        code_forwarder.visit_uninitialized_code_blocks();

        /* Object start offsets get recomputed by the object_compaction_updater */
        data->tenured->starts.clear_object_start_offsets();

        /* Slide everything in tenured space up, and update data and code heap
        pointers inside objects. */
        object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map);
        compaction_sizer object_sizer(data_forwarding_map);
        tenured->compact(object_updater,object_sizer);

        /* Slide everything in the code heap up, and update data and code heap
        pointers inside code blocks. */
        code_block_compaction_updater<forwarder<object> > code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        slot_forwarder.visit_roots();
        if(trace_contexts_p)
        {
                slot_forwarder.visit_contexts();
                code_forwarder.visit_context_code_blocks();
        }

        update_code_roots_for_compaction();
        callbacks->update();

        current_gc->event->ended_compaction();
}

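/* By the time the code heap is compacted after growing the data heap, data
heap pointers are already correct; this updater forwards only the code block
pointer inside each object, leaving its other slots alone. */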
struct object_grow_heap_updater {
        code_block_visitor<forwarder<code_block> > code_forwarder;

        explicit object_grow_heap_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
                code_forwarder(code_forwarder_) {}

        void operator()(object *obj)
        {
                code_forwarder.visit_object_code_block(obj);
        }
};

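/* Identity forwarder for data heap pointers, used when only the code heap
moves and data heap addresses are already final. */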
struct dummy_slot_forwarder {
        object *operator()(object *obj) { return obj; }
};

/* Compact just the code heap, after growing the data heap */
void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
{
        /* Figure out where blocks are going to go */
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
        code_forwarding_map->compute_forwarding();

        slot_visitor<dummy_slot_forwarder> slot_forwarder(this,dummy_slot_forwarder());
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        code_forwarder.visit_uninitialized_code_blocks();

        if(trace_contexts_p)
                code_forwarder.visit_context_code_blocks();

        /* Update code heap references in data heap */
        object_grow_heap_updater updater(code_forwarder);
        each_object(updater);

        /* Slide everything in the code heap up, and update code heap
        pointers inside code blocks. */
        code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder,code_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        update_code_roots_for_compaction();
        callbacks->update();
}

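/* A full compacting collection: mark the live set, slide both heaps, and
flush the instruction cache since code has moved. */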
void factor_vm::collect_compact(bool trace_contexts_p)
{
        collect_mark_impl(trace_contexts_p);
        collect_compact_impl(trace_contexts_p);
        code->flush_icache();
}

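/* Used when a normal full collection cannot free enough memory: the data
heap is replaced with a larger one and live objects are copied into it (see
the comment below), so afterwards only the code heap needs compacting and
collect_compact_code_impl is used instead of collect_compact_impl. */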
void factor_vm::collect_growing_heap(cell requested_bytes, bool trace_contexts_p)
{
        /* Grow the data heap and copy all live objects to the new heap. */
        data_heap *old = data;
        set_data_heap(data->grow(requested_bytes));
        collect_mark_impl(trace_contexts_p);
        collect_compact_code_impl(trace_contexts_p);
        code->flush_icache();
        delete old;
}

}