#include "master.hpp"

namespace factor {

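/* Fixup policy for a full compaction of both heaps. The fingers track how
far the sliding pass has advanced through each heap: a pointer below the
finger refers to a block that has already been moved, so it must be
translated through the forwarding map, while a pointer at or above the
finger still refers to the block's original address. */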
struct compaction_fixup {
  static const bool translated_code_block_map = false;

  mark_bits* data_forwarding_map;
  mark_bits* code_forwarding_map;
  const object** data_finger;
  const code_block** code_finger;

  compaction_fixup(mark_bits* data_forwarding_map,
                   mark_bits* code_forwarding_map,
                   const object** data_finger,
                   const code_block** code_finger)
      : data_forwarding_map(data_forwarding_map),
        code_forwarding_map(code_forwarding_map),
        data_finger(data_finger),
        code_finger(code_finger) {}

  object* fixup_data(object* obj) {
    return (object*)data_forwarding_map->forward_block((cell)obj);
  }

  code_block* fixup_code(code_block* compiled) {
    return (code_block*)code_forwarding_map->forward_block((cell)compiled);
  }

  object* translate_data(const object* obj) {
    if (obj < *data_finger)
      return fixup_data((object*)obj);
    else
      return (object*)obj;
  }

  code_block* translate_code(const code_block* compiled) {
    if (compiled < *code_finger)
      return fixup_code((code_block*)compiled);
    else
      return (code_block*)compiled;
  }

  cell size(object* obj) {
    if (data_forwarding_map->marked_p((cell)obj))
      return obj->size(*this);
    else
      return data_forwarding_map->unmarked_block_size((cell)obj);
  }

  cell size(code_block* compiled) {
    if (code_forwarding_map->marked_p((cell)compiled))
      return compiled->size(*this);
    else
      return code_forwarding_map->unmarked_block_size((cell)compiled);
  }
};

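/* Invoked for each live object as tenured space is compacted: visit the
object's slots and its code block reference at the new address, and
rebuild the object start offset map as we go. */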
struct object_compaction_updater {
  factor_vm* parent;
  compaction_fixup fixup;
  object_start_map* starts;

  object_compaction_updater(factor_vm* parent, compaction_fixup fixup)
      : parent(parent),
        fixup(fixup),
        starts(&parent->data->tenured->starts) {}

  void operator()(object* old_address, object* new_address, cell size) {
    slot_visitor<compaction_fixup> forwarder(parent, fixup);
    forwarder.visit_slots(new_address);
    forwarder.visit_object_code_block(new_address);
    starts->record_object_start_offset(new_address);
  }
};

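/* Applied to every instruction operand of a moved code block. Each
relocation is reloaded relative to the block's old entry point and
re-stored with data and code heap addresses forwarded to their new
locations; external addresses are recomputed, and any other relocation
is copied over unchanged. */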
template <typename Fixup> struct code_block_compaction_relocation_visitor {
  factor_vm* parent;
  code_block* old_address;
  Fixup fixup;

  code_block_compaction_relocation_visitor(factor_vm* parent,
                                           code_block* old_address,
                                           Fixup fixup)
      : parent(parent), old_address(old_address), fixup(fixup) {}

  void operator()(instruction_operand op) {
    cell old_offset = op.rel_offset() + old_address->entry_point();

    switch (op.rel_type()) {
      case RT_LITERAL: {
        cell value = op.load_value(old_offset);
        if (immediate_p(value))
          op.store_value(value);
        else
          op.store_value(
              RETAG(fixup.fixup_data(untag<object>(value)), TAG(value)));
        break;
      }
      case RT_ENTRY_POINT:
      case RT_ENTRY_POINT_PIC:
      case RT_ENTRY_POINT_PIC_TAIL:
      case RT_HERE: {
        cell value = op.load_value(old_offset);
        cell offset = TAG(value);
        code_block* compiled = (code_block*)UNTAG(value);
        op.store_value((cell)fixup.fixup_code(compiled) + offset);
        break;
      }
      case RT_THIS:
      case RT_CARDS_OFFSET:
      case RT_DECKS_OFFSET:
        parent->store_external_address(op);
        break;
      default:
        op.store_value(op.load_value(old_offset));
        break;
    }
  }
};

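/* Invoked for each live code block as the code heap is compacted:
forward the heap objects the block refers to, then rewrite its
relocations via the visitor above. */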
template <typename Fixup> struct code_block_compaction_updater {
  factor_vm* parent;
  Fixup fixup;
  slot_visitor<Fixup> forwarder;

  code_block_compaction_updater(
      factor_vm* parent, Fixup fixup, slot_visitor<Fixup> forwarder)
      : parent(parent),
        fixup(fixup),
        forwarder(forwarder) {}

  void operator()(code_block* old_address, code_block* new_address, cell size) {
    forwarder.visit_code_block_objects(new_address);

    code_block_compaction_relocation_visitor<Fixup> visitor(parent, old_address,
                                                            fixup);
    new_address->each_instruction_operand(visitor);
  }
};

/* After a compaction, invalidate any code heap roots which are not
marked, and forward the valid roots to their new addresses so that call
sites can be updated correctly in case an inline cache compilation
triggered the compaction. */
void factor_vm::update_code_roots_for_compaction() {
  mark_bits* state = &code->allocator->state;

  FACTOR_FOR_EACH(code_roots) {
    code_root* root = *iter;

    /* Round the root down to its 16-byte allocation line;
       ~data_alignment + 1 == -data_alignment, the alignment mask. */
    cell block = root->value & (~data_alignment + 1);

    /* Offset of return address within the allocation line */
    cell offset = root->value - block;

    if (root->valid && state->marked_p(block)) {
      block = state->forward_block(block);
      root->value = block + offset;
    } else
      root->valid = false;
  }
}

/* Compact data and code heaps */
void factor_vm::collect_compact_impl() {
  gc_event* event = current_gc->event;

#ifdef FACTOR_DEBUG
  code->verify_all_blocks_set();
#endif

  if (event)
    event->started_compaction();

  tenured_space* tenured = data->tenured;
  mark_bits* data_forwarding_map = &tenured->state;
  mark_bits* code_forwarding_map = &code->allocator->state;

  /* Figure out where blocks are going to go */
  data_forwarding_map->compute_forwarding();
  code_forwarding_map->compute_forwarding();

  const object* data_finger = (object*)tenured->start;
  const code_block* code_finger = (code_block*)code->allocator->start;

  {
    compaction_fixup fixup(data_forwarding_map, code_forwarding_map,
                           &data_finger, &code_finger);

    slot_visitor<compaction_fixup> forwarder(this, fixup);

    forwarder.visit_uninitialized_code_blocks();

    /* Object start offsets get recomputed by the object_compaction_updater */
    data->tenured->starts.clear_object_start_offsets();

    /* Slide everything in tenured space up, and update data and code heap
       pointers inside objects. */
    {
      object_compaction_updater object_updater(this, fixup);
      tenured->compact(object_updater, fixup, &data_finger);
    }

    /* Slide everything in the code heap up, and update data and code heap
       pointers inside code blocks. */
    {
      code_block_compaction_updater<compaction_fixup> code_block_updater(
          this, fixup, forwarder);
      code->allocator->compact(code_block_updater, fixup, &code_finger);
    }

    forwarder.visit_all_roots();
    forwarder.visit_context_code_blocks();
  }

  update_code_roots_for_compaction();
  callbacks->update();

  code->initialize_all_blocks_set();

  if (event)
    event->ended_compaction();
}

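/* Fixup policy for compacting only the code heap. Data heap pointers are
left untouched (fixup_data is the identity), since data objects do not
move during a code-only compaction; only code block references need to be
forwarded. */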
struct code_compaction_fixup {
  static const bool translated_code_block_map = false;

  mark_bits* code_forwarding_map;
  const code_block** code_finger;

  code_compaction_fixup(mark_bits* code_forwarding_map,
                        const code_block** code_finger)
      : code_forwarding_map(code_forwarding_map), code_finger(code_finger) {}

  object* fixup_data(object* obj) { return obj; }

  code_block* fixup_code(code_block* compiled) {
    return (code_block*)code_forwarding_map->forward_block((cell)compiled);
  }

  object* translate_data(const object* obj) { return fixup_data((object*)obj); }

  code_block* translate_code(const code_block* compiled) {
    if (compiled < *code_finger)
      return fixup_code((code_block*)compiled);
    else
      return (code_block*)compiled;
  }

  cell size(object* obj) { return obj->size(); }

  cell size(code_block* compiled) {
    if (code_forwarding_map->marked_p((cell)compiled))
      return compiled->size(*this);
    else
      return code_forwarding_map->unmarked_block_size((cell)compiled);
  }
};

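/* After the code heap is compacted, every object's code block pointer may
refer to a moved code block; this updater forwards it. */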
struct object_grow_heap_updater {
  slot_visitor<code_compaction_fixup> forwarder;

  explicit object_grow_heap_updater(
      slot_visitor<code_compaction_fixup> forwarder)
      : forwarder(forwarder) {}

  void operator()(object* obj) { forwarder.visit_object_code_block(obj); }
};

/* Compact just the code heap, after growing the data heap */
void factor_vm::collect_compact_code_impl() {
  /* Figure out where blocks are going to go */
  mark_bits* code_forwarding_map = &code->allocator->state;
  code_forwarding_map->compute_forwarding();

  const code_block* code_finger = (code_block*)code->allocator->start;

  code_compaction_fixup fixup(code_forwarding_map, &code_finger);
  slot_visitor<code_compaction_fixup> forwarder(this, fixup);

  forwarder.visit_uninitialized_code_blocks();
  forwarder.visit_context_code_blocks();

  /* Update code heap references in data heap */
  object_grow_heap_updater object_updater(forwarder);
  each_object(object_updater);

  /* Slide everything in the code heap up, and update code heap
     pointers inside code blocks. */
  code_block_compaction_updater<code_compaction_fixup> code_block_updater(
      this, fixup, forwarder);
  code->allocator->compact(code_block_updater, fixup, &code_finger);

  update_code_roots_for_compaction();
  callbacks->update();
  code->initialize_all_blocks_set();
}

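/* Full compacting collection: mark, then slide both heaps. If the data
heap is still badly fragmented afterwards, fall back to growing it. */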
void factor_vm::collect_compact() {
  collect_mark_impl();
  collect_compact_impl();

  if (data->high_fragmentation_p()) {
    /* Compaction did not free up enough memory. Grow the heap. */
    set_current_gc_op(collect_growing_heap_op);
    collect_growing_heap(0);
  }

  code->flush_icache();
}

void factor_vm::collect_growing_heap(cell requested_size) {
  /* Grow the data heap and copy all live objects to the new heap. */
  data_heap* old = data;
  set_data_heap(data->grow(&nursery, requested_size));
  collect_mark_impl();
  collect_compact_code_impl();
  code->flush_icache();
  delete old;
}

}