#include "master.hpp"

namespace factor {

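/* A fixup policy used by the slot visitor while the heaps are being slid.
   It forwards pointers through the data and code mark bitmaps; the two
   "fingers" record how far compaction has progressed in each heap, so only
   pointers behind a finger are translated (see translate_data and
   translate_code below). */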
struct compaction_fixup {
  static const bool translated_code_block_map = false;

  mark_bits* data_forwarding_map;
  mark_bits* code_forwarding_map;
  const object** data_finger;
  const code_block** code_finger;

  compaction_fixup(mark_bits* data_forwarding_map,
                   mark_bits* code_forwarding_map,
                   const object** data_finger,
                   const code_block** code_finger)
      : data_forwarding_map(data_forwarding_map),
        code_forwarding_map(code_forwarding_map),
        data_finger(data_finger),
        code_finger(code_finger) {}

  object* fixup_data(object* obj) {
    return (object*)data_forwarding_map->forward_block((cell)obj);
  }

  code_block* fixup_code(code_block* compiled) {
    return (code_block*)code_forwarding_map->forward_block((cell)compiled);
  }

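  /* Pointers below the finger refer to blocks that have already been slid
     to their new locations, so they must be forwarded; pointers at or above
     it refer to blocks that have not moved yet and pass through unchanged. */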
  object* translate_data(const object* obj) {
    if (obj < *data_finger)
      return fixup_data((object*)obj);
    return (object*)obj;
  }

  code_block* translate_code(const code_block* compiled) {
    if (compiled < *code_finger)
      return fixup_code((code_block*)compiled);
    return (code_block*)compiled;
  }

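  /* A marked block reports its own size; for an unmarked block the mark
     bitmap is consulted instead, which (judging by the name) returns the
     size of the whole dead span so the compactor can skip it in one step. */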
  cell size(object* obj) {
    if (data_forwarding_map->marked_p((cell)obj))
      return obj->size(*this);
    return data_forwarding_map->unmarked_block_size((cell)obj);
  }

  cell size(code_block* compiled) {
    if (code_forwarding_map->marked_p((cell)compiled))
      return compiled->size(*this);
    return code_forwarding_map->unmarked_block_size((cell)compiled);
  }
};

/* After a compaction, invalidate any code heap roots which are not
   marked, and also slide the valid roots up so that call sites can be
   updated correctly in case an inline cache compilation triggered the
   compaction. */
void factor_vm::update_code_roots_for_compaction() {

  mark_bits* state = &code->allocator->state;

  FACTOR_FOR_EACH(code_roots) {
    code_root* root = *iter;
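    /* Round the value down to the 16-byte allocation line containing it:
       since data_alignment is a power of two, ~data_alignment + 1 equals
       -data_alignment, a mask that clears the low bits. For example, with
       data_alignment = 16, a root value of 0x100f yields block = 0x1000
       and offset = 0xf below. */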
    cell block = root->value & (~data_alignment + 1);

    /* Offset of return address within 16-byte allocation line */
    cell offset = root->value - block;

    if (root->valid && state->marked_p(block)) {
      block = state->forward_block(block);
      root->value = block + offset;
    } else
      root->valid = false;
  }
}

/* Compact data and code heaps */
void factor_vm::collect_compact_impl() {
  gc_event* event = current_gc->event;

#ifdef FACTOR_DEBUG
  code->verify_all_blocks_set();
#endif

  if (event)
    event->started_compaction();

  tenured_space* tenured = data->tenured;
  mark_bits* data_forwarding_map = &tenured->state;
  mark_bits* code_forwarding_map = &code->allocator->state;

  /* Figure out where blocks are going to go */
  data_forwarding_map->compute_forwarding();
  code_forwarding_map->compute_forwarding();
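  /* From here on, forward_block() can map any marked block to its
     post-compaction address; presumably each block's destination is the
     heap start plus the total size of the marked blocks that precede it. */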

  /* The fingers start at the bottom of each heap and advance as blocks
     are moved. */
  const object* data_finger = (object*)tenured->start;
  const code_block* code_finger = (code_block*)code->allocator->start;

  {
    compaction_fixup fixup(data_forwarding_map, code_forwarding_map,
                           &data_finger, &code_finger);
    slot_visitor<compaction_fixup> forwarder(this, fixup);

    forwarder.visit_uninitialized_code_blocks();

    /* Object start offsets get recomputed by the compact_object_func
       lambda below. */
    data->tenured->starts.clear_object_start_offsets();

    /* Slide everything in tenured space up, and update data and code heap
       pointers inside objects. */
    auto compact_object_func = [&](object* old_addr, object* new_addr,
                                   cell size) {
      forwarder.visit_slots(new_addr);
      forwarder.visit_object_code_block(new_addr);
      tenured->starts.record_object_start_offset(new_addr);
    };
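    /* compact() presumably walks the heap in address order, copying each
       marked block to its forwarded address, invoking compact_object_func
       on the copy and advancing data_finger as it goes, so translate_data()
       forwards exactly the pointers that have already moved. */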
    tenured->compact(compact_object_func, fixup, &data_finger);

    /* Slide everything in the code heap up, and update data and code heap
       pointers inside code blocks. */
    auto compact_code_func = [&](code_block* old_addr, code_block* new_addr,
                                 cell size) {
      forwarder.visit_code_block_objects(new_addr);
      /* Relative operands are decoded against the block's old entry point,
         so capture it before they are rewritten. */
      cell old_entry_point = old_addr->entry_point();
      forwarder.visit_instruction_operands(new_addr, old_entry_point);
    };
    code->allocator->compact(compact_code_func, fixup, &code_finger);
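
    /* By now both fingers should have swept past everything that moved, so
       the translate_* functions forward any heap pointer they encounter.
       Fix up the remaining roots: GC roots held outside the heaps, and the
       code blocks referenced by active contexts. */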
    forwarder.visit_all_roots();
    forwarder.visit_context_code_blocks();
  }

  update_code_roots_for_compaction();

  /* Each callback has a relocation with a pointer to a code block in
     the code heap. Since the code heap has now been compacted, those
     pointers are invalid and we need to update them. */
  auto callback_updater = [&](code_block* stub, cell size) {
    callbacks->update(stub);
  };
  callbacks->allocator->iterate(callback_updater);

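  /* Rebuild the code heap's set of block addresses, which the sliding
     above has invalidated; verify_all_blocks_set() checked the same set
     on entry. */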
  code->initialize_all_blocks_set();

  if (event)
    event->ended_compaction();
}

void factor_vm::collect_compact() {
  collect_mark_impl();
  collect_compact_impl();

  if (data->high_fragmentation_p()) {
    /* Compaction did not free up enough memory. Grow the heap. */
    set_current_gc_op(collect_growing_heap_op);
    collect_growing_heap(0);
  }

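  /* Code blocks have moved, so stale machine code may linger in the CPU's
     instruction cache; flush it before compiled code runs again. */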
  code->flush_icache();
}

void factor_vm::collect_growing_heap(cell requested_size) {
  /* Grow the data heap and copy all live objects to the new heap. */
  data_heap* old = data;
  set_data_heap(data->grow(&nursery, requested_size));
  collect_mark_impl();
  collect_compact_impl();
  code->flush_icache();
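  /* The old heap must remain mapped until the mark and compact passes
     above have copied the live objects out of it; only then is it safe
     to free. */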
  delete old;
}

}