vm/compaction.cpp
#include "master.hpp"

namespace factor {

struct compaction_fixup {
  static const bool translated_code_block_map = false;

  mark_bits* data_forwarding_map;
  mark_bits* code_forwarding_map;
  const object** data_finger;
  const code_block** code_finger;

  compaction_fixup(mark_bits* data_forwarding_map,
                   mark_bits* code_forwarding_map,
                   const object** data_finger,
                   const code_block** code_finger)
      : data_forwarding_map(data_forwarding_map),
        code_forwarding_map(code_forwarding_map),
        data_finger(data_finger),
        code_finger(code_finger) {}

  object* fixup_data(object* obj) {
    return (object*)data_forwarding_map->forward_block((cell)obj);
  }

  code_block* fixup_code(code_block* compiled) {
    return (code_block*)code_forwarding_map->forward_block((cell)compiled);
  }

  object* translate_data(const object* obj) {
    if (obj < *data_finger)
      return fixup_data((object*)obj);
    return (object*)obj;
  }

  code_block* translate_code(const code_block* compiled) {
    if (compiled < *code_finger)
      return fixup_code((code_block*)compiled);
    return (code_block*)compiled;
  }

  cell size(object* obj) {
    if (data_forwarding_map->marked_p((cell)obj))
      return obj->size(*this);
    return data_forwarding_map->unmarked_block_size((cell)obj);
  }

  cell size(code_block* compiled) {
    if (code_forwarding_map->marked_p((cell)compiled))
      return compiled->size(*this);
    return code_forwarding_map->unmarked_block_size((cell)compiled);
  }
};
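
// Note (informal reading of the struct above): the data and code fingers track
// how far each compaction pass has advanced. An address below the finger
// belongs to a block that has already been slid to its new location and must
// be forwarded; an address at or above the finger has not moved yet and is
// returned unchanged. For example, for a slot value p seen while visiting an
// object:
//
//   object* q = fixup.translate_data(p); // forwarded if p was already moved,
//                                        // otherwise p itself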

// After a compaction, invalidate any code heap roots which are not
// marked, and also slide the valid roots up so that call sites can be updated
// correctly in case an inline cache compilation triggered compaction.
void factor_vm::update_code_roots_for_compaction() {

  mark_bits* state = &code->allocator->state;

  FACTOR_FOR_EACH(code_roots) {
    code_root* root = *iter;
    cell block = root->value & (~data_alignment + 1);

    // Offset of return address within 16-byte allocation line
    cell offset = root->value - block;
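    // Worked example (data_alignment is 16, per the comment above):
    // ~data_alignment + 1 == -16, so the mask rounds the return address down
    // to its 16-byte line. A hypothetical root->value of 0x10238 gives
    // block == 0x10230 and offset == 8; after forwarding, the root becomes
    // forward_block(0x10230) + 8.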

    if (root->valid && state->marked_p(block)) {
      block = state->forward_block(block);
      root->value = block + offset;
    } else
      root->valid = false;
  }
}

// Compact data and code heaps
void factor_vm::collect_compact_impl() {
  gc_event* event = current_gc->event;

#ifdef FACTOR_DEBUG
  code->verify_all_blocks_set();
#endif

  if (event)
    event->reset_timer();

  tenured_space* tenured = data->tenured;
  mark_bits* data_forwarding_map = &tenured->state;
  mark_bits* code_forwarding_map = &code->allocator->state;

  // Figure out where blocks are going to go
  data_forwarding_map->compute_forwarding();
  code_forwarding_map->compute_forwarding();
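  // Informal note: compute_forwarding() presumably scans the mark bitmap and
  // records, for each marked block, the address it will occupy once the
  // unmarked gaps below it are squeezed out; forward_block() answers those
  // lookups during the passes below.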

  const object* data_finger = (object*)tenured->start;
  const code_block* code_finger = (code_block*)code->allocator->start;
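  // Both fingers start at the bottom of their heaps, so translate_data() and
  // translate_code() initially return addresses unchanged; the compact() calls
  // below take the fingers by address and are expected to advance them as
  // blocks are moved.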

  {
    compaction_fixup fixup(data_forwarding_map, code_forwarding_map,
                           &data_finger, &code_finger);
    slot_visitor<compaction_fixup> forwarder(this, fixup);

    forwarder.visit_uninitialized_code_blocks();

    // Object start offsets get recomputed by compact_object_func below
    data->tenured->starts.clear_object_start_offsets();

    // Slide everything in tenured space up, and update data and code heap
    // pointers inside objects.
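    // Informal note: tenured->compact() below is what actually slides the
    // marked blocks down; judging from the callback's parameters, it invokes
    // this function with each block's old and new address so the slots can be
    // re-pointed and the start offset re-recorded at the new location.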
    auto compact_object_func = [&](object* old_addr, object* new_addr, cell size) {
      (void)old_addr;
      (void)size;
      forwarder.visit_slots(new_addr);
      forwarder.visit_object_code_block(new_addr);
      tenured->starts.record_object_start_offset(new_addr);
    };
    tenured->compact(compact_object_func, fixup, &data_finger);

    // Slide everything in the code heap up, and update data and code heap
    // pointers inside code blocks.
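    // Informal note: passing the old entry point below presumably lets the
    // visitor re-relativize instruction operands that were encoded against the
    // code block's previous address (inferred from the call to
    // visit_instruction_operands, not from its definition here).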
    auto compact_code_func = [&](code_block* old_addr,
                                 code_block* new_addr,
                                 cell size) {
      (void)size;
      forwarder.visit_code_block_objects(new_addr);
      cell old_entry_point = old_addr->entry_point();
      forwarder.visit_instruction_operands(new_addr, old_entry_point);
    };
    code->allocator->compact(compact_code_func, fixup, &code_finger);

    forwarder.visit_all_roots();
    forwarder.visit_context_code_blocks();
  }

  update_code_roots_for_compaction();

  // Each callback has a relocation with a pointer to a code block in
  // the code heap. Since the code heap has now been compacted, those
  // pointers are invalid and we need to update them.
  auto callback_updater = [&](code_block* stub, cell size) {
    (void)size;
    callbacks->update(stub);
  };
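  // no_fixup() here presumably reflects that the callback heap itself is not
  // being compacted; only the stale code-heap pointers inside each stub need
  // refreshing, which callbacks->update() handles.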
  callbacks->allocator->iterate(callback_updater, no_fixup());

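  // Rebuild the code heap's set of known block addresses now that blocks have
  // been moved by compaction.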
  code->initialize_all_blocks_set();

  if (event)
    event->ended_phase(PHASE_DATA_COMPACTION);
}

void factor_vm::collect_compact() {
  collect_mark_impl();
  collect_compact_impl();

  // Compaction did not free up enough memory. Grow the data heap.
  if (data->high_fragmentation_p()) {
    set_current_gc_op(COLLECT_GROWING_DATA_HEAP_OP);
    collect_growing_data_heap(0);
  }

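  // Code blocks may have moved during compaction, so the instruction cache
  // must be flushed before any of that code runs again.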
  code->flush_icache();
}

void factor_vm::collect_growing_data_heap(cell requested_size) {
  // Grow the data heap and copy all live objects to the new heap.
  data_heap* old = data;
  set_data_heap(data->grow(&nursery, requested_size));
  collect_mark_impl();
  collect_compact_impl();
  code->flush_icache();
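  // The old heap is freed only once marking and compaction have moved all
  // live objects into the new, larger heap (per the comment above).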
  delete old;
}

}