#include "master.hpp"

namespace factor {

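/* compaction_fixup forwards pointers while the data and code heaps are being
   compacted. fixup_data() and fixup_code() map an address to its
   post-compaction location via the forwarding maps; translate_data() and
   translate_code() only forward addresses below the compaction fingers,
   since blocks below a finger have already been moved, while blocks above it
   still sit at their original addresses. */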
struct compaction_fixup {
  static const bool translated_code_block_map = false;

  mark_bits* data_forwarding_map;
  mark_bits* code_forwarding_map;
  const object** data_finger;
  const code_block** code_finger;

  compaction_fixup(mark_bits* data_forwarding_map,
                   mark_bits* code_forwarding_map,
                   const object** data_finger,
                   const code_block** code_finger)
      : data_forwarding_map(data_forwarding_map),
        code_forwarding_map(code_forwarding_map),
        data_finger(data_finger),
        code_finger(code_finger) {}

  object* fixup_data(object* obj) {
    return (object*)data_forwarding_map->forward_block((cell)obj);
  }

  code_block* fixup_code(code_block* compiled) {
    return (code_block*)code_forwarding_map->forward_block((cell)compiled);
  }

  object* translate_data(const object* obj) {
    if (obj < *data_finger)
      return fixup_data((object*)obj);
    return (object*)obj;
  }

  code_block* translate_code(const code_block* compiled) {
    if (compiled < *code_finger)
      return fixup_code((code_block*)compiled);
    return (code_block*)compiled;
  }

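  /* Size of a block as seen during the compaction heap walk: a marked block
     reports its own size (passing the fixup so any pointers followed while
     computing it are translated); for an unmarked block, the length of the
     dead run is taken from the mark bitmap instead. */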
  cell size(object* obj) {
    if (data_forwarding_map->marked_p((cell)obj))
      return obj->size(*this);
    return data_forwarding_map->unmarked_block_size((cell)obj);
  }

  cell size(code_block* compiled) {
    if (code_forwarding_map->marked_p((cell)compiled))
      return compiled->size(*this);
    return code_forwarding_map->unmarked_block_size((cell)compiled);
  }
};

/* After a compaction, invalidate any code heap roots which are not
marked, and also slide the valid roots up so that call sites can be updated
correctly in case an inline cache compilation triggered compaction. */
void factor_vm::update_code_roots_for_compaction() {

  mark_bits* state = &code->allocator->state;

  FACTOR_FOR_EACH(code_roots) {
    code_root* root = *iter;
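    /* ~data_alignment + 1 is the same as -data_alignment, so this masks the
       return address down to the start of its 16-byte allocation line. */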
    cell block = root->value & (~data_alignment + 1);

    /* Offset of return address within 16-byte allocation line */
    cell offset = root->value - block;

    if (root->valid && state->marked_p(block)) {
      block = state->forward_block(block);
      root->value = block + offset;
    } else
      root->valid = false;
  }
}

/* Compact data and code heaps */
void factor_vm::collect_compact_impl() {
  gc_event* event = current_gc->event;

#ifdef FACTOR_DEBUG
  code->verify_all_blocks_set();
#endif

  if (event)
    event->started_compaction();

  tenured_space* tenured = data->tenured;
  mark_bits* data_forwarding_map = &tenured->state;
  mark_bits* code_forwarding_map = &code->allocator->state;

  /* Figure out where blocks are going to go */
  data_forwarding_map->compute_forwarding();
  code_forwarding_map->compute_forwarding();

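  /* The fingers track how far compaction has progressed through each heap:
     they start at the heap's start and are advanced as live blocks are
     compacted, so any address below a finger has already been moved to its
     new location and must be forwarded through compaction_fixup. */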
  const object* data_finger = (object*)tenured->start;
  const code_block* code_finger = (code_block*)code->allocator->start;

  {
    compaction_fixup fixup(data_forwarding_map, code_forwarding_map, &data_finger,
                           &code_finger);
    slot_visitor<compaction_fixup> forwarder(this, fixup);

    forwarder.visit_uninitialized_code_blocks();

    /* Object start offsets get recomputed by compact_object_func below */
    data->tenured->starts.clear_object_start_offsets();

    /* Slide everything in tenured space up, and update data and code heap
       pointers inside objects. */
    auto compact_object_func = [&](object* old_addr, object* new_addr, cell size) {
      forwarder.visit_slots(new_addr);
      forwarder.visit_object_code_block(new_addr);
      tenured->starts.record_object_start_offset(new_addr);
    };
    tenured->compact(compact_object_func, fixup, &data_finger);

    /* Slide everything in the code heap up, and update data and code heap
       pointers inside code blocks. */
    auto compact_code_func = [&](code_block* old_addr,
                                 code_block* new_addr,
                                 cell size) {
      forwarder.visit_code_block_objects(new_addr);
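      /* Instruction operands are encoded relative to the block's entry
         point, so the old entry point is needed to recover their values
         before they are rewritten against the block's new address. */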
      cell old_entry_point = old_addr->entry_point();
      forwarder.visit_instruction_operands(new_addr, old_entry_point);
    };
    code->allocator->compact(compact_code_func, fixup, &code_finger);

    forwarder.visit_all_roots();
    forwarder.visit_context_code_blocks();
  }

  update_code_roots_for_compaction();
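  /* Callback stubs reference code heap addresses, so the callback heap is
     updated as well. */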
  callbacks->update();

  code->initialize_all_blocks_set();

  if (event)
    event->ended_compaction();
}

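/* Mark, then compact, both heaps. If compaction did not free up enough
   memory, fall back to growing the data heap. */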
void factor_vm::collect_compact() {
  collect_mark_impl();
  collect_compact_impl();

  if (data->high_fragmentation_p()) {
    /* Compaction did not free up enough memory. Grow the heap. */
    set_current_gc_op(collect_growing_heap_op);
    collect_growing_heap(0);
  }

  code->flush_icache();
}

void factor_vm::collect_growing_heap(cell requested_size) {
  /* Grow the data heap and copy all live objects to the new heap. */
  data_heap* old = data;
  set_data_heap(data->grow(&nursery, requested_size));
  collect_mark_impl();
  collect_compact_impl();
  code->flush_icache();
  delete old;
}

}