/* vm/compaction.cpp (Factor VM) */
1 #include "master.hpp"
2
3 namespace factor {
4
5 void factor_vm::update_fixup_set_for_compaction(mark_bits<code_block> *forwarding_map)
6 {
7         std::set<code_block *>::const_iterator iter = code->needs_fixup.begin();
8         std::set<code_block *>::const_iterator end = code->needs_fixup.end();
9
10         std::set<code_block *> new_needs_fixup;
11         for(; iter != end; iter++)
12         {
13                 printf("a block needs fixup\n");
14                 new_needs_fixup.insert(forwarding_map->forward_block(*iter));
15         }
16
17         code->needs_fixup = new_needs_fixup;
18 }
19
20 template<typename Block> struct forwarder {
21         mark_bits<Block> *forwarding_map;
22
23         explicit forwarder(mark_bits<Block> *forwarding_map_) :
24                 forwarding_map(forwarding_map_) {}
25
26         Block *operator()(Block *block)
27         {
28                 return forwarding_map->forward_block(block);
29         }
30 };
31
32 static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
33 {
34         /* The tuple layout may or may not have been forwarded already. Tricky. */
35         object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
36         tuple_layout *layout;
37
38         if(layout_obj < obj)
39         {
40                 /* It's already been moved up; dereference through forwarding
41                 map to get the size */
42                 layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
43         }
44         else
45         {
46                 /* It hasn't been moved up yet; dereference directly */
47                 layout = (tuple_layout *)layout_obj;
48         }
49
50         return tuple_size(layout);
51 }
52
53 struct compaction_sizer {
54         mark_bits<object> *forwarding_map;
55
56         explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
57                 forwarding_map(forwarding_map_) {}
58
59         cell operator()(object *obj)
60         {
61                 if(!forwarding_map->marked_p(obj))
62                         return forwarding_map->unmarked_block_size(obj);
63                 else if(obj->type() == TUPLE_TYPE)
64                         return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
65                 else
66                         return obj->size();
67         }
68 };
69
70 struct object_compaction_updater {
71         factor_vm *parent;
72         slot_visitor<forwarder<object> > slot_forwarder;
73         code_block_visitor<forwarder<code_block> > code_forwarder;
74         mark_bits<object> *data_forwarding_map;
75         object_start_map *starts;
76
77         explicit object_compaction_updater(factor_vm *parent_,
78                 slot_visitor<forwarder<object> > slot_forwarder_,
79                 code_block_visitor<forwarder<code_block> > code_forwarder_,
80                 mark_bits<object> *data_forwarding_map_) :
81                 parent(parent_),
82                 slot_forwarder(slot_forwarder_),
83                 code_forwarder(code_forwarder_),
84                 data_forwarding_map(data_forwarding_map_),
85                 starts(&parent->data->tenured->starts) {}
86
87         void operator()(object *old_address, object *new_address, cell size)
88         {
89                 cell payload_start;
90                 if(old_address->type() == TUPLE_TYPE)
91                         payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
92                 else
93                         payload_start = old_address->binary_payload_start();
94
95                 memmove(new_address,old_address,size);
96
97                 slot_forwarder.visit_slots(new_address,payload_start);
98                 code_forwarder.visit_object_code_block(new_address);
99                 starts->record_object_start_offset(new_address);
100         }
101 };
102
/* Rewrites relative relocations inside a code block that has just been
moved. Relative operands encode values as offsets from the block's own
address (its XT), so after a move they must be reloaded relative to the
old address and re-stored relative to the new one. */
struct relative_address_updater {
	factor_vm *parent;
	/* Pre-move address of the block being relocated; needed to decode
	the operand values that were written relative to it. */
	code_block *old_address;

	explicit relative_address_updater(factor_vm *parent_, code_block *old_address_) :
		parent(parent_), old_address(old_address_) {}

	/* Called by iterate_relocations with `compiled` already at its new
	address. */
	void operator()(relocation_entry rel, cell index, code_block *compiled)
	{
		/* Operand located inside the moved (new) copy of the block. */
		instruction_operand op(rel.rel_class(),rel.rel_offset() + (cell)compiled->xt());

		relocation_type type = rel.rel_type();
		cell value;
		if(type == RT_HERE || type == RT_THIS)
			/* Self-referential relocations are recomputed from scratch
			for the new location. */
			value = parent->compute_relocation(rel,index,compiled);
		else
			/* All other relocations: decode the operand as it was laid
			out relative to the block's old address. */
			value = op.load_value(rel.rel_offset() + (cell)old_address->xt());

		op.store_value(value);
	}
};
124
125 template<typename SlotForwarder, typename CodeBlockForwarder>
126 struct code_block_compaction_updater {
127         factor_vm *parent;
128         slot_visitor<forwarder<object> > slot_forwarder;
129         code_block_visitor<forwarder<code_block> > code_forwarder;
130
131         explicit code_block_compaction_updater(factor_vm *parent_,
132                 slot_visitor<forwarder<object> > slot_forwarder_,
133                 code_block_visitor<forwarder<code_block> > code_forwarder_) :
134                 parent(parent_), slot_forwarder(slot_forwarder_), code_forwarder(code_forwarder_) {}
135
136         void operator()(code_block *old_address, code_block *new_address, cell size)
137         {
138                 memmove(new_address,old_address,size);
139
140                 slot_forwarder.visit_code_block_objects(new_address);
141
142                 relative_address_updater updater(parent,old_address);
143                 parent->iterate_relocations(new_address,updater);
144
145                 slot_forwarder.visit_embedded_literals(new_address);
146                 code_forwarder.visit_embedded_code_pointers(new_address);
147         }
148 };
149
/* Compact data and code heaps */
void factor_vm::collect_compact_impl(bool trace_contexts_p)
{
	current_gc->event->started_compaction();

	tenured_space *tenured = data->tenured;
	mark_bits<object> *data_forwarding_map = &tenured->state;
	mark_bits<code_block> *code_forwarding_map = &code->allocator->state;

	/* Figure out where blocks are going to go */
	data_forwarding_map->compute_forwarding();
	code_forwarding_map->compute_forwarding();

	/* The fixup set must be forwarded before blocks move, while the
	old addresses are still valid keys. */
	update_fixup_set_for_compaction(code_forwarding_map);

	slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
	code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

	/* Object start offsets get recomputed by the object_compaction_updater */
	data->tenured->starts.clear_object_start_offsets();

	/* Slide everything in tenured space up, and update data and code heap
	pointers inside objects. */
	object_compaction_updater object_updater(this,slot_forwarder,code_forwarder,data_forwarding_map);
	compaction_sizer object_sizer(data_forwarding_map);
	tenured->compact(object_updater,object_sizer);

	/* Slide everything in the code heap up, and update data and code heap
	pointers inside code blocks. */
	code_block_compaction_updater<slot_visitor<forwarder<object> >, code_block_visitor<forwarder<code_block> > > code_block_updater(this,slot_forwarder,code_forwarder);
	standard_sizer<code_block> code_block_sizer;
	code->allocator->compact(code_block_updater,code_block_sizer);

	/* With both heaps compacted, forward the remaining references held
	outside the heaps: global roots, and (when requested) contexts and
	callback stubs. */
	slot_forwarder.visit_roots();
	if(trace_contexts_p)
	{
		slot_forwarder.visit_contexts();
		code_forwarder.visit_context_code_blocks();
		code_forwarder.visit_callback_code_blocks();
	}

	update_code_roots_for_compaction();

	current_gc->event->ended_compaction();
}
195
196 struct object_code_block_updater {
197         code_block_visitor<forwarder<code_block> > code_forwarder;
198
199         explicit object_code_block_updater(code_block_visitor<forwarder<code_block> > code_forwarder_) :
200                 code_forwarder(code_forwarder_) {}
201
202         void operator()(object *obj)
203         {
204                 code_forwarder.visit_object_code_block(obj);
205         }
206 };
207
208 struct code_block_grow_heap_updater {
209         factor_vm *parent;
210
211         explicit code_block_grow_heap_updater(factor_vm *parent_) : parent(parent_) {}
212
213         void operator()(code_block *old_address, code_block *new_address, cell size)
214         {
215                 memmove(new_address,old_address,size);
216                 parent->relocate_code_block(new_address);
217         }
218 };
219
/* Compact just the code heap, after growing the data heap */
void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
{
	/* Figure out where blocks are going to go */
	mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
	code_forwarding_map->compute_forwarding();

	/* Forward the fixup set while old block addresses are still valid. */
	update_fixup_set_for_compaction(code_forwarding_map);

	code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

	if(trace_contexts_p)
	{
		code_forwarder.visit_context_code_blocks();
		code_forwarder.visit_callback_code_blocks();
	}

	/* Update code heap references in data heap */
	object_code_block_updater updater(code_forwarder);
	each_object(updater);

	/* Slide everything in the code heap up, and update code heap
	pointers inside code blocks. */
	code_block_grow_heap_updater code_block_updater(this);
	standard_sizer<code_block> code_block_sizer;
	code->allocator->compact(code_block_updater,code_block_sizer);

	update_code_roots_for_compaction();
}
249
250 }