#include "master.hpp"

namespace factor {

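/* Compaction of the data and code heaps. Live blocks are slid up
toward the start of each heap, and the mark bitmap of each heap
doubles as a forwarding map from a block's old address to its new
one. */
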
template<typename Block> struct forwarder {
        mark_bits<Block> *forwarding_map;

        explicit forwarder(mark_bits<Block> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        Block *operator()(Block *block)
        {
                return forwarding_map->forward_block(block);
        }
};
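
/* A generic function object mapping a block's old address to its new
address through the forwarding map. It is instantiated with object for
data heap pointers and with code_block for code heap pointers, and is
plugged into slot_visitor and code_block_visitor below. */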

static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
{
        /* A tuple's size is stored in its layout object, and the layout
        may or may not have been forwarded already, so we cannot simply
        dereference the layout pointer. */
        object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
        tuple_layout *layout;

        if(layout_obj < obj)
        {
                /* The layout is at a lower address, so it has already
                been moved up; dereference through the forwarding map
                to find its new location and read the size there */
                layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
        }
        else
        {
                /* The layout hasn't been moved up yet; dereference directly */
                layout = (tuple_layout *)layout_obj;
        }

        return tuple_size(layout);
}
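
/* Worked example, with hypothetical addresses: if a tuple lives at
0x2000 and its layout at 0x1000, the layout has already been slid to
its new address, say 0x800, by the time the tuple is visited, and the
memory at 0x1000 may have been overwritten. forward_block(0x1000)
recovers 0x800, where the layout can be read safely. A layout at a
higher address than its tuple has not moved yet and can be read in
place. */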

struct compaction_sizer {
        mark_bits<object> *forwarding_map;

        explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
                forwarding_map(forwarding_map_) {}

        cell operator()(object *obj)
        {
                if(!forwarding_map->marked_p(obj))
                        return forwarding_map->unmarked_block_size(obj);
                else if(obj->type() == TUPLE_TYPE)
                        return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
                else
                        return obj->size();
        }
};
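
/* Size function passed to tenured->compact() below. An unmarked block
is free space whose extent comes from the mark bitmap; a tuple's size
lives in its layout, which may itself have moved, so it needs the
forwarding-aware computation above; every other object can report its
own size. */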

struct object_compaction_updater {
        factor_vm *parent;
        slot_visitor<forwarder<object> > slot_forwarder;
        code_block_visitor<forwarder<code_block> > code_forwarder;
        mark_bits<object> *data_forwarding_map;
        object_start_map *starts;

        explicit object_compaction_updater(factor_vm *parent_,
                slot_visitor<forwarder<object> > slot_forwarder_,
                code_block_visitor<forwarder<code_block> > code_forwarder_,
                mark_bits<object> *data_forwarding_map_) :
                parent(parent_),
                slot_forwarder(slot_forwarder_),
                code_forwarder(code_forwarder_),
                data_forwarding_map(data_forwarding_map_),
                starts(&parent->data->tenured->starts) {}

        void operator()(object *old_address, object *new_address, cell size)
        {
                cell payload_start;
                if(old_address->type() == TUPLE_TYPE)
                        payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address);
                else
                        payload_start = old_address->binary_payload_start();

                memmove(new_address,old_address,size);

                slot_forwarder.visit_slots(new_address,payload_start);
                code_forwarder.visit_object_code_block(new_address);
                starts->record_object_start_offset(new_address);
        }
};
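
/* Called by tenured->compact() with each live object's old address,
new address and size: copy the object, forward the data heap pointers
in its slots (only the slots before the binary payload hold pointers),
forward its code block pointer, and record the new object start offset
(the offsets are cleared before compaction and rebuilt here). */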

template<typename SlotForwarder> struct code_block_compaction_updater {
        factor_vm *parent;
        SlotForwarder slot_forwarder;

        explicit code_block_compaction_updater(factor_vm *parent_, SlotForwarder slot_forwarder_) :
                parent(parent_), slot_forwarder(slot_forwarder_) {}

        void operator()(code_block *old_address, code_block *new_address, cell size)
        {
                memmove(new_address,old_address,size);
                slot_forwarder.visit_literal_references(new_address);
                parent->relocate_code_block(new_address);
        }
};
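
/* Code blocks are moved the same way: copy the block, forward the data
heap pointers among its literal references, then re-apply relocations
so the machine code is valid at its new address. SlotForwarder is a
template parameter so that the code-only compaction below can
substitute a no-op when the data heap is not moving. */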

/* Compact data and code heaps */
void factor_vm::collect_compact_impl(bool trace_contexts_p)
{
        current_gc->event->started_compaction();

        tenured_space *tenured = data->tenured;
        mark_bits<object> *data_forwarding_map = &tenured->state;
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;

        /* Figure out where blocks are going to go */
        data_forwarding_map->compute_forwarding();
        code_forwarding_map->compute_forwarding();

        slot_visitor<forwarder<object> > slot_forwarder(this,forwarder<object>(data_forwarding_map));
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        /* Object start offsets get recomputed by the object_compaction_updater */
        data->tenured->starts.clear_object_start_offsets();

        /* Slide everything in tenured space up, and update data and code heap
        pointers inside objects. */
        object_compaction_updater object_updater(this,slot_forwarder,code_forwarder,data_forwarding_map);
        compaction_sizer object_sizer(data_forwarding_map);
        tenured->compact(object_updater,object_sizer);

        /* Slide everything in the code heap up, and update data and code heap
        pointers inside code blocks. */
        code_block_compaction_updater<slot_visitor<forwarder<object> > > code_block_updater(this,slot_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        /* Update roots and contexts that refer to moved objects and
        code blocks */
        slot_forwarder.visit_roots();
        if(trace_contexts_p)
        {
                slot_forwarder.visit_contexts();
                code_forwarder.visit_context_code_blocks();
                code_forwarder.visit_callback_code_blocks();
        }

        update_code_roots_for_compaction();

        current_gc->event->ended_compaction();
}

struct object_code_block_updater {
        code_block_visitor<forwarder<code_block> > *visitor;

        explicit object_code_block_updater(code_block_visitor<forwarder<code_block> > *visitor_) :
                visitor(visitor_) {}

        void operator()(object *obj)
        {
                visitor->visit_object_code_block(obj);
        }
};

struct dummy_slot_forwarder {
        void visit_literal_references(code_block *compiled) {}
};
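
/* No-op stand-in for the slot_visitor used above: when only the code
heap is compacted, data heap objects do not move, so the literal
references inside code blocks are already correct. */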

/* Compact just the code heap */
void factor_vm::collect_compact_code_impl(bool trace_contexts_p)
{
        /* Figure out where blocks are going to go */
        mark_bits<code_block> *code_forwarding_map = &code->allocator->state;
        code_forwarding_map->compute_forwarding();
        code_block_visitor<forwarder<code_block> > code_forwarder(this,forwarder<code_block>(code_forwarding_map));

        if(trace_contexts_p)
        {
                code_forwarder.visit_context_code_blocks();
                code_forwarder.visit_callback_code_blocks();
        }

        /* Update code heap references in data heap */
        object_code_block_updater updater(&code_forwarder);
        each_object(updater);

        /* Slide everything in the code heap up, and update code heap
        pointers inside code blocks. */
        dummy_slot_forwarder slot_forwarder;
        code_block_compaction_updater<dummy_slot_forwarder> code_block_updater(this,slot_forwarder);
        standard_sizer<code_block> code_block_sizer;
        code->allocator->compact(code_block_updater,code_block_sizer);

        update_code_roots_for_compaction();
}

}