/* factor.git: vm/gc.cpp */
#include "master.hpp"

namespace factor
{

/* Captures statistics for a single collection: heap sizes before and
after, plus per-phase timings */
gc_event::gc_event(gc_op op_, factor_vm *parent) :
        op(op_),
        cards_scanned(0),
        decks_scanned(0),
        code_blocks_scanned(0),
        start_time(nano_count()),
        card_scan_time(0),
        code_scan_time(0),
        data_sweep_time(0),
        code_sweep_time(0),
        compaction_time(0)
{
        data_heap_before = parent->data_room();
        code_heap_before = parent->code_room();
}

/* The started_/ended_ pairs below bracket individual GC phases;
temp_time holds a phase's start time so the matching ended_ call can
record its duration */
void gc_event::started_card_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
        cards_scanned += cards_scanned_;
        decks_scanned += decks_scanned_;
        card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
        code_blocks_scanned += code_blocks_scanned_;
        code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_data_sweep()
{
        data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_code_sweep()
{
        code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction()
{
        temp_time = nano_count();
}

void gc_event::ended_compaction()
{
        compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm *parent)
{
        data_heap_after = parent->data_room();
        code_heap_after = parent->code_room();
        total_time = (cell)(nano_count() - start_time);
}

gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_), start_time(nano_count())
{
        event = new gc_event(op,parent);
}

gc_state::~gc_state()
{
        delete event;
        event = NULL;
}

/* Finish timing the current collection and log its event if event
recording is enabled */
void factor_vm::end_gc()
{
        current_gc->event->ended_gc(this);
        if(gc_events) gc_events->push_back(*current_gc->event);
        delete current_gc->event;
        current_gc->event = NULL;
}

void factor_vm::start_gc_again()
{
        end_gc();

        switch(current_gc->op)
        {
        case collect_nursery_op:
                current_gc->op = collect_aging_op;
                break;
        case collect_aging_op:
                current_gc->op = collect_to_tenured_op;
                break;
        case collect_to_tenured_op:
                current_gc->op = collect_full_op;
                break;
        case collect_full_op:
        case collect_compact_op:
                current_gc->op = collect_growing_heap_op;
                break;
        default:
                critical_error("Bad GC op",current_gc->op);
                break;
        }

        current_gc->event = new gc_event(current_gc->op,this);
}
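
/* An illustrative sketch, not part of the VM: the escalation order
above written as a pure function. A collection that runs out of room
retries at the next-larger scope (nursery -> aging -> to_tenured ->
full), and a full or compacting collection that still fails grows the
heap itself. */
static gc_op sketch_escalated_op(gc_op op)
{
        switch(op)
        {
        case collect_nursery_op:    return collect_aging_op;
        case collect_aging_op:      return collect_to_tenured_op;
        case collect_to_tenured_op: return collect_full_op;
        case collect_full_op:       /* fall through */
        case collect_compact_op:    return collect_growing_heap_op;
        default:                    return op; /* the VM reports a critical error here */
        }
}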

/* Run one collection of the requested scope, escalating to larger
scopes until it succeeds */
void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p)
{
        assert(!gc_off);
        assert(!current_gc);

        current_gc = new gc_state(op,this);

        /* Keep trying to GC higher and higher generations until we don't run out
        of space */
        for(;;)
        {
                try
                {
                        current_gc->event->op = current_gc->op;

                        switch(current_gc->op)
                        {
                        case collect_nursery_op:
                                collect_nursery();
                                break;
                        case collect_aging_op:
                                collect_aging();
                                /* If tenured space is badly fragmented after this
                                collection, upgrade to a full collection right away */
                                if(data->high_fragmentation_p())
                                {
                                        current_gc->op = collect_full_op;
                                        current_gc->event->op = collect_full_op;
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_to_tenured_op:
                                collect_to_tenured();
                                if(data->high_fragmentation_p())
                                {
                                        current_gc->op = collect_full_op;
                                        current_gc->event->op = collect_full_op;
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_full_op:
                                collect_full(trace_contexts_p);
                                break;
                        case collect_compact_op:
                                collect_compact(trace_contexts_p);
                                break;
                        case collect_growing_heap_op:
                                collect_growing_heap(requested_bytes,trace_contexts_p);
                                break;
                        default:
                                critical_error("Bad GC op",current_gc->op);
                                break;
                        }

                        break;
                }
                catch(const must_start_gc_again &)
                {
                        /* We come back here if a generation is full */
                        start_gc_again();
                        continue;
                }
        }

        end_gc();

        delete current_gc;
        current_gc = NULL;
}
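
/* A minimal, hypothetical sketch of where must_start_gc_again
originates: an allocation inside a collector that cannot fit in its
target space throws, unwinding into the catch block above, which
escalates the GC op and retries. sketch_space and the helper are
placeholders, not the VM's real collector types; only the exception
type is real. */
struct sketch_space { cell here, end; };

static cell sketch_collector_allot(sketch_space *space, cell size)
{
        if(space->here + size > space->end)
                throw must_start_gc_again(); /* caught by gc()'s retry loop */
        cell result = space->here;
        space->here += size;
        return result;
}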

void factor_vm::primitive_minor_gc()
{
        gc(collect_nursery_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_full_gc()
{
        gc(collect_full_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc()
{
        gc(collect_compact_op,
                0, /* requested size */
                true /* trace contexts? */);
}

/* Run a minor collection on behalf of compiled code. gc_roots_, if not
f, is an array of fixnum callstack offsets; each names a spill slot
holding a live tagged value that must be traced (and possibly updated)
by the collector */
void factor_vm::inline_gc(cell gc_roots_)
{
        cell stack_pointer = (cell)ctx->callstack_top;

        if(to_boolean(gc_roots_))
        {
                tagged<array> gc_roots(gc_roots_);

                cell capacity = array_capacity(gc_roots.untagged());
                for(cell i = 0; i < capacity; i++)
                {
                        cell spill_slot = untag_fixnum(array_nth(gc_roots.untagged(),i));
                        cell *address = (cell *)(spill_slot + stack_pointer);
                        data_roots.push_back(data_root_range(address,1));
                }

                primitive_minor_gc();

                for(cell i = 0; i < capacity; i++)
                        data_roots.pop_back();
        }
        else
                primitive_minor_gc();
}

VM_C_API void inline_gc(cell gc_roots, factor_vm *parent)
{
        parent->inline_gc(gc_roots);
}
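
/* Illustrative only: the root registered for one hypothetical spill
slot. If the compiler spilled a live value at callstack offset 8, the
cell traced during the collection would be the one at this address.
The helper and the offset are made up for the example. */
static cell *sketch_spill_slot_address(context *ctx, cell spill_slot)
{
        return (cell *)(spill_slot + (cell)ctx->callstack_top);
}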

/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
object *factor_vm::allot_large_object(cell type, cell size)
{
        /* If tenured space does not have enough room, collect and compact */
        if(!data->tenured->can_allot_p(size))
        {
                primitive_compact_gc();

                /* If it still won't fit, grow the heap */
                if(!data->tenured->can_allot_p(size))
                {
                        gc(collect_growing_heap_op,
                                size, /* requested size */
                                true /* trace contexts? */);
                }
        }

        object *obj = data->tenured->allot(size);

        /* Mark all cards covering the object up front. This lets
        initialization code store old->new pointers into it without
        executing the write barrier, just as it can in the common case
        where the object is allocated in the nursery */
        write_barrier(obj,size);

        obj->initialize(type);
        return obj;
}
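
/* A sketch of the caller contract stated above, using a hypothetical
two-slot layout: every field must be given a valid value before the
next allocation, since any allocation can trigger a collection that
scans this object. sketch_pair and this helper are illustrative, not
real VM layouts. */
struct sketch_pair : public object { cell first; cell second; };

static sketch_pair *sketch_allot_pair(factor_vm *vm, cell type, cell first_, cell second_)
{
        sketch_pair *p = (sketch_pair *)vm->allot_large_object(type,sizeof(sketch_pair));
        p->first = first_;   /* no write barrier needed here; */
        p->second = second_; /* allot_large_object already marked the cards */
        return p;
}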

void factor_vm::primitive_enable_gc_events()
{
        gc_events = new std::vector<gc_event>();
}

/* Disable event recording and push the accumulated events, as an array
of byte arrays holding the raw gc_event structs, or f if recording was
never enabled */
void factor_vm::primitive_disable_gc_events()
{
        if(gc_events)
        {
                growable_array result(this);

                std::vector<gc_event> *gc_events = this->gc_events;
                this->gc_events = NULL;

                std::vector<gc_event>::const_iterator iter = gc_events->begin();
                std::vector<gc_event>::const_iterator end = gc_events->end();

                for(; iter != end; iter++)
                {
                        gc_event event = *iter;
                        byte_array *obj = byte_array_from_value(&event);
                        result.add(tag<byte_array>(obj));
                }

                result.trim();
                ctx->push(result.elements.value());

                /* this->gc_events was cleared above, so free through
                the saved local pointer */
                delete gc_events;
        }
        else
                ctx->push(false_object);
}
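
/* Sketch of a consumer for the convention above: each pushed element
is a byte array holding the raw bytes of one gc_event, so a reader can
recover a field by reinterpreting the payload. The helper is
hypothetical, assuming byte_array::data<>() returns a pointer to the
payload bytes. */
static cell sketch_event_total_time(byte_array *bytes)
{
        const gc_event *event = (const gc_event *)bytes->data<char>();
        return event->total_time;
}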

}