1 #include "master.hpp"
2
3 namespace factor
4 {
5
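/* A gc_event records statistics for a single collection: the GC operation,
data and code heap usage before and after, and per-phase timings. Events are
only created when gc_events is non-NULL, i.e. after
primitive_enable_gc_events() has been called. */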
gc_event::gc_event(gc_op op_, factor_vm *parent) :
        op(op_),
        cards_scanned(0),
        decks_scanned(0),
        code_blocks_scanned(0),
        start_time(nano_count()),
        card_scan_time(0),
        code_scan_time(0),
        data_sweep_time(0),
        code_sweep_time(0),
        compaction_time(0)
{
        data_heap_before = parent->data_room();
        code_heap_before = parent->code_room();
        start_time = nano_count();
}

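/* Each started_.../ended_... pair brackets one phase of a collection: the
started_ function saves the current nano_count() in temp_time, and the
matching ended_ function records the elapsed time (plus, for the scan phases,
how many cards, decks or code blocks were visited). */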
void gc_event::started_card_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
        cards_scanned += cards_scanned_;
        decks_scanned += decks_scanned_;
        card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
        code_blocks_scanned += code_blocks_scanned_;
        code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_data_sweep()
{
        data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_code_sweep()
{
        code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction()
{
        temp_time = nano_count();
}

void gc_event::ended_compaction()
{
        compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm *parent)
{
        data_heap_after = parent->data_room();
        code_heap_after = parent->code_room();
        total_time = (cell)(nano_count() - start_time);
}

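/* gc_state represents the collection currently in progress. It owns the
optional gc_event that accumulates statistics when event recording is
enabled. */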
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_)
{
        if(parent->gc_events)
        {
                event = new gc_event(op,parent);
                start_time = nano_count();
        }
        else
                event = NULL;
}

gc_state::~gc_state()
{
        if(event)
        {
                delete event;
                event = NULL;
        }
}

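/* If event recording is enabled, finalize the event for the pass that just
ended and append it to the gc_events log. */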
void factor_vm::end_gc()
{
        if(gc_events)
        {
                current_gc->event->ended_gc(this);
                gc_events->push_back(*current_gc->event);
        }
}

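/* Called when a collection pass ran out of room in its target generation
(see the retry loop in gc() below): log the pass that just ended and escalate
the current GC op to collect the next older generation. */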
void factor_vm::start_gc_again()
{
        end_gc();

        switch(current_gc->op)
        {
        case collect_nursery_op:
                /* Nursery collection can fail if aging does not have enough
                free space to fit all live objects from the nursery. */
                current_gc->op = collect_aging_op;
                break;
        case collect_aging_op:
                /* Aging collection can fail if the aging semispace cannot fit
                all the live objects from the other aging semispace and the
                nursery. */
                current_gc->op = collect_to_tenured_op;
                break;
        default:
                /* Nothing else should fail mid-collection due to insufficient
                space in the target generation. */
                critical_error("Bad GC op",current_gc->op);
                break;
        }

        if(gc_events)
        {
                /* end_gc() copied the old event into the log, so it can be
                replaced here without leaking it. */
                delete current_gc->event;
                current_gc->event = new gc_event(current_gc->op,this);
        }
}

void factor_vm::set_current_gc_op(gc_op op)
{
        current_gc->op = op;
        if(gc_events) current_gc->event->op = op;
}

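/* Top-level collection driver: dispatches on the requested gc_op, and when a
pass throws must_start_gc_again because its target generation filled up,
escalates via start_gc_again() and retries. */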
void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
{
        FACTOR_ASSERT(!gc_off);
        FACTOR_ASSERT(!current_gc);

        /* Important invariant: tenured space must have enough contiguous free
        space to fit the entire contents of the aging space and nursery. This is
        because when doing a full collection, objects from younger generations
        are promoted before any unreachable tenured objects are freed. */
        FACTOR_ASSERT(!data->high_fragmentation_p());

        current_gc = new gc_state(op,this);
        atomic::store(&current_gc_p, true);

        /* Keep trying to GC higher and higher generations until we don't run
        out of space in the target generation. */
        for(;;)
        {
                try
                {
                        if(gc_events) current_gc->event->op = current_gc->op;

                        switch(current_gc->op)
                        {
                        case collect_nursery_op:
                                collect_nursery();
                                break;
                        case collect_aging_op:
                                /* We end up here if the above fails. */
                                collect_aging();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_to_tenured_op:
                                /* We end up here if the above fails. */
                                collect_to_tenured();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_full_op:
                                collect_full(trace_contexts_p);
                                break;
                        case collect_compact_op:
                                collect_compact(trace_contexts_p);
                                break;
                        case collect_growing_heap_op:
                                collect_growing_heap(requested_size,trace_contexts_p);
                                break;
                        default:
                                critical_error("Bad GC op",current_gc->op);
                                break;
                        }

                        break;
                }
                catch(const must_start_gc_again &)
                {
                        /* We come back here if the target generation is full. */
                        start_gc_again();
                        continue;
                }
        }

        end_gc();

        atomic::store(&current_gc_p, false);
        delete current_gc;
        current_gc = NULL;

        /* Check the invariant again, just in case. */
        FACTOR_ASSERT(!data->high_fragmentation_p());
}

/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the comment
in compiler.cfg.stacks.uninitialized for details. */

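/* Functor applied to every frame of a context's call stack: it looks up the
GC map for the frame's return address in the code block's gc_info and asks the
context to scrub the stack locations that are uninitialized at that call site.
Frames without a usable return address or GC map entry are skipped. */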
struct call_frame_scrubber {
        factor_vm *parent;
        context *ctx;

        explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
                parent(parent_), ctx(ctx_) {}

        void operator()(stack_frame *frame)
        {
                cell return_address = parent->frame_offset(frame);
                if(return_address == (cell)-1)
                        return;

                code_block *compiled = parent->frame_code(frame);
                gc_info *info = compiled->block_gc_info();

                FACTOR_ASSERT(return_address < compiled->size());
                cell index = info->return_address_index(return_address);
                if(index != (cell)-1)
                        ctx->scrub_stacks(info,index);
        }
};

void factor_vm::scrub_context(context *ctx)
{
        call_frame_scrubber scrubber(this,ctx);
        iterate_callstack(ctx,scrubber);
        verify_callstack(ctx);
}

void factor_vm::scrub_contexts()
{
        std::set<context *>::const_iterator begin = active_contexts.begin();
        std::set<context *>::const_iterator end = active_contexts.end();
        while(begin != end)
        {
                scrub_context(*begin);
                begin++;
        }
}

void factor_vm::primitive_minor_gc()
{
        scrub_contexts();

        gc(collect_nursery_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_full_gc()
{
        gc(collect_full_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc()
{
        gc(collect_compact_op,
                0, /* requested size */
                true /* trace contexts? */);
}

/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
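/* For illustration only (hypothetical caller, not part of this file): a
   caller is expected to obtain the object and then initialize its payload
   itself, e.g.

       object *obj = allot_large_object(type, size);
       ... fill in obj's slots here ...

   The write_barrier() call below marks the object's cards up front, so these
   initialization stores are safe even if they create old->new pointers. */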
object *factor_vm::allot_large_object(cell type, cell size)
{
        /* If tenured space does not have enough room, collect and compact */
        cell requested_size = size + data->high_water_mark();
        if(!data->tenured->can_allot_p(requested_size))
        {
                primitive_compact_gc();

                /* If it still won't fit, grow the heap */
                if(!data->tenured->can_allot_p(requested_size))
                {
                        gc(collect_growing_heap_op,
                                size, /* requested size */
                                true /* trace contexts? */);
                }
        }

        object *obj = data->tenured->allot(size);

        /* Allows initialization code to store old->new pointers
        without hitting the write barrier in the common case of
        a nursery allocation */
        write_barrier(obj,size);

        obj->initialize(type);
        return obj;
}

void factor_vm::primitive_enable_gc_events()
{
        gc_events = new std::vector<gc_event>();
}

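/* Stop recording GC events and push the accumulated events onto the data
stack as an array of byte arrays (one per event), or f if event recording is
not enabled. */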
void factor_vm::primitive_disable_gc_events()
{
        if(gc_events)
        {
                growable_array result(this);

                std::vector<gc_event> *gc_events = this->gc_events;
                this->gc_events = NULL;

                std::vector<gc_event>::const_iterator iter = gc_events->begin();
                std::vector<gc_event>::const_iterator end = gc_events->end();

                for(; iter != end; iter++)
                {
                        gc_event event = *iter;
                        byte_array *obj = byte_array_from_value(&event);
                        result.add(tag<byte_array>(obj));
                }

                result.trim();
                ctx->push(result.elements.value());

                /* this->gc_events is already NULL at this point; delete the
                local pointer so the vector is actually freed. */
                delete gc_events;
        }
        else
                ctx->push(false_object);
}

}