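/* vm/gc.cpp: the garbage collection driver. Defines gc_event and gc_state
instrumentation, the generational collection loop with escalation on failure,
call stack scrubbing before minor collections, and large object allocation in
tenured space. */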
#include "master.hpp"

namespace factor
{

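/* A gc_event records statistics for a single collection: how many cards,
decks and code blocks were scanned, how long each phase took, and data/code
heap room before and after. Events are only recorded when gc_events is
non-NULL (see primitive_enable_gc_events() below). The constructor re-reads
nano_count() for start_time after taking the room snapshots, so that work is
excluded from total_time. */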
gc_event::gc_event(gc_op op_, factor_vm *parent) :
        op(op_),
        cards_scanned(0),
        decks_scanned(0),
        code_blocks_scanned(0),
        start_time(nano_count()),
        card_scan_time(0),
        code_scan_time(0),
        data_sweep_time(0),
        code_sweep_time(0),
        compaction_time(0)
{
        data_heap_before = parent->data_room();
        code_heap_before = parent->code_room();
        start_time = nano_count();
}

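/* Each phase of a collection is timed with a started_*()/ended_*() pair:
started_*() records nano_count() in temp_time, and the matching ended_*()
stores the elapsed time for that phase. */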
void gc_event::started_card_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
        cards_scanned += cards_scanned_;
        decks_scanned += decks_scanned_;
        card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
        code_blocks_scanned += code_blocks_scanned_;
        code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_data_sweep()
{
        data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_code_sweep()
{
        code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction()
{
        temp_time = nano_count();
}

void gc_event::ended_compaction()
{
        compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm *parent)
{
        data_heap_after = parent->data_room();
        code_heap_after = parent->code_room();
        total_time = (cell)(nano_count() - start_time);
}

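/* gc_state describes the collection currently in progress: the op being
performed (which start_gc_again() may escalate) and, when event recording is
enabled, the gc_event being filled in. */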
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_)
{
        if(parent->gc_events)
        {
                event = new gc_event(op,parent);
                start_time = nano_count();
        }
        else
                event = NULL;
}

gc_state::~gc_state()
{
        if(event)
        {
                delete event;
                event = NULL;
        }
}

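/* If event recording is enabled, finalize the current event and append it to
the event log. */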
void factor_vm::end_gc()
{
        if(gc_events)
        {
                current_gc->event->ended_gc(this);
                gc_events->push_back(*current_gc->event);
        }
}

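/* Called when the current collection ran out of room in its target
generation: escalate to the next-larger collection and, if recording is
enabled, start a fresh event for the new op. */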
void factor_vm::start_gc_again()
{
        end_gc();

        switch(current_gc->op)
        {
        case collect_nursery_op:
                /* Nursery collection can fail if aging does not have enough
                free space to fit all live objects from the nursery. */
                current_gc->op = collect_aging_op;
                break;
        case collect_aging_op:
                /* Aging collection can fail if the aging semispace cannot fit
                all the live objects from the other aging semispace and the
                nursery. */
                current_gc->op = collect_to_tenured_op;
                break;
        default:
                /* Nothing else should fail mid-collection due to insufficient
                space in the target generation. */
                critical_error("Bad GC op",current_gc->op);
                break;
        }

        if(gc_events)
        {
                /* end_gc() already copied the old event into the log */
                delete current_gc->event;
                current_gc->event = new gc_event(current_gc->op,this);
        }
}

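/* Change the op mid-collection, keeping the in-flight event (if any) in sync
so its statistics are attributed to the op actually performed. */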
void factor_vm::set_current_gc_op(gc_op op)
{
        current_gc->op = op;
        if(gc_events) current_gc->event->op = op;
}

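/* Top-level GC driver. Runs the requested collection; if a copying phase
throws must_start_gc_again because its target generation filled up, the op is
escalated and the collection retried. */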
void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p)
{
        assert(!gc_off);
        assert(!current_gc);

        /* Important invariant: tenured space must have enough contiguous free
        space to fit the entire contents of the aging space and nursery. This is
        because when doing a full collection, objects from younger generations
        are promoted before any unreachable tenured objects are freed. */
        assert(!data->high_fragmentation_p());

        current_gc = new gc_state(op,this);

        /* Keep trying to GC higher and higher generations until we don't run
        out of space in the target generation. */
        for(;;)
        {
                try
                {
                        if(gc_events) current_gc->event->op = current_gc->op;

                        switch(current_gc->op)
                        {
                        case collect_nursery_op:
                                collect_nursery();
                                break;
                        case collect_aging_op:
                                /* We end up here if the above fails. */
                                collect_aging();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_to_tenured_op:
                                /* We end up here if the above fails. */
                                collect_to_tenured();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_full_op:
                                collect_full(trace_contexts_p);
                                break;
                        case collect_compact_op:
                                collect_compact(trace_contexts_p);
                                break;
                        case collect_growing_heap_op:
                                collect_growing_heap(requested_bytes,trace_contexts_p);
                                break;
                        default:
                                critical_error("Bad GC op",current_gc->op);
                                break;
                        }

                        break;
                }
                catch(const must_start_gc_again &)
                {
                        /* We come back here if the target generation is full. */
                        start_gc_again();
                        continue;
                }
        }

        end_gc();

        delete current_gc;
        current_gc = NULL;

        /* Check the invariant again, just in case. */
        assert(!data->high_fragmentation_p());
}

/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the comment
in compiler.cfg.stacks.uninitialized for details. */

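/* Visits each call frame and, using the frame's return address and the
compiled block's gc_info, scrubs (clears) the stack locations the compiler
marked as uninitialized at that point, so the GC never traces stale values. */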
struct call_frame_scrubber {
        factor_vm *parent;
        context *ctx;

        explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
                parent(parent_), ctx(ctx_) {}

        void operator()(stack_frame *frame)
        {
                cell return_address = parent->frame_offset(frame);
                if(return_address == (cell)-1)
                        return;

                code_block *compiled = parent->frame_code(frame);
                gc_info *info = compiled->block_gc_info();

                assert(return_address < compiled->size());
                cell index = info->return_address_index(return_address);
                if(index != (cell)-1)
                        ctx->scrub_stacks(info,index);
        }
};

void factor_vm::scrub_context(context *ctx)
{
        call_frame_scrubber scrubber(this,ctx);
        iterate_callstack(ctx,scrubber);
}

void factor_vm::scrub_contexts()
{
        std::set<context *>::const_iterator begin = active_contexts.begin();
        std::set<context *>::const_iterator end = active_contexts.end();
        while(begin != end)
        {
                scrub_context(*begin);
                begin++;
        }
}

void factor_vm::primitive_minor_gc()
{
        scrub_contexts();

        gc(collect_nursery_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_full_gc()
{
        gc(collect_full_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc()
{
        gc(collect_compact_op,
                0, /* requested size */
                true /* trace contexts? */);
}

/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
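/* Large objects are allocated directly in tenured space. The free-space check
below adds data->high_water_mark() to the request so that tenured space keeps
enough headroom to promote the nursery and aging spaces later, preserving the
invariant asserted at the top of gc(). */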
object *factor_vm::allot_large_object(cell type, cell size)
{
        /* If tenured space does not have enough room, collect and compact */
        if(!data->tenured->can_allot_p(size + data->high_water_mark()))
        {
                primitive_compact_gc();

                /* If it still won't fit, grow the heap */
                if(!data->tenured->can_allot_p(size))
                {
                        gc(collect_growing_heap_op,
                                size, /* requested size */
                                true /* trace contexts? */);
                }
        }

        object *obj = data->tenured->allot(size);

        /* Mark the object's cards now, so that initialization code can
        store old->new pointers into it without performing a write barrier,
        just as in the common case of a nursery allocation where no barrier
        is needed. */
        write_barrier(obj,size);

        obj->initialize(type);
        return obj;
}

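/* GC event recording: enabling allocates a fresh event log; disabling hands
the recorded events to Factor as an array of byte arrays (or pushes f if
recording was never enabled) and frees the log. */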
void factor_vm::primitive_enable_gc_events()
{
        gc_events = new std::vector<gc_event>();
}

void factor_vm::primitive_disable_gc_events()
{
        if(gc_events)
        {
                growable_array result(this);

                std::vector<gc_event> *gc_events = this->gc_events;
                this->gc_events = NULL;

                std::vector<gc_event>::const_iterator iter = gc_events->begin();
                std::vector<gc_event>::const_iterator end = gc_events->end();

                for(; iter != end; iter++)
                {
                        gc_event event = *iter;
                        byte_array *obj = byte_array_from_value(&event);
                        result.add(tag<byte_array>(obj));
                }

                result.trim();
                ctx->push(result.elements.value());

                /* Free the detached event log; this->gc_events is already NULL */
                delete gc_events;
        }
        else
                ctx->push(false_object);
}


}