[factor.git] / vm / gc.cpp
#include "master.hpp"

namespace factor
{

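/* gc_event records the profiling data for a single collection when GC event
recording is enabled: how many cards, decks and code blocks were scanned,
how long each phase took, and data/code heap usage before and after. */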
gc_event::gc_event(gc_op op_, factor_vm *parent) :
        op(op_),
        cards_scanned(0),
        decks_scanned(0),
        code_blocks_scanned(0),
        start_time(nano_count()),
        card_scan_time(0),
        code_scan_time(0),
        data_sweep_time(0),
        code_sweep_time(0),
        compaction_time(0)
{
        data_heap_before = parent->data_room();
        code_heap_before = parent->code_room();
}

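/* The started_ and ended_ member functions below bracket the individual
phases of a collection and record how long each one took. */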
void gc_event::started_card_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
        cards_scanned += cards_scanned_;
        decks_scanned += decks_scanned_;
        card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
        code_blocks_scanned += code_blocks_scanned_;
        code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_data_sweep()
{
        data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_code_sweep()
{
        code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction()
{
        temp_time = nano_count();
}

void gc_event::ended_compaction()
{
        compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm *parent)
{
        data_heap_after = parent->data_room();
        code_heap_after = parent->code_room();
        total_time = (cell)(nano_count() - start_time);
}

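/* gc_state describes the collection currently in progress. It owns the
gc_event used for profiling, which is only allocated when event recording
is enabled. */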
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_)
{
        if(parent->gc_events)
        {
                event = new gc_event(op,parent);
                start_time = nano_count();
        }
        else
                event = NULL;
}

gc_state::~gc_state()
{
        if(event)
        {
                delete event;
                event = NULL;
        }
}

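/* Finish the current collection's event and append it to the event log. */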
void factor_vm::end_gc()
{
        if(gc_events)
        {
                current_gc->event->ended_gc(this);
                gc_events->push_back(*current_gc->event);
        }
}

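/* Called when the target generation filled up mid-collection; escalate to
collecting the next larger generation and start a fresh event for it. */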
void factor_vm::start_gc_again()
{
        end_gc();

        switch(current_gc->op)
        {
        case collect_nursery_op:
                /* Nursery collection can fail if aging does not have enough
                free space to fit all live objects from the nursery. */
                current_gc->op = collect_aging_op;
                break;
        case collect_aging_op:
                /* Aging collection can fail if the aging semispace cannot fit
                all the live objects from the other aging semispace and the
                nursery. */
                current_gc->op = collect_to_tenured_op;
                break;
        default:
                /* Nothing else should fail mid-collection due to insufficient
                space in the target generation. */
                critical_error("Bad GC op",current_gc->op);
                break;
        }

        if(gc_events)
                current_gc->event = new gc_event(current_gc->op,this);
}

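/* Keep the gc_state and its event (if any) in agreement about which
collection is being performed. */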
void factor_vm::set_current_gc_op(gc_op op)
{
        current_gc->op = op;
        if(gc_events) current_gc->event->op = op;
}

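/* Top-level GC entry point. Runs the requested collection, retrying with
progressively larger collections if the target generation runs out of room,
and records an event for each attempt when profiling is enabled. */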
void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
{
        FACTOR_ASSERT(!gc_off);
        FACTOR_ASSERT(!current_gc);

        /* Important invariant: tenured space must have enough contiguous free
        space to fit the entire contents of the aging space and nursery. This is
        because when doing a full collection, objects from younger generations
        are promoted before any unreachable tenured objects are freed. */
        FACTOR_ASSERT(!data->high_fragmentation_p());

        current_gc = new gc_state(op,this);
        atomic::store(&current_gc_p, true);

        /* Keep trying to GC higher and higher generations until we don't run
        out of space in the target generation. */
        for(;;)
        {
                try
                {
                        if(gc_events) current_gc->event->op = current_gc->op;

                        switch(current_gc->op)
                        {
                        case collect_nursery_op:
                                collect_nursery();
                                break;
                        case collect_aging_op:
                                /* We end up here if the above fails. */
                                collect_aging();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_to_tenured_op:
                                /* We end up here if the above fails. */
                                collect_to_tenured();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_full_op:
                                collect_full(trace_contexts_p);
                                break;
                        case collect_compact_op:
                                collect_compact(trace_contexts_p);
                                break;
                        case collect_growing_heap_op:
                                collect_growing_heap(requested_size,trace_contexts_p);
                                break;
                        default:
                                critical_error("Bad GC op",current_gc->op);
                                break;
                        }

                        break;
                }
                catch(const must_start_gc_again &)
                {
                        /* We come back here if the target generation is full. */
                        start_gc_again();
                        continue;
                }
        }

        end_gc();

        atomic::store(&current_gc_p, false);
        delete current_gc;
        current_gc = NULL;

        /* Check the invariant again, just in case. */
        FACTOR_ASSERT(!data->high_fragmentation_p());
}

/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the comment
in compiler.cfg.stacks.uninitialized for details. */

struct call_frame_scrubber {
        factor_vm *parent;
        context *ctx;

        explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
                parent(parent_), ctx(ctx_) {}

        void operator()(stack_frame *frame)
        {
                cell return_address = parent->frame_offset(frame);
                if(return_address == (cell)-1)
                        return;

                code_block *compiled = parent->frame_code(frame);
                gc_info *info = compiled->block_gc_info();

                FACTOR_ASSERT(return_address < compiled->size());
                cell index = info->return_address_index(return_address);
                if(index != (cell)-1)
                        ctx->scrub_stacks(info,index);
        }
};

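/* Scrub uninitialized stack locations in every call frame of the given
context so the collector does not trace stale values. */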
void factor_vm::scrub_context(context *ctx)
{
        call_frame_scrubber scrubber(this,ctx);
        iterate_callstack(ctx,scrubber);
}

void factor_vm::scrub_contexts()
{
        std::set<context *>::const_iterator begin = active_contexts.begin();
        std::set<context *>::const_iterator end = active_contexts.end();
        while(begin != end)
        {
                scrub_context(*begin);
                begin++;
        }
}

void factor_vm::primitive_minor_gc()
{
        scrub_contexts();

        gc(collect_nursery_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_full_gc()
{
        gc(collect_full_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc()
{
        gc(collect_compact_op,
                0, /* requested size */
                true /* trace contexts? */);
}

/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
object *factor_vm::allot_large_object(cell type, cell size)
{
        /* If tenured space does not have enough room, collect and compact */
        cell requested_size = size + data->high_water_mark();
        if(!data->tenured->can_allot_p(requested_size))
        {
                primitive_compact_gc();

                /* If it still won't fit, grow the heap */
                if(!data->tenured->can_allot_p(requested_size))
                {
                        gc(collect_growing_heap_op,
                                size, /* requested size */
                                true /* trace contexts? */);
                }
        }

        object *obj = data->tenured->allot(size);

        /* Allows initialization code to store old->new pointers
        without hitting the write barrier in the common case of
        a nursery allocation */
        write_barrier(obj,size);

        obj->initialize(type);
        return obj;
}

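/* Begin recording one gc_event per collection. */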
void factor_vm::primitive_enable_gc_events()
{
        gc_events = new std::vector<gc_event>();
}

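/* Stop recording and push the accumulated events, each serialized into a
byte array, onto the data stack; pushes f if recording was never enabled. */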
void factor_vm::primitive_disable_gc_events()
{
        if(gc_events)
        {
                growable_array result(this);

                std::vector<gc_event> *gc_events = this->gc_events;
                this->gc_events = NULL;

                std::vector<gc_event>::const_iterator iter = gc_events->begin();
                std::vector<gc_event>::const_iterator end = gc_events->end();

                for(; iter != end; iter++)
                {
                        gc_event event = *iter;
                        byte_array *obj = byte_array_from_value(&event);
                        result.add(tag<byte_array>(obj));
                }

                result.trim();
                ctx->push(result.elements.value());

                delete gc_events;
        }
        else
                ctx->push(false_object);
}

}