#include "master.hpp"

namespace factor
{

gc_event::gc_event(gc_op op_, factor_vm *parent) :
  op(op_),
  cards_scanned(0),
  decks_scanned(0),
  code_blocks_scanned(0),
  start_time(nano_count()),
  card_scan_time(0),
  code_scan_time(0),
  data_sweep_time(0),
  code_sweep_time(0),
  compaction_time(0)
{
  data_heap_before = parent->data_room();
  code_heap_before = parent->code_room();
  /* Reset the clock so the room snapshots above are not counted
  in total_time */
  start_time = nano_count();
}
void gc_event::started_card_scan()
{
  temp_time = nano_count();
}

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
  cards_scanned += cards_scanned_;
  decks_scanned += decks_scanned_;
  card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan()
{
  temp_time = nano_count();
}

void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
  code_blocks_scanned += code_blocks_scanned_;
  code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep()
{
  temp_time = nano_count();
}

void gc_event::ended_data_sweep()
{
  data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep()
{
  temp_time = nano_count();
}

void gc_event::ended_code_sweep()
{
  code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction()
{
  temp_time = nano_count();
}

void gc_event::ended_compaction()
{
  compaction_time = (cell)(nano_count() - temp_time);
}
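/* Each started_X()/ended_X() pair brackets one collection phase: started_X()
stamps temp_time and ended_X() records the elapsed nanoseconds into the
corresponding field. A sketch of a typical call site (illustrative only; the
real calls live in the collector implementations and fire only when GC event
recording is enabled):

    current_gc->event->started_card_scan();
    ... trace roots found in dirty cards and decks ...
    current_gc->event->ended_card_scan(cards, decks);
*/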
void gc_event::ended_gc(factor_vm *parent)
{
  data_heap_after = parent->data_room();
  code_heap_after = parent->code_room();
  total_time = (cell)(nano_count() - start_time);
}
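/* Tooling can diff the data_room()/code_room() snapshots taken here against
the ones taken in the constructor to report how much space each collection
reclaimed. */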
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_)
{
  /* Only allocate an event when GC event recording is enabled */
  if(parent->gc_events)
  {
    event = new gc_event(op,parent);
    start_time = nano_count();
  }
  else
    event = NULL;
}

gc_state::~gc_state()
{
  if(event)
  {
    delete event;
    event = NULL;
  }
}
void factor_vm::end_gc()
{
  if(gc_events)
  {
    current_gc->event->ended_gc(this);
    gc_events->push_back(*current_gc->event);
  }
}
void factor_vm::start_gc_again()
{
  /* Record the event for the collection that just failed before
  escalating to the next generation */
  end_gc();

  switch(current_gc->op)
  {
  case collect_nursery_op:
    /* Nursery collection can fail if aging does not have enough
    free space to fit all live objects from nursery. */
    current_gc->op = collect_aging_op;
    break;
  case collect_aging_op:
    /* Aging collection can fail if the aging semispace cannot fit
    all the live objects from the other aging semispace and the
    nursery. */
    current_gc->op = collect_to_tenured_op;
    break;
  default:
    /* Nothing else should fail mid-collection due to insufficient
    space in the target generation. */
    critical_error("Bad GC op",current_gc->op);
    break;
  }

  if(gc_events)
    current_gc->event = new gc_event(current_gc->op,this);
}
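/* The escalation chain is thus collect_nursery_op -> collect_aging_op ->
collect_to_tenured_op; anything past that is handled in gc() itself, which
switches to a full collection when tenured space is too fragmented. */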
void factor_vm::set_current_gc_op(gc_op op)
{
  current_gc->op = op;
  if(gc_events) current_gc->event->op = op;
}
void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
{
  FACTOR_ASSERT(!gc_off);
  FACTOR_ASSERT(!current_gc);

  /* Important invariant: tenured space must have enough contiguous free
  space to fit the entire contents of the aging space and nursery. This is
  because when doing a full collection, objects from younger generations
  are promoted before any unreachable tenured objects are freed. */
  FACTOR_ASSERT(!data->high_fragmentation_p());
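  /* For example, with a 1 MB nursery and two 2 MB aging semispaces (sizes
  illustrative, not Factor's defaults), a full collection may promote up to
  3 MB before any tenured garbage is freed, so that much contiguous tenured
  space must be available up front. */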

  current_gc = new gc_state(op,this);
  atomic::store(&current_gc_p, true);

  /* Keep trying to GC higher and higher generations until we don't run
  out of space in the target generation. */
  for(;;)
  {
    try
    {
      if(gc_events) current_gc->event->op = current_gc->op;

      switch(current_gc->op)
      {
      case collect_nursery_op:
        collect_nursery();
        break;
      case collect_aging_op:
        /* We end up here if the above fails. */
        collect_aging();
        if(data->high_fragmentation_p())
        {
          /* Change GC op so that if we fail again, we crash. */
          set_current_gc_op(collect_full_op);
          collect_full(trace_contexts_p);
        }
        break;
      case collect_to_tenured_op:
        /* We end up here if the above fails. */
        collect_to_tenured();
        if(data->high_fragmentation_p())
        {
          /* Change GC op so that if we fail again, we crash. */
          set_current_gc_op(collect_full_op);
          collect_full(trace_contexts_p);
        }
        break;
      case collect_full_op:
        collect_full(trace_contexts_p);
        break;
      case collect_compact_op:
        collect_compact(trace_contexts_p);
        break;
      case collect_growing_heap_op:
        collect_growing_heap(requested_size,trace_contexts_p);
        break;
      default:
        critical_error("Bad GC op",current_gc->op);
        break;
      }

      /* Collection succeeded; leave the retry loop. */
      break;
    }
    catch(const must_start_gc_again &)
    {
      /* We come back here if the target generation is full. */
      start_gc_again();
      continue;
    }
  }

  end_gc();

  atomic::store(&current_gc_p, false);
  delete current_gc;
  current_gc = NULL;

  /* Check the invariant again, just in case. */
  FACTOR_ASSERT(!data->high_fragmentation_p());
}
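/* Worked example of the retry protocol: if a nursery collection overflows
aging, the copying collector throws must_start_gc_again; start_gc_again()
records the failed event and bumps the op to collect_aging_op, and the loop
reruns the collection one generation up. */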
/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the comment
in compiler.cfg.stacks.uninitialized for details. */
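/* An illustrative scenario (not from this file): compiled code may reserve a
data stack slot that is only written on one control flow path; until that
write happens the slot holds stale bits that must not be traced as an object
pointer, so the scrubber below replaces such slots with a safe value before
the collection starts. */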
struct call_frame_scrubber {
  factor_vm *parent;
  context *ctx;

  explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
    parent(parent_), ctx(ctx_) {}

  void operator()(void *frame_top, cell frame_size, code_block *owner, void *addr)
  {
    cell return_address = owner->offset(addr);

    gc_info *info = owner->block_gc_info();

    FACTOR_ASSERT(return_address < owner->size());
    cell index = info->return_address_index(return_address);
    if(index != (cell)-1)
      ctx->scrub_stacks(info,index);
  }
};
void factor_vm::scrub_context(context *ctx)
{
  call_frame_scrubber scrubber(this,ctx);
  iterate_callstack(ctx,scrubber);
}
void factor_vm::scrub_contexts()
{
  std::set<context *>::const_iterator begin = active_contexts.begin();
  std::set<context *>::const_iterator end = active_contexts.end();
  while(begin != end)
  {
    scrub_context(*begin);
    begin++;
  }
}
void factor_vm::primitive_minor_gc()
{
  scrub_contexts();

  gc(collect_nursery_op,
    0, /* requested size */
    true /* trace contexts? */);
}
void factor_vm::primitive_full_gc()
{
  gc(collect_full_op,
    0, /* requested size */
    true /* trace contexts? */);
}
void factor_vm::primitive_compact_gc()
{
  gc(collect_compact_op,
    0, /* requested size */
    true /* trace contexts? */);
}
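/* These three primitives are the VM half of Factor-level GC words; the
Factor-side bindings (words along the lines of minor-gc, gc and compact-gc)
live in the core library. */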
/* It is up to the caller to fill in the object's fields in a meaningful
fashion! */
object *factor_vm::allot_large_object(cell type, cell size)
{
  /* If tenured space does not have enough room, collect and compact */
  cell requested_size = size + data->high_water_mark();
  if(!data->tenured->can_allot_p(requested_size))
  {
    primitive_compact_gc();

    /* If it still won't fit, grow the heap */
    if(!data->tenured->can_allot_p(requested_size))
    {
      gc(collect_growing_heap_op,
        size, /* requested size */
        true /* trace contexts? */);
    }
  }

  object *obj = data->tenured->allot(size);

  /* Allows initialization code to store old->new pointers
  without hitting the write barrier in the common case of
  a nursery allocation */
  write_barrier(obj,size);

  obj->initialize(type);
  return obj;
}
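/* Illustrative caller (a sketch; the identifiers are for exposition, not
lifted from this file): a large array allocation would come through here and
must initialize its fields before anything else can allocate:

    array *a = (array *)allot_large_object(ARRAY_TYPE, array_size(capacity));
    a->capacity = tag_fixnum(capacity);
    ... fill in the elements ...
*/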
void factor_vm::primitive_enable_gc_events()
{
  gc_events = new std::vector<gc_event>();
}
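/* While recording is enabled, end_gc() appends one gc_event per collection;
primitive_disable_gc_events() below hands them to Factor as verbatim
byte-array copies of the struct. */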
void factor_vm::primitive_disable_gc_events()
{
  if(gc_events)
  {
    growable_array result(this);

    std::vector<gc_event> *gc_events = this->gc_events;
    this->gc_events = NULL;

    std::vector<gc_event>::const_iterator iter = gc_events->begin();
    std::vector<gc_event>::const_iterator end = gc_events->end();

    for(; iter != end; iter++)
    {
      gc_event event = *iter;
      byte_array *obj = byte_array_from_value(&event);
      result.add(tag<byte_array>(obj));
    }

    result.trim();
    ctx->push(result.elements.value());

    /* Delete through the local pointer; this->gc_events was cleared
    above, so "delete this->gc_events" would leak the vector */
    delete gc_events;
  }
  else
    ctx->push(false_object);
}

}