#include "master.hpp"

namespace factor
{

gc_event::gc_event(gc_op op_, factor_vm *parent) :
  op(op_),
  cards_scanned(0), decks_scanned(0), code_blocks_scanned(0),
  start_time(nano_count()),
  card_scan_time(0), code_scan_time(0),
  data_sweep_time(0), code_sweep_time(0), compaction_time(0)
{
  data_heap_before = parent->data_room();
  code_heap_before = parent->code_room();
  /* Re-read the clock so that the room() snapshots above are not
  counted in total_time */
  start_time = nano_count();
}
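/* Each collection phase is timed with a started/ended pair of hooks. The
started hook records the current nanosecond clock in temp_time; the
matching ended hook folds the elapsed time (and any phase counters) into
the event. Only one phase is in flight at a time, so a single temp_time
slot suffices. The scan counters and times accumulate with += so that
repeated scans within one event are summed. */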
void gc_event::started_card_scan() { temp_time = nano_count(); }

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
  cards_scanned += cards_scanned_;
  decks_scanned += decks_scanned_;
  card_scan_time += (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan() { temp_time = nano_count(); }

void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
  code_blocks_scanned += code_blocks_scanned_;
  code_scan_time += (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep() { temp_time = nano_count(); }

void gc_event::ended_data_sweep()
{
  data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep() { temp_time = nano_count(); }

void gc_event::ended_code_sweep()
{
  code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction() { temp_time = nano_count(); }

void gc_event::ended_compaction()
{
  compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm *parent)
{
  data_heap_after = parent->data_room();
  code_heap_after = parent->code_room();
  total_time = (cell)(nano_count() - start_time);
}
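/* A gc_state lives for the duration of one logical collection, and owns
the gc_event used to record statistics for it. The event is only
allocated when event recording has been enabled (see
primitive_enable_gc_events() below). */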
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_)
{
  if(parent->gc_events)
  {
    event = new gc_event(op,parent);
    start_time = nano_count();
  }
  else
    event = NULL;
}

gc_state::~gc_state()
{
  if(event)
  {
    delete event;
    event = NULL;
  }
}
void factor_vm::end_gc()
{
  if(gc_events)
  {
    current_gc->event->ended_gc(this);
    gc_events->push_back(*current_gc->event);
  }
}
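/* If the collection that just ran could not free enough memory, escalate
to the next larger collection and try again:

  nursery -> aging -> to_tenured -> full -> growing heap

A full or compacting collection that still cannot satisfy the request can
only be followed by growing the data heap. */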
void factor_vm::start_gc_again()
{
  end_gc();

  switch(current_gc->op)
  {
  case collect_nursery_op:
    current_gc->op = collect_aging_op;
    break;
  case collect_aging_op:
    current_gc->op = collect_to_tenured_op;
    break;
  case collect_to_tenured_op:
    current_gc->op = collect_full_op;
    break;
  case collect_full_op:
  case collect_compact_op:
    current_gc->op = collect_growing_heap_op;
    break;
  default:
    critical_error("Bad GC op",current_gc->op);
    break;
  }

  if(gc_events)
  {
    /* end_gc() pushed a copy of the old event, so free it before
    allocating the event for the retried collection */
    delete current_gc->event;
    current_gc->event = new gc_event(current_gc->op,this);
  }
}
void factor_vm::set_current_gc_op(gc_op op)
{
  current_gc->op = op;
  if(gc_events) current_gc->event->op = op;
}
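/* Top-level GC driver. A collector that runs out of room in its target
generation throws must_start_gc_again; the handler below escalates
current_gc->op via start_gc_again() and retries, so one call to gc() may
perform several increasingly aggressive collections. */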
void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p)
{
  assert(!gc_off);
  assert(!current_gc);

  current_gc = new gc_state(op,this);

  /* Keep trying to GC higher and higher generations until we don't run out
  of space */
  for(;;)
  {
    try
    {
      if(gc_events) current_gc->event->op = current_gc->op;

      switch(current_gc->op)
      {
      case collect_nursery_op:
        collect_nursery();
        break;
      case collect_aging_op:
        collect_aging();
        /* If tenured space is badly fragmented afterwards, do a full
        collection right away */
        if(data->high_fragmentation_p())
        {
          set_current_gc_op(collect_full_op);
          collect_full(trace_contexts_p);
        }
        break;
      case collect_to_tenured_op:
        collect_to_tenured();
        if(data->high_fragmentation_p())
        {
          set_current_gc_op(collect_full_op);
          collect_full(trace_contexts_p);
        }
        break;
      case collect_full_op:
        collect_full(trace_contexts_p);
        break;
      case collect_compact_op:
        collect_compact(trace_contexts_p);
        break;
      case collect_growing_heap_op:
        collect_growing_heap(requested_bytes,trace_contexts_p);
        break;
      default:
        critical_error("Bad GC op",current_gc->op);
        break;
      }

      /* The collection succeeded; we're done */
      break;
    }
    catch(const must_start_gc_again &)
    {
      /* We come back here if a generation is full */
      start_gc_again();
      continue;
    }
  }

  end_gc();

  delete current_gc;
  current_gc = NULL;
}
/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the comment
in compiler.cfg.stacks.uninitialized for details. */
struct call_frame_scrubber {
  factor_vm *parent;
  context *ctx;

  explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
    parent(parent_), ctx(ctx_) {}

  void operator()(stack_frame *frame)
  {
    cell return_address = parent->frame_offset(frame);
    if(return_address == (cell)-1)
      return;

    code_block *compiled = parent->frame_code(frame);
    gc_info *info = compiled->block_gc_info();

    assert(return_address < compiled->size());
    cell index = info->return_address_index(return_address);
    if(index != (cell)-1)
      ctx->scrub_stacks(info,index);
  }
};
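/* scrub_stacks(), defined on context, consults the code block's gc_info
bitmaps for the given return address and overwrites the data and retain
stack locations recorded as uninitialized, so the collector never traces
stale values. Frames for which no offset is recorded ((cell)-1) are
skipped. */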
void factor_vm::scrub_context(context *ctx)
{
  call_frame_scrubber scrubber(this,ctx);
  iterate_callstack(ctx,scrubber);
}
void factor_vm::scrub_contexts()
{
  std::set<context *>::const_iterator iter = active_contexts.begin();
  std::set<context *>::const_iterator end = active_contexts.end();
  for(; iter != end; iter++)
    scrub_context(*iter);
}
void factor_vm::primitive_minor_gc()
{
  scrub_contexts();

  gc(collect_nursery_op,
    0, /* requested size */
    true /* trace contexts? */);
}
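/* The full and compacting GC primitives below are reached through an
ordinary primitive call rather than an inline GC check, so the stacks
are already in a consistent state and no scrubbing is needed first. */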
void factor_vm::primitive_full_gc()
{
  gc(collect_full_op,
    0, /* requested size */
    true /* trace contexts? */);
}
void factor_vm::primitive_compact_gc()
{
  gc(collect_compact_op,
    0, /* requested size */
    true /* trace contexts? */);
}
/* Allocates an object of the given type and size directly in tenured
space, bypassing the nursery. It is up to the caller to fill in the
object's fields in a meaningful fashion. */
object *factor_vm::allot_large_object(cell type, cell size)
{
  /* If tenured space does not have enough room, collect and compact */
  if(!data->tenured->can_allot_p(size))
  {
    primitive_compact_gc();

    /* If it still won't fit, grow the heap */
    if(!data->tenured->can_allot_p(size))
    {
      gc(collect_growing_heap_op,
        size, /* requested size */
        true /* trace contexts? */);
    }
  }

  object *obj = data->tenured->allot(size);

  /* Dirty the object's cards once up front. This allows initialization
  code to store old->new pointers into the object without executing the
  write barrier on every store, just as if it had been a nursery
  allocation */
  write_barrier(obj,size);

  obj->initialize(type);
  return obj;
}
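/* GC event recording. While enabled, one gc_event is accumulated per
collection and appended to the gc_events vector; disabling hands the
recorded events to Factor as a sequence of byte arrays. Enabling twice
without an intervening disable would leak the first vector, so callers
are expected to pair these calls. */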
void factor_vm::primitive_enable_gc_events()
{
  gc_events = new std::vector<gc_event>();
}
void factor_vm::primitive_disable_gc_events()
{
  if(gc_events)
  {
    growable_array result(this);

    /* Detach the vector first so that GCs triggered by the allocations
    below do not append new events while we iterate */
    std::vector<gc_event> *events = gc_events;
    gc_events = NULL;

    std::vector<gc_event>::const_iterator iter = events->begin();
    std::vector<gc_event>::const_iterator end = events->end();

    for(; iter != end; iter++)
    {
      gc_event event = *iter;
      byte_array *obj = byte_array_from_value(&event);
      result.add(tag<byte_array>(obj));
    }

    result.trim();
    ctx->push(result.elements.value());

    delete events;
  }
  else
    ctx->push(false_object);
}

}