gc_event::gc_event(gc_op op_, factor_vm *parent) :
    op(op_),
    cards_scanned(0), decks_scanned(0), code_blocks_scanned(0),
    start_time(nano_count()),
    card_scan_time(0), code_scan_time(0), data_sweep_time(0),
    code_sweep_time(0), compaction_time(0)
{
    data_heap_before = parent->data_room();
    code_heap_before = parent->code_room();
    /* Restart the clock after the heap-room snapshots */
    start_time = nano_count();
}
void gc_event::started_card_scan()
{
    temp_time = nano_count();
}
void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
    cards_scanned += cards_scanned_;
    decks_scanned += decks_scanned_;
    card_scan_time = (nano_count() - temp_time);
}
void gc_event::started_code_scan()
{
    temp_time = nano_count();
}
void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
    code_blocks_scanned += code_blocks_scanned_;
    code_scan_time = (nano_count() - temp_time);
}
void gc_event::started_data_sweep()
{
    temp_time = nano_count();
}
void gc_event::ended_data_sweep()
{
    data_sweep_time = (nano_count() - temp_time);
}
void gc_event::started_code_sweep()
{
    temp_time = nano_count();
}
void gc_event::ended_code_sweep()
{
    code_sweep_time = (nano_count() - temp_time);
}
void gc_event::started_compaction()
{
    temp_time = nano_count();
}
void gc_event::ended_compaction()
{
    compaction_time = (nano_count() - temp_time);
}
void gc_event::ended_gc(factor_vm *parent)
{
    data_heap_after = parent->data_room();
    code_heap_after = parent->code_room();
    total_time = nano_count() - start_time;
}
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_), start_time(nano_count())
{
    event = new gc_event(op,parent);
}
void factor_vm::end_gc()
{
    current_gc->event->ended_gc(this);
    /* Save the event to the log only if event recording is enabled */
    if(gc_events) gc_events->push_back(*current_gc->event);
    delete current_gc->event;
    current_gc->event = NULL;
}
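/* Called (via the setjmp in gc() below) when a collection runs out of room:
the event for the failed collection is closed out and the op is escalated to
the next larger collection. */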
void factor_vm::start_gc_again()
{
    end_gc();

    switch(current_gc->op)
    {
    case collect_nursery_op:
        current_gc->op = collect_aging_op;
        break;
    case collect_aging_op:
        current_gc->op = collect_to_tenured_op;
        break;
    case collect_to_tenured_op:
        current_gc->op = collect_full_op;
        break;
    case collect_full_op:
    case collect_compact_op:
        current_gc->op = collect_growing_heap_op;
        break;
    default:
        critical_error("Bad GC op",current_gc->op);
    }

    current_gc->event = new gc_event(current_gc->op,this);
}
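/* Main entry point for a collection. Runs the requested op; if a generation
fills up mid-collection, control longjmps back to the setjmp below and the
op is escalated via start_gc_again(). */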
void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p)
{
    current_gc = new gc_state(op,this);

    /* Keep trying to GC higher and higher generations until we don't run out
    of space */
    if(setjmp(current_gc->gc_unwind))
    {
        /* We come back here if a generation is full */
        start_gc_again();
    }

    current_gc->event->op = current_gc->op;
    switch(current_gc->op)
    {
    case collect_nursery_op:
        collect_nursery();
        break;
    case collect_aging_op:
        collect_aging();
        if(data->high_fragmentation_p())
        {
            current_gc->op = collect_full_op;
            current_gc->event->op = collect_full_op;
            collect_full(trace_contexts_p);
        }
        break;
    case collect_to_tenured_op:
        collect_to_tenured();
        if(data->high_fragmentation_p())
        {
            current_gc->op = collect_full_op;
            current_gc->event->op = collect_full_op;
            collect_full(trace_contexts_p);
        }
        break;
    case collect_full_op:
        collect_full(trace_contexts_p);
        break;
    case collect_compact_op:
        collect_compact(trace_contexts_p);
        break;
    case collect_growing_heap_op:
        collect_growing_heap(requested_bytes,trace_contexts_p);
        break;
    default:
        critical_error("Bad GC op",current_gc->op);
    }

    end_gc();
    delete current_gc;
    current_gc = NULL;
}
void factor_vm::primitive_minor_gc()
{
    gc(collect_nursery_op,
        0, /* requested size */
        true /* trace contexts? */);
}
void factor_vm::primitive_full_gc()
{
    gc(collect_full_op,
        0, /* requested size */
        true /* trace contexts? */);
}
void factor_vm::primitive_compact_gc()
{
    gc(collect_compact_op,
        0, /* requested size */
        true /* trace contexts? */);
}
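/* Push a range of roots supplied by the caller so the minor collection can
trace and update them, then pop the range again afterwards. */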
void factor_vm::inline_gc(cell *data_roots_base, cell data_roots_size)
{
    data_roots.push_back(data_root_range(data_roots_base,data_roots_size));
    primitive_minor_gc();
    data_roots.pop_back();
}
VM_C_API void inline_gc(cell *data_roots_base, cell data_roots_size, factor_vm *parent)
{
    parent->inline_gc(data_roots_base,data_roots_size);
}
/* Allot an object directly in tenured space. It is up to the caller to fill
in the object's fields in a meaningful fashion. */
object *factor_vm::allot_large_object(cell type, cell size)
{
    /* If tenured space does not have enough room, collect and compact */
    if(!data->tenured->can_allot_p(size))
    {
        primitive_compact_gc();

        /* If it still won't fit, grow the heap */
        if(!data->tenured->can_allot_p(size))
        {
            gc(collect_growing_heap_op,
                size, /* requested size */
                true /* trace contexts? */);
        }
    }

    object *obj = data->tenured->allot(size);

    /* Allows initialization code to store old->new pointers
    without hitting the write barrier in the common case of
    a nursery allocation */
    write_barrier(obj,size);

    obj->initialize(type);
    return obj;
}
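/* GC event recording is toggled at runtime: enabling allocates a fresh event
log, and disabling hands the accumulated events back as byte arrays, or
pushes false_object if recording was not enabled. */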
void factor_vm::primitive_enable_gc_events()
{
    gc_events = new std::vector<gc_event>();
}
void factor_vm::primitive_disable_gc_events()
{
    if(gc_events)
    {
        growable_array result(this);

        /* Detach the log first so collections triggered below don't append to it */
        std::vector<gc_event> *gc_events = this->gc_events;
        this->gc_events = NULL;

        std::vector<gc_event>::const_iterator iter = gc_events->begin();
        std::vector<gc_event>::const_iterator end = gc_events->end();
        for(; iter != end; iter++)
        {
            gc_event event = *iter;
            byte_array *obj = byte_array_from_value(&event);
            result.add(tag<byte_array>(obj));
        }

        result.trim();
        ctx->push(result.elements.value());
        delete gc_events; /* delete via the local copy; the member was cleared above */
    }
    else
        ctx->push(false_object);
}