gc_event::gc_event(gc_op op, factor_vm* parent)
    : op(op),
      cards_scanned(0),
      decks_scanned(0),
      code_blocks_scanned(0),
      card_scan_time(0),
      code_scan_time(0),
      data_sweep_time(0),
      code_sweep_time(0),
      compaction_time(0) {
  data_heap_before = parent->data_room();
  code_heap_before = parent->code_room();
  start_time = nano_count();
}
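
/* Each collection phase is timed with a started_... / ended_... pair:
   started_ stamps temp_time with the current nanosecond counter, and the
   matching ended_ stores the elapsed time. A given collection only runs
   some of the phases, so timings left at zero mark phases that never
   happened. */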
void gc_event::started_card_scan() { temp_time = nano_count(); }

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_) {
  cards_scanned += cards_scanned_;
  decks_scanned += decks_scanned_;
  card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan() { temp_time = nano_count(); }

void gc_event::ended_code_scan(cell code_blocks_scanned_) {
  code_blocks_scanned += code_blocks_scanned_;
  code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep() { temp_time = nano_count(); }

void gc_event::ended_data_sweep() {
  data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep() { temp_time = nano_count(); }

void gc_event::ended_code_sweep() {
  code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction() { temp_time = nano_count(); }

void gc_event::ended_compaction() {
  compaction_time = (cell)(nano_count() - temp_time);
}
void gc_event::ended_gc(factor_vm* parent) {
  data_heap_after = parent->data_room();
  code_heap_after = parent->code_room();
  total_time = (cell)(nano_count() - start_time);
}
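
/* The before/after room() snapshots allow consumers of the event log to
   compute how much space each collection reclaimed in the data and code
   heaps. */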
gc_state::gc_state(gc_op op, factor_vm* parent) : op(op) {
  if (parent->gc_events) {
    event = new gc_event(op, parent);
    start_time = nano_count();
  } else
    event = NULL;
}
gc_state::~gc_state() {
  if (event) {
    delete event;
    event = NULL;
  }
}
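
/* The gc_state owns its event. end_gc() copies the completed gc_event into
   the gc_events vector by value, so the original can safely be freed here
   once the collection is over. */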
void factor_vm::end_gc() {
  if (gc_events) {
    current_gc->event->ended_gc(this);
    gc_events->push_back(*current_gc->event);
  }
}
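
/* Called when a collection runs out of room in its target generation and
   throws must_start_gc_again (see the catch clause in factor_vm::gc below):
   the failed attempt is logged, and the op is escalated so that the retry
   collects a larger generation. */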
void factor_vm::start_gc_again() {
  end_gc();

  switch (current_gc->op) {
    case collect_nursery_op:
      /* Nursery collection can fail if aging does not have enough
         free space to fit all live objects from nursery. */
      current_gc->op = collect_aging_op;
      break;
    case collect_aging_op:
      /* Aging collection can fail if the aging semispace cannot fit
         all the live objects from the other aging semispace and the
         nursery. */
      current_gc->op = collect_to_tenured_op;
      break;
    default:
      /* Nothing else should fail mid-collection due to insufficient
         space in the target generation. */
      critical_error("Bad GC op", current_gc->op);
      break;
  }

  if (gc_events) {
    /* The old event was already copied into the log by end_gc() above,
       so free it before installing one for the escalated op. */
    delete current_gc->event;
    current_gc->event = new gc_event(current_gc->op, this);
  }
}
void factor_vm::set_current_gc_op(gc_op op) {
  current_gc->op = op;
  if (gc_events)
    current_gc->event->op = op;
}
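
/* Top-level GC driver. Retries with progressively bigger collections
   (nursery -> aging -> to-tenured, falling back to a full collection on
   high fragmentation) until one completes without exhausting its target
   generation. */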
void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p) {
  FACTOR_ASSERT(!gc_off);
  FACTOR_ASSERT(!current_gc);

  /* Important invariant: tenured space must have enough contiguous free
     space to fit the entire contents of the aging space and nursery. This is
     because when doing a full collection, objects from younger generations
     are promoted before any unreachable tenured objects are freed. */
  FACTOR_ASSERT(!data->high_fragmentation_p());

  current_gc = new gc_state(op, this);
  atomic::store(&current_gc_p, true);

  /* Keep trying to GC higher and higher generations until we don't run
     out of space in the target generation. */
  for (;;) {
    try {
      if (gc_events)
        current_gc->event->op = current_gc->op;

      switch (current_gc->op) {
        case collect_nursery_op:
          collect_nursery();
          break;
        case collect_aging_op:
          /* We end up here if the above fails. */
          collect_aging();
          if (data->high_fragmentation_p()) {
            /* Change GC op so that if we fail again, we crash. */
            set_current_gc_op(collect_full_op);
            collect_full(trace_contexts_p);
          }
          break;
        case collect_to_tenured_op:
          /* We end up here if the above fails. */
          collect_to_tenured();
          if (data->high_fragmentation_p()) {
            /* Change GC op so that if we fail again, we crash. */
            set_current_gc_op(collect_full_op);
            collect_full(trace_contexts_p);
          }
          break;
        case collect_full_op:
          collect_full(trace_contexts_p);
          break;
        case collect_compact_op:
          collect_compact(trace_contexts_p);
          break;
        case collect_growing_heap_op:
          collect_growing_heap(requested_size, trace_contexts_p);
          break;
        default:
          critical_error("Bad GC op", current_gc->op);
          break;
      }

      break;
    }
    catch (const must_start_gc_again&) {
      /* We come back here if the target generation is full. */
      start_gc_again();
      continue;
    }
  }

  end_gc();

  atomic::store(&current_gc_p, false);
  delete current_gc;
  current_gc = NULL;

  /* Check the invariant again, just in case. */
  FACTOR_ASSERT(!data->high_fragmentation_p());
}
/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
   uninitialized stack locations before actually calling the GC. See the
   comment in compiler.cfg.stacks.uninitialized for details. */
struct call_frame_scrubber {
  factor_vm* parent;
  context* ctx;

  call_frame_scrubber(factor_vm* parent, context* ctx)
      : parent(parent), ctx(ctx) {}

  void operator()(void* frame_top, cell frame_size, code_block* owner,
                  void* addr) {
    cell return_address = owner->offset(addr);

    gc_info* info = owner->block_gc_info();

    FACTOR_ASSERT(return_address < owner->size());
    cell index = info->return_address_index(return_address);
    if (index != (cell)-1)
      ctx->scrub_stacks(info, index);
  }
};
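
/* scrub_stacks() overwrites the stack slots that the code block's gc_info
   map marks as uninitialized at this return address, so the collector does
   not trace leftover garbage values as if they were live pointers. */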
void factor_vm::scrub_context(context* ctx) {
  call_frame_scrubber scrubber(this, ctx);
  iterate_callstack(ctx, scrubber);
}
void factor_vm::scrub_contexts() {
  std::set<context*>::const_iterator begin = active_contexts.begin();
  std::set<context*>::const_iterator end = active_contexts.end();
  while (begin != end) {
    scrub_context(*begin);
    begin++;
  }
}
void factor_vm::primitive_minor_gc() {
  scrub_contexts();

  gc(collect_nursery_op, 0, /* requested size */
     true /* trace contexts? */);
}
void factor_vm::primitive_full_gc() {
  gc(collect_full_op, 0, /* requested size */
     true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc() {
  gc(collect_compact_op, 0, /* requested size */
     true /* trace contexts? */);
}
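
/* These are exposed to Factor as the minor-gc, full-gc and compact-gc
   primitives. */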
/* It is up to the caller to fill in the object's fields in a meaningful
   fashion! */
/* Allocates memory */
object* factor_vm::allot_large_object(cell type, cell size) {
  /* If tenured space does not have enough room, collect and compact */
  cell requested_size = size + data->high_water_mark();
  if (!data->tenured->can_allot_p(requested_size)) {
    primitive_compact_gc();

    /* If it still won't fit, grow the heap */
    if (!data->tenured->can_allot_p(requested_size)) {
      gc(collect_growing_heap_op, size, /* requested size */
         true /* trace contexts? */);
    }
  }

  object* obj = data->tenured->allot(size);

  /* Allows initialization code to store old->new pointers
     without hitting the write barrier in the common case of
     a nursery allocation */
  write_barrier(obj, size);

  obj->initialize(type);
  return obj;
}
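
/* Note that requested_size reserves high_water_mark() bytes of headroom
   beyond the object itself, presumably so tenured space keeps enough
   contiguous free room to preserve the invariant asserted in
   factor_vm::gc() above. */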
void factor_vm::primitive_enable_gc_events() {
  gc_events = new std::vector<gc_event>();
}
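
/* Note: calling this twice without an intervening disable replaces the
   pointer and leaks the previous vector; enable/disable are presumably
   expected to be paired by the caller. */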
/* Allocates memory */
void factor_vm::primitive_disable_gc_events() {
  if (gc_events) {
    growable_array result(this);

    /* Take ownership of the vector and clear the field so that no further
       events are recorded while we serialize it. */
    std::vector<gc_event>* gc_events = this->gc_events;
    this->gc_events = NULL;

    std::vector<gc_event>::const_iterator iter = gc_events->begin();
    std::vector<gc_event>::const_iterator end = gc_events->end();

    for (; iter != end; iter++) {
      gc_event event = *iter;
      byte_array* obj = byte_array_from_value(&event);
      result.add(tag<byte_array>(obj));
    }

    result.trim();
    ctx->push(result.elements.value());

    /* Delete the local pointer, not this->gc_events, which is now NULL. */
    delete gc_events;
  } else
    ctx->push(false_object);
}
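
/* Each recorded event is handed back as a raw byte-array image of the
   gc_event struct; decoding the fields is left to the Factor side. */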