// gc_event records timing/occupancy statistics for one collection.
// The constructor zero-initializes counters (initializer list partially
// elided from this listing) and snapshots data/code heap occupancy
// before the collection runs.
5 gc_event::gc_event(gc_op op, factor_vm* parent)
9 code_blocks_scanned(0),
10 start_time(nano_count()),
16 data_heap_before = parent->data_room();
17 code_heap_before = parent->code->allocator->as_allocator_room();
// start_time is refreshed after the room() snapshots above —
// presumably so the bookkeeping cost is excluded from the measured
// duration; TODO(review) confirm intent.
18 start_time = nano_count();
// Restart the phase interval timer; the ended_* hooks below measure
// each phase as (nano_count() - temp_time).
21 void gc_event::reset_timer() { temp_time = nano_count(); }
// Accumulate card/deck scan counts and record the elapsed card-scan
// time since the last reset_timer() call.
23 void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_) {
24 cards_scanned += cards_scanned_;
25 decks_scanned += decks_scanned_;
26 card_scan_time = (cell)(nano_count() - temp_time);
// Accumulate the number of code blocks scanned and record the elapsed
// code-scan time since the last reset_timer() call.
29 void gc_event::ended_code_scan(cell code_blocks_scanned_) {
30 code_blocks_scanned += code_blocks_scanned_;
31 code_scan_time = (cell)(nano_count() - temp_time);
// Record elapsed data-heap sweep time since the last reset_timer().
34 void gc_event::ended_data_sweep() {
35 data_sweep_time = (cell)(nano_count() - temp_time);
// Record elapsed code-heap sweep time since the last reset_timer().
38 void gc_event::ended_code_sweep() {
39 code_sweep_time = (cell)(nano_count() - temp_time);
// Record elapsed compaction time since the last reset_timer().
42 void gc_event::ended_compaction() {
43 compaction_time = (cell)(nano_count() - temp_time);
// Called when the collection finishes: snapshot heap occupancy after
// the GC and compute the total wall-clock duration of the collection.
46 void gc_event::ended_gc(factor_vm* parent) {
47 data_heap_after = parent->data_room();
48 code_heap_after = parent->code->allocator->as_allocator_room();
49 total_time = (cell)(nano_count() - start_time);
// A gc_state tracks one (possibly escalating) collection in flight.
// An event record is only allocated when event recording is enabled
// on the VM (parent->gc_events non-null).
// NOTE(review): the else branch (presumably `event = NULL;`) is
// elided from this listing — confirm `event` is initialized on the
// disabled path.
52 gc_state::gc_state(gc_op op, factor_vm* parent) : op(op) {
53 if (parent->gc_events) {
54 event = new gc_event(op, parent);
55 start_time = nano_count();
// Destructor; body elided from this listing (presumably releases the
// heap-allocated `event` — TODO(review) confirm).
60 gc_state::~gc_state() {
// Finalize the current collection's event record and append a copy of
// it to the VM-wide gc_events vector.
// NOTE(review): a guard (presumably `if (gc_events)`) around these
// statements appears elided from this listing; without it,
// current_gc->event would be dereferenced even when recording is off.
67 void factor_vm::end_gc() {
69 current_gc->event->ended_gc(this);
70 gc_events->push_back(*current_gc->event);
// Escalate the in-progress collection after the target generation ran
// out of room mid-collection: nursery -> aging -> to_tenured. Any
// other op failing mid-collection is a fatal error.
74 void factor_vm::start_gc_again() {
75 if (current_gc->op == collect_nursery_op) {
76 // Nursery collection can fail if aging does not have enough
77 // free space to fit all live objects from nursery.
78 current_gc->op = collect_aging_op;
79 } else if (current_gc->op == collect_aging_op) {
80 // Aging collection can fail if the aging semispace cannot fit
81 // all the live objects from the other aging semispace and the
83 current_gc->op = collect_to_tenured_op;
85 // Nothing else should fail mid-collection due to insufficient
86 // space in the target generation.
87 critical_error("in start_gc_again, bad GC op", current_gc->op);
// Change the op of the collection in flight, mirroring it into the
// event record.
// NOTE(review): the assignment to current_gc->op itself and a
// `if (gc_events)` guard appear elided from this listing — confirm
// the event is only touched when recording is enabled.
91 void factor_vm::set_current_gc_op(gc_op op) {
94 current_gc->event->op = op;
// Run a garbage collection of kind `op`. `requested_size` is only
// meaningful for collect_growing_heap_op. On insufficient room in the
// target generation, must_start_gc_again is caught and the collection
// escalates to a higher generation (see start_gc_again) until it fits.
97 void factor_vm::gc(gc_op op, cell requested_size) {
98 FACTOR_ASSERT(!gc_off);
99 FACTOR_ASSERT(!current_gc);
101 // Important invariant: tenured space must have enough contiguous free
102 // space to fit the entire contents of the aging space and nursery. This is
103 // because when doing a full collection, objects from younger generations
104 // are promoted before any unreachable tenured objects are freed.
105 FACTOR_ASSERT(!data->high_fragmentation_p());
107 current_gc = new gc_state(op, this);
109 ctx->callstack_seg->set_border_locked(false);
// FIX(review): "&current_gc_p" had been mojibake-corrupted to
// "¤t_gc_p" — an HTML-entity decode swallowed "&curren" into the
// currency sign U+00A4. Restored the address-of expression here and in
// the matching store below.
110 atomic::store(&current_gc_p, true);
112 // Keep trying to GC higher and higher generations until we don't run
113 // out of space in the target generation.
117 current_gc->event->op = current_gc->op;
119 switch (current_gc->op) {
120 case collect_nursery_op:
123 case collect_aging_op:
124 // We end up here if the above fails.
126 if (data->high_fragmentation_p()) {
127 // Change GC op so that if we fail again, we crash.
128 set_current_gc_op(collect_full_op);
132 case collect_to_tenured_op:
133 // We end up here if the above fails.
134 collect_to_tenured();
135 if (data->high_fragmentation_p()) {
136 // Change GC op so that if we fail again, we crash.
137 set_current_gc_op(collect_full_op);
141 case collect_full_op:
144 case collect_compact_op:
147 case collect_growing_heap_op:
148 collect_growing_heap(requested_size);
151 critical_error("in gc, bad GC op", current_gc->op);
157 catch (const must_start_gc_again&) {
158 // We come back here if the target generation is full.
// FIX(review): same mojibake repair as above ("¤t_gc_p" ->
// "&current_gc_p").
165 atomic::store(&current_gc_p, false);
167 ctx->callstack_seg->set_border_locked(true);
171 // Check the invariant again, just in case.
172 FACTOR_ASSERT(!data->high_fragmentation_p());
// Primitive: run a nursery (minor) collection.
175 void factor_vm::primitive_minor_gc() {
176 gc(collect_nursery_op, 0);
// Primitive: run a full collection of all generations.
179 void factor_vm::primitive_full_gc() {
180 gc(collect_full_op, 0);
// Primitive: run a compacting collection.
183 void factor_vm::primitive_compact_gc() {
184 gc(collect_compact_op, 0);
// Primitive: turn on per-collection event recording by allocating the
// VM-wide event vector.
// NOTE(review): enabling twice would leak the previous vector —
// confirm callers always disable before re-enabling.
187 void factor_vm::primitive_enable_gc_events() {
188 gc_events = new std::vector<gc_event>();
191 // Allocates memory (byte_array_from_value, result.add)
192 // XXX: Remember that growable_array has a data_root already
// Primitive: flush recorded GC events to an array of byte-arrays,
// push it on the data stack, and disable further event recording.
// Pushes false_object instead when recording was not enabled.
193 void factor_vm::primitive_disable_gc_events() {
195 growable_array result(this);
// Detach the event vector first (local `gc_events` shadows the
// member) so recording is off while the allocations below run —
// they could themselves trigger a GC.
197 std::vector<gc_event>* gc_events = this->gc_events;
198 this->gc_events = NULL;
200 FACTOR_FOR_EACH(*gc_events) {
201 gc_event event = *iter;
202 byte_array* obj = byte_array_from_value(&event);
203 result.add(tag<byte_array>(obj));
207 ctx->push(result.elements.value());
// FIX(review): was `delete this->gc_events;` — but the member was set
// to NULL above, so that delete was a guaranteed no-op and the
// detached vector leaked on every disable. Delete the local pointer
// that actually owns the vector.
209 delete gc_events;
211 ctx->push(false_object);