#include "master.hpp"

namespace factor {

gc_event::gc_event(gc_op op, factor_vm* parent)
    : op(op),
      cards_scanned(0),
      decks_scanned(0),
      code_blocks_scanned(0),
      times{0} {
  data_heap_before = parent->data_room();
  code_heap_before = parent->code->allocator->as_allocator_room();
  // Set start_time after gathering the heap statistics above so that the
  // time spent collecting them is not charged to the collection itself.
  start_time = nano_count();
}

void gc_event::reset_timer() { temp_time = nano_count(); }

void gc_event::ended_phase(gc_phase phase) {
  times[phase] = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm* parent) {
  data_heap_after = parent->data_room();
  code_heap_after = parent->code->allocator->as_allocator_room();
  total_time = (cell)(nano_count() - start_time);
}
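
// Illustrative sketch of how a collector is expected to use the timing API
// above. This is assumed usage, not code from this file: PHASE_MARKING is a
// hypothetical gc_phase value and mark_reachable_objects() a hypothetical
// phase.
//
//   if (current_gc->event) {
//     current_gc->event->reset_timer();
//     mark_reachable_objects();
//     current_gc->event->ended_phase(PHASE_MARKING);  // elapsed nanoseconds
//   }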

gc_state::gc_state(gc_op op, factor_vm* parent) : op(op) {
  if (parent->gc_events) {
    event = new gc_event(op, parent);
    start_time = nano_count();
  } else
    event = NULL;
}

gc_state::~gc_state() {
  // delete on a null pointer is a no-op, so no guard or reset is needed
  // in a destructor.
  delete event;
}

void factor_vm::start_gc_again() {
  if (current_gc->op == COLLECT_NURSERY_OP) {
    // Nursery collection can fail if aging does not have enough
    // free space to fit all live objects from the nursery.
    current_gc->op = COLLECT_AGING_OP;
  } else if (current_gc->op == COLLECT_AGING_OP) {
    // Aging collection can fail if the aging semispace cannot fit
    // all the live objects from the other aging semispace and the
    // nursery.
    current_gc->op = COLLECT_TO_TENURED_OP;
  } else {
    // Nothing else should fail mid-collection due to insufficient
    // space in the target generation.
    critical_error("in start_gc_again, bad GC op", current_gc->op);
  }
}
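
// The resulting escalation order, summarized (informational comment only):
//
//   COLLECT_NURSERY_OP -> COLLECT_AGING_OP -> COLLECT_TO_TENURED_OP
//
// A failure at any later stage is treated as a fatal error rather than
// retried.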

void factor_vm::set_current_gc_op(gc_op op) {
  current_gc->op = op;
  if (gc_events)
    current_gc->event->op = op;
}

void factor_vm::gc(gc_op op, cell requested_size) {
  FACTOR_ASSERT(!gc_off);
  FACTOR_ASSERT(!current_gc);

  // Important invariant: tenured space must have enough contiguous free
  // space to fit the entire contents of the aging space and the nursery.
  // This is because when doing a full collection, objects from the younger
  // generations are promoted before any unreachable tenured objects are
  // freed.
  FACTOR_ASSERT(!data->high_fragmentation_p());
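  // For example, with illustrative numbers: a 1 MB nursery and two 2 MB
  // aging semispaces can promote at most 3 MB of live data in one full
  // collection (nursery plus one aging semispace), so tenured space must
  // keep at least 3 MB of contiguous free room. high_fragmentation_p() is
  // assumed to test a condition of this kind.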

  current_gc = new gc_state(op, this);
  if (ctx)
    ctx->callstack_seg->set_border_locked(false);
  atomic::store(&current_gc_p, true);

  // Keep trying to GC higher and higher generations until the collection
  // succeeds without running out of space in the target generation.
  for (;;) {
    try {
      if (gc_events)
        current_gc->event->op = current_gc->op;

      switch (current_gc->op) {
        case COLLECT_NURSERY_OP:
          collect_nursery();
          break;
        case COLLECT_AGING_OP:
          // We end up here if the nursery collection failed.
          collect_aging();
          if (data->high_fragmentation_p()) {
            // Change the GC op so that if we fail again, we crash.
            set_current_gc_op(COLLECT_FULL_OP);
            collect_full();
          }
          break;
        case COLLECT_TO_TENURED_OP:
          // We end up here if the aging collection failed.
          collect_to_tenured();
          if (data->high_fragmentation_p()) {
            // Change the GC op so that if we fail again, we crash.
            set_current_gc_op(COLLECT_FULL_OP);
            collect_full();
          }
          break;
        case COLLECT_FULL_OP:
          collect_full();
          break;
        case COLLECT_COMPACT_OP:
          collect_compact();
          break;
        case COLLECT_GROWING_DATA_HEAP_OP:
          collect_growing_data_heap(requested_size);
          break;
        default:
          critical_error("in gc, bad GC op", current_gc->op);
          break;
      }

      break;
    }
    catch (const must_start_gc_again&) {
      // We come back here if the target generation is full.
      start_gc_again();
    }
  }

  if (gc_events) {
    current_gc->event->ended_gc(this);
    gc_events->push_back(*current_gc->event);
  }

  atomic::store(&current_gc_p, false);
  if (ctx)
    ctx->callstack_seg->set_border_locked(true);
  delete current_gc;
  current_gc = NULL;

  // Check the invariant again, just in case.
  FACTOR_ASSERT(!data->high_fragmentation_p());
}
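
// Illustrative sketch only: the retry loop in gc() is driven by the copying
// collectors throwing must_start_gc_again when the target generation cannot
// hold the objects being promoted. The condition below is hypothetical; the
// real checks live inside the individual collectors:
//
//   if (!target_has_enough_room)  // assumed condition, not from this file
//     throw must_start_gc_again();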

void factor_vm::primitive_minor_gc() {
  gc(COLLECT_NURSERY_OP, 0);
}

void factor_vm::primitive_full_gc() {
  gc(COLLECT_FULL_OP, 0);
}

void factor_vm::primitive_compact_gc() {
  gc(COLLECT_COMPACT_OP, 0);
}

void factor_vm::primitive_enable_gc_events() {
  gc_events = new std::vector<gc_event>();
}
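
// Expected lifecycle, pieced together from the primitives above and below:
// Factor code enables recording, triggers some collections, then calls
// primitive_disable_gc_events, which hands back one byte array per recorded
// gc_event (or pushes f if recording was never enabled).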

// Allocates memory (byte_array_from_value, result.add)
// XXX: Remember that growable_array has a data_root already
void factor_vm::primitive_disable_gc_events() {
  if (gc_events) {
    growable_array result(this);

    // Take ownership of the vector through a local pointer and clear the
    // VM field so that the GCs triggered below do not record new events.
    std::vector<gc_event>* gc_events = this->gc_events;
    this->gc_events = NULL;

    FACTOR_FOR_EACH(*gc_events) {
      gc_event event = *iter;
      byte_array* obj = byte_array_from_value(&event);
      result.add(tag<byte_array>(obj));
    }

    result.trim();
    ctx->push(result.elements.value());

    // Delete through the local pointer; this->gc_events is already NULL
    // here, so deleting it instead would leak the vector.
    delete gc_events;
  } else
    ctx->push(false_object);
}

}