/* factor.git: vm/gc.cpp */
#include "master.hpp"

namespace factor
{

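/* gc_event captures the timing and heap-usage statistics for a single
collection. Instances are only created when event recording has been turned
on with primitive_enable_gc_events(), i.e. when factor_vm::gc_events is
non-NULL. */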
gc_event::gc_event(gc_op op_, factor_vm *parent) :
        op(op_),
        cards_scanned(0),
        decks_scanned(0),
        code_blocks_scanned(0),
        start_time(nano_count()),
        card_scan_time(0),
        code_scan_time(0),
        data_sweep_time(0),
        code_sweep_time(0),
        compaction_time(0)
{
        data_heap_before = parent->data_room();
        code_heap_before = parent->code_room();
        start_time = nano_count();
}

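/* The started_.../ended_... pairs below bracket the individual phases of a
collection: temp_time records when the phase began, and the elapsed time, as
measured by nano_count(), is stored in the corresponding *_time field when it
ends. */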
void gc_event::started_card_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
{
        cards_scanned += cards_scanned_;
        decks_scanned += decks_scanned_;
        card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan()
{
        temp_time = nano_count();
}

void gc_event::ended_code_scan(cell code_blocks_scanned_)
{
        code_blocks_scanned += code_blocks_scanned_;
        code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_data_sweep()
{
        data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep()
{
        temp_time = nano_count();
}

void gc_event::ended_code_sweep()
{
        code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction()
{
        temp_time = nano_count();
}

void gc_event::ended_compaction()
{
        compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm *parent)
{
        data_heap_after = parent->data_room();
        code_heap_after = parent->code_room();
        total_time = (cell)(nano_count() - start_time);
}

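/* gc_state describes the collection currently in progress. It owns the
optional gc_event used for statistics and is referenced through
factor_vm::current_gc for the duration of the collection. */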
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_)
{
        if(parent->gc_events)
        {
                event = new gc_event(op,parent);
                start_time = nano_count();
        }
        else
                event = NULL;
}

gc_state::~gc_state()
{
        if(event)
        {
                delete event;
                event = NULL;
        }
}

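/* If event recording is enabled, finalize the statistics for the collection
that just finished and append them to the event list. */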
void factor_vm::end_gc()
{
        if(gc_events)
        {
                current_gc->event->ended_gc(this);
                gc_events->push_back(*current_gc->event);
        }
}

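/* Called when a collection runs out of room in the target generation (see the
must_start_gc_again handler in factor_vm::gc below): the failed attempt is
recorded and the collection is restarted against the next older generation. */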
void factor_vm::start_gc_again()
{
        end_gc();

        switch(current_gc->op)
        {
        case collect_nursery_op:
                /* Nursery collection can fail if aging does not have enough
                free space to fit all live objects from the nursery. */
                current_gc->op = collect_aging_op;
                break;
        case collect_aging_op:
                /* Aging collection can fail if the aging semispace cannot fit
                all the live objects from the other aging semispace and the
                nursery. */
                current_gc->op = collect_to_tenured_op;
                break;
        default:
                /* Nothing else should fail mid-collection due to insufficient
                space in the target generation. */
                critical_error("Bad GC op",current_gc->op);
                break;
        }

        if(gc_events)
                current_gc->event = new gc_event(current_gc->op,this);
}

void factor_vm::set_current_gc_op(gc_op op)
{
        current_gc->op = op;
        if(gc_events) current_gc->event->op = op;
}

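/* Top-level collection entry point. Starting from the requested op, the
collection is retried against progressively older generations until it
completes without exhausting the target generation. */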
void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
{
        FACTOR_ASSERT(!gc_off);
        FACTOR_ASSERT(!current_gc);

        /* Important invariant: tenured space must have enough contiguous free
        space to fit the entire contents of the aging space and nursery. This is
        because when doing a full collection, objects from younger generations
        are promoted before any unreachable tenured objects are freed. */
        FACTOR_ASSERT(!data->high_fragmentation_p());

        current_gc = new gc_state(op,this);
        atomic::store(&current_gc_p, true);

        /* Keep trying to GC higher and higher generations until we don't run
        out of space in the target generation. */
        for(;;)
        {
                try
                {
                        if(gc_events) current_gc->event->op = current_gc->op;

                        switch(current_gc->op)
                        {
                        case collect_nursery_op:
                                collect_nursery();
                                break;
                        case collect_aging_op:
                                /* We end up here if the above fails. */
                                collect_aging();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_to_tenured_op:
                                /* We end up here if the above fails. */
                                collect_to_tenured();
                                if(data->high_fragmentation_p())
                                {
                                        /* Change GC op so that if we fail again,
                                        we crash. */
                                        set_current_gc_op(collect_full_op);
                                        collect_full(trace_contexts_p);
                                }
                                break;
                        case collect_full_op:
                                collect_full(trace_contexts_p);
                                break;
                        case collect_compact_op:
                                collect_compact(trace_contexts_p);
                                break;
                        case collect_growing_heap_op:
                                collect_growing_heap(requested_size,trace_contexts_p);
                                break;
                        default:
                                critical_error("Bad GC op",current_gc->op);
                                break;
                        }

                        break;
                }
                catch(const must_start_gc_again &)
                {
                        /* We come back here if the target generation is full. */
                        start_gc_again();
                        continue;
                }
        }

        end_gc();

        atomic::store(&current_gc_p, false);
        delete current_gc;
        current_gc = NULL;

        /* Check the invariant again, just in case. */
        FACTOR_ASSERT(!data->high_fragmentation_p());
}

/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the comment
in compiler.cfg.stacks.uninitialized for details. */

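/* Visits each call frame in a context and scrubs the stack locations that the
compiler marked as uninitialized at that frame's return address, so the
collector never traces stale values. */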
struct call_frame_scrubber {
        factor_vm *parent;
        context *ctx;

        explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
                parent(parent_), ctx(ctx_) {}

        void operator()(void *frame_top, cell frame_size, code_block *owner, void *addr)
        {
                cell return_address = owner->offset(addr);

                gc_info *info = owner->block_gc_info();

                FACTOR_ASSERT(return_address < owner->size());
                cell index = info->return_address_index(return_address);
                if(index != (cell)-1)
                        ctx->scrub_stacks(info,index);
        }
};

void factor_vm::scrub_context(context *ctx)
{
        call_frame_scrubber scrubber(this,ctx);
        iterate_callstack(ctx,scrubber);
}

void factor_vm::scrub_contexts()
{
        std::set<context *>::const_iterator begin = active_contexts.begin();
        std::set<context *>::const_iterator end = active_contexts.end();
        while(begin != end)
        {
                scrub_context(*begin);
                begin++;
        }
}

void factor_vm::primitive_minor_gc()
{
        scrub_contexts();

        gc(collect_nursery_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_full_gc()
{
        gc(collect_full_op,
                0, /* requested size */
                true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc()
{
        gc(collect_compact_op,
                0, /* requested size */
                true /* trace contexts? */);
}

/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
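/* Note that the capacity check below pads the request with the data heap's
high-water mark, presumably so that tenured space retains enough headroom for
objects promoted from the younger generations after this allocation. */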
/* Allocates memory */
object *factor_vm::allot_large_object(cell type, cell size)
{
        /* If tenured space does not have enough room, collect and compact */
        cell requested_size = size + data->high_water_mark();
        if(!data->tenured->can_allot_p(requested_size))
        {
                primitive_compact_gc();

                /* If it still won't fit, grow the heap */
                if(!data->tenured->can_allot_p(requested_size))
                {
                        gc(collect_growing_heap_op,
                                size, /* requested size */
                                true /* trace contexts? */);
                }
        }

        object *obj = data->tenured->allot(size);

        /* Allows initialization code to store old->new pointers
        without hitting the write barrier in the common case of
        a nursery allocation */
        write_barrier(obj,size);

        obj->initialize(type);
        return obj;
}

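/* Start recording one gc_event per collection. The accumulated events are
handed back to Factor code by primitive_disable_gc_events() below. */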
void factor_vm::primitive_enable_gc_events()
{
        gc_events = new std::vector<gc_event>();
}

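/* Stop recording and push an array of byte arrays, one per recorded gc_event,
onto the calling context's data stack; pushes f if event recording was never
enabled. */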
/* Allocates memory */
void factor_vm::primitive_disable_gc_events()
{
        if(gc_events)
        {
                growable_array result(this);

                std::vector<gc_event> *gc_events = this->gc_events;
                this->gc_events = NULL;

                std::vector<gc_event>::const_iterator iter = gc_events->begin();
                std::vector<gc_event>::const_iterator end = gc_events->end();

                for(; iter != end; iter++)
                {
                        gc_event event = *iter;
                        byte_array *obj = byte_array_from_value(&event);
                        result.add(tag<byte_array>(obj));
                }

                result.trim();
                ctx->push(result.elements.value());

                delete gc_events;
        }
        else
                ctx->push(false_object);
}

}