/* vm/gc.cpp -- Factor VM garbage collector driver.
   Change note: don't call nano_count() from the GC at all unless GC
   event recording is on. */
1 #include "master.hpp"
2
3 namespace factor
4 {
5
6 gc_event::gc_event(gc_op op_, factor_vm *parent) :
7         op(op_),
8         cards_scanned(0),
9         decks_scanned(0),
10         code_blocks_scanned(0),
11         start_time(nano_count()),
12         card_scan_time(0),
13         code_scan_time(0),
14         data_sweep_time(0),
15         code_sweep_time(0),
16         compaction_time(0)
17 {
18         data_heap_before = parent->data_room();
19         code_heap_before = parent->code_room();
20         start_time = nano_count();
21 }
22
/* Stamp the moment a card scan pass begins; ended_card_scan() uses
   temp_time to compute the elapsed time. */
void gc_event::started_card_scan()
{
	temp_time = nano_count();
}
27
28 void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
29 {
30         cards_scanned += cards_scanned_;
31         decks_scanned += decks_scanned_;
32         card_scan_time = (cell)(nano_count() - temp_time);
33 }
34
/* Stamp the moment a code heap scan begins; paired with
   ended_code_scan(). */
void gc_event::started_code_scan()
{
	temp_time = nano_count();
}
39
40 void gc_event::ended_code_scan(cell code_blocks_scanned_)
41 {
42         code_blocks_scanned += code_blocks_scanned_;
43         code_scan_time = (cell)(nano_count() - temp_time);
44 }
45
/* Stamp the moment the data heap sweep begins; paired with
   ended_data_sweep(). */
void gc_event::started_data_sweep()
{
	temp_time = nano_count();
}
50
/* Record the elapsed time of the data heap sweep that
   started_data_sweep() began timing. */
void gc_event::ended_data_sweep()
{
	data_sweep_time = (cell)(nano_count() - temp_time);
}
55
/* Stamp the moment the code heap sweep begins; paired with
   ended_code_sweep(). */
void gc_event::started_code_sweep()
{
	temp_time = nano_count();
}
60
/* Record the elapsed time of the code heap sweep that
   started_code_sweep() began timing. */
void gc_event::ended_code_sweep()
{
	code_sweep_time = (cell)(nano_count() - temp_time);
}
65
/* Stamp the moment heap compaction begins; paired with
   ended_compaction(). */
void gc_event::started_compaction()
{
	temp_time = nano_count();
}
70
/* Record the elapsed time of the compaction pass that
   started_compaction() began timing. */
void gc_event::ended_compaction()
{
	compaction_time = (cell)(nano_count() - temp_time);
}
75
/* Finalize the event at the end of a collection: snapshot the
   after-collection size of both heaps and compute the total wall-clock
   time since the constructor recorded start_time. */
void gc_event::ended_gc(factor_vm *parent)
{
	data_heap_after = parent->data_room();
	code_heap_after = parent->code_room();
	total_time = (cell)(nano_count() - start_time);
}
82
83 gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_)
84 {
85         if(parent->gc_events)
86         {
87                 event = new gc_event(op,parent);
88                 start_time = nano_count();
89         }
90         else
91                 event = NULL;
92 }
93
94 gc_state::~gc_state()
95 {
96         if(event)
97         {
98                 delete event;
99                 event = NULL;
100         }
101 }
102
103 void factor_vm::end_gc()
104 {
105         if(gc_events)
106         {
107                 current_gc->event->ended_gc(this);
108                 gc_events->push_back(*current_gc->event);
109         }
110 }
111
/* Called when a collection ran out of space mid-pass (from the
   must_start_gc_again handler in factor_vm::gc). Logs the aborted pass
   via end_gc(), escalates current_gc->op to the next larger kind of
   collection, and, if recording, allocates a fresh event for the retry.
   NOTE(review): end_gc() only pushes a *copy* of the previous event;
   the heap-allocated event itself is not freed before being replaced
   here -- confirm it is reclaimed elsewhere. */
void factor_vm::start_gc_again()
{
	end_gc();

	/* Escalation ladder: nursery -> aging -> to-tenured -> full ->
	   growing the heap itself. */
	switch(current_gc->op)
	{
	case collect_nursery_op:
		current_gc->op = collect_aging_op;
		break;
	case collect_aging_op:
		current_gc->op = collect_to_tenured_op;
		break;
	case collect_to_tenured_op:
		current_gc->op = collect_full_op;
		break;
	case collect_full_op:
	case collect_compact_op:
		/* Even a full (or compacting) collection failed; the only
		   option left is to grow the heap. */
		current_gc->op = collect_growing_heap_op;
		break;
	default:
		critical_error("Bad GC op",current_gc->op);
		break;
	}

	if(gc_events)
		current_gc->event = new gc_event(current_gc->op,this);
}
139
140 void factor_vm::set_current_gc_op(gc_op op)
141 {
142         current_gc->op = op;
143         if(gc_events) current_gc->event->op = op;
144 }
145
/* Top-level GC driver: run the requested collection, escalating to
   larger collections (via start_gc_again) whenever a generation fills
   up mid-pass.
   op               -- which collection to attempt first
   requested_bytes  -- extra space needed (used by collect_growing_heap)
   trace_contexts_p -- whether to trace live execution contexts */
void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p)
{
	/* Save and reset FPU state before, restore it after, so that
	nano_count() doesn't bomb on Windows if inexact traps are enabled
	(fun huh?) */
	cell fpu_state = get_fpu_state();

	/* GC must not be disabled, and collections must not nest. */
	assert(!gc_off);
	assert(!current_gc);

	current_gc = new gc_state(op,this);

	/* Keep trying to GC higher and higher generations until we don't run out
	of space */
	for(;;)
	{
		try
		{
			/* start_gc_again() may have escalated the op; keep the
			   event record (if any) in sync before dispatching. */
			if(gc_events) current_gc->event->op = current_gc->op;

			switch(current_gc->op)
			{
			case collect_nursery_op:
				collect_nursery();
				break;
			case collect_aging_op:
				collect_aging();
				/* A badly fragmented data heap is promoted straight
				   to a full collection. */
				if(data->high_fragmentation_p())
				{
					set_current_gc_op(collect_full_op);
					collect_full(trace_contexts_p);
				}
				break;
			case collect_to_tenured_op:
				collect_to_tenured();
				if(data->high_fragmentation_p())
				{
					set_current_gc_op(collect_full_op);
					collect_full(trace_contexts_p);
				}
				break;
			case collect_full_op:
				collect_full(trace_contexts_p);
				break;
			case collect_compact_op:
				collect_compact(trace_contexts_p);
				break;
			case collect_growing_heap_op:
				collect_growing_heap(requested_bytes,trace_contexts_p);
				break;
			default:
				critical_error("Bad GC op",current_gc->op);
				break;
			}

			/* Collection succeeded; leave the retry loop. */
			break;
		}
		catch(const must_start_gc_again &)
		{
			/* We come back here if a generation is full */
			start_gc_again();
			continue;
		}
	}

	end_gc();

	delete current_gc;
	current_gc = NULL;

	set_fpu_state(fpu_state);
}
218
/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the comment
in compiler.cfg.stacks.uninitialized for details. */
222
223 struct call_frame_scrubber {
224         factor_vm *parent;
225         context *ctx;
226
227         explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
228                 parent(parent_), ctx(ctx_) {}
229
230         void operator()(stack_frame *frame)
231         {
232                 cell return_address = parent->frame_offset(frame);
233                 if(return_address == (cell)-1)
234                         return;
235
236                 code_block *compiled = parent->frame_code(frame);
237                 gc_info *info = compiled->block_gc_info();
238
239                 assert(return_address < compiled->size());
240                 int index = info->return_address_index(return_address);
241                 if(index != -1)
242                         ctx->scrub_stacks(info,index);
243         }
244 };
245
/* Apply the call frame scrubber to every frame of one context's call
   stack. */
void factor_vm::scrub_context(context *ctx)
{
	call_frame_scrubber scrubber(this,ctx);
	iterate_callstack(ctx,scrubber);
}
251
252 void factor_vm::scrub_contexts()
253 {
254         std::set<context *>::const_iterator begin = active_contexts.begin();
255         std::set<context *>::const_iterator end = active_contexts.end();
256         while(begin != end)
257         {
258                 scrub_context(*begin);
259                 begin++;
260         }
261 }
262
/* Entry point for the inline GC check in compiled code: scrub
   uninitialized stack locations in every active context, then collect
   the nursery. */
void factor_vm::primitive_minor_gc()
{
	scrub_contexts();

	gc(collect_nursery_op,
		0, /* requested size */
		true /* trace contexts? */);
}
271
272 void factor_vm::primitive_full_gc()
273 {
274         gc(collect_full_op,
275                 0, /* requested size */
276                 true /* trace contexts? */);
277 }
278
279 void factor_vm::primitive_compact_gc()
280 {
281         gc(collect_compact_op,
282                 0, /* requested size */
283                 true /* trace contexts? */);
284 }
285
/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
/* Allocate `size' bytes for an object of the given type directly in
   tenured space, compacting and then growing the heap if it does not
   fit. Only the header is initialized here; the caller fills in the
   object's fields (see the comment above). */
object *factor_vm::allot_large_object(cell type, cell size)
{
	/* If tenured space does not have enough room, collect and compact */
	if(!data->tenured->can_allot_p(size))
	{
		primitive_compact_gc();

		/* If it still won't fit, grow the heap */
		if(!data->tenured->can_allot_p(size))
		{
			gc(collect_growing_heap_op,
				size, /* requested size */
				true /* trace contexts? */);
		}
	}

	object *obj = data->tenured->allot(size);

	/* Allows initialization code to store old->new pointers
	without hitting the write barrier in the common case of
	a nursery allocation */
	write_barrier(obj,size);

	obj->initialize(type);
	return obj;
}
316
317 void factor_vm::primitive_enable_gc_events()
318 {
319         gc_events = new std::vector<gc_event>();
320 }
321
322 void factor_vm::primitive_disable_gc_events()
323 {
324         if(gc_events)
325         {
326                 growable_array result(this);
327
328                 std::vector<gc_event> *gc_events = this->gc_events;
329                 this->gc_events = NULL;
330
331                 std::vector<gc_event>::const_iterator iter = gc_events->begin();
332                 std::vector<gc_event>::const_iterator end = gc_events->end();
333
334                 for(; iter != end; iter++)
335                 {
336                         gc_event event = *iter;
337                         byte_array *obj = byte_array_from_value(&event);
338                         result.add(tag<byte_array>(obj));
339                 }
340
341                 result.trim();
342                 ctx->push(result.elements.value());
343
344                 delete this->gc_events;
345         }
346         else
347                 ctx->push(false_object);
348 }
349
350 }