]> gitweb.factorcode.org Git - factor.git/blob - vm/gc.cpp
Fix three problems discovered by running math.floats.env tests in a loop:
[factor.git] / vm / gc.cpp
1 #include "master.hpp"
2
3 namespace factor
4 {
5
6 gc_event::gc_event(gc_op op_, factor_vm *parent) :
7         op(op_),
8         cards_scanned(0),
9         decks_scanned(0),
10         code_blocks_scanned(0),
11         start_time(nano_count()),
12         card_scan_time(0),
13         code_scan_time(0),
14         data_sweep_time(0),
15         code_sweep_time(0),
16         compaction_time(0)
17 {
18         data_heap_before = parent->data_room();
19         code_heap_before = parent->code_room();
20         start_time = nano_count();
21 }
22
/* Mark the start of the card scan phase; elapsed time is computed in
ended_card_scan() from this timestamp */
void gc_event::started_card_scan()
{
	temp_time = nano_count();
}
27
28 void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_)
29 {
30         cards_scanned += cards_scanned_;
31         decks_scanned += decks_scanned_;
32         card_scan_time = (cell)(nano_count() - temp_time);
33 }
34
/* Mark the start of the code heap scan phase; elapsed time is computed
in ended_code_scan() from this timestamp */
void gc_event::started_code_scan()
{
	temp_time = nano_count();
}
39
40 void gc_event::ended_code_scan(cell code_blocks_scanned_)
41 {
42         code_blocks_scanned += code_blocks_scanned_;
43         code_scan_time = (cell)(nano_count() - temp_time);
44 }
45
/* Mark the start of the data heap sweep phase; elapsed time is
computed in ended_data_sweep() from this timestamp */
void gc_event::started_data_sweep()
{
	temp_time = nano_count();
}
50
/* Record how long the data heap sweep took, measured from the
timestamp taken in started_data_sweep() */
void gc_event::ended_data_sweep()
{
	data_sweep_time = (cell)(nano_count() - temp_time);
}
55
/* Mark the start of the code heap sweep phase; elapsed time is
computed in ended_code_sweep() from this timestamp */
void gc_event::started_code_sweep()
{
	temp_time = nano_count();
}
60
/* Record how long the code heap sweep took, measured from the
timestamp taken in started_code_sweep() */
void gc_event::ended_code_sweep()
{
	code_sweep_time = (cell)(nano_count() - temp_time);
}
65
/* Mark the start of the compaction phase; elapsed time is computed in
ended_compaction() from this timestamp */
void gc_event::started_compaction()
{
	temp_time = nano_count();
}
70
/* Record how long compaction took, measured from the timestamp taken
in started_compaction() */
void gc_event::ended_compaction()
{
	compaction_time = (cell)(nano_count() - temp_time);
}
75
/* Capture the post-collection heap statistics and the total wall-clock
time for this GC event */
void gc_event::ended_gc(factor_vm *parent)
{
	data_heap_after = parent->data_room();
	code_heap_after = parent->code_room();
	total_time = (cell)(nano_count() - start_time);
}
82
/* A gc_state exists for the duration of one garbage collection. It owns
the gc_event used to record statistics about the collection. */
gc_state::gc_state(gc_op op_, factor_vm *parent) : op(op_), start_time(nano_count())
{
	event = new gc_event(op,parent);
}
87
/* The event is normally deleted (and the pointer nulled) by
factor_vm::end_gc() before the gc_state is destroyed; deleting a NULL
pointer here is then a harmless no-op */
gc_state::~gc_state()
{
	delete event;
	event = NULL;
}
93
/* Finish the current collection's event: record post-GC heap sizes and
total time, copy the event into the log if event recording is enabled,
then free it. Nulling the pointer makes ~gc_state's delete a no-op. */
void factor_vm::end_gc()
{
	current_gc->event->ended_gc(this);
	if(gc_events) gc_events->push_back(*current_gc->event);
	delete current_gc->event;
	current_gc->event = NULL;
}
101
102 void factor_vm::start_gc_again()
103 {
104         end_gc();
105
106         switch(current_gc->op)
107         {
108         case collect_nursery_op:
109                 current_gc->op = collect_aging_op;
110                 break;
111         case collect_aging_op:
112                 current_gc->op = collect_to_tenured_op;
113                 break;
114         case collect_to_tenured_op:
115                 current_gc->op = collect_full_op;
116                 break;
117         case collect_full_op:
118         case collect_compact_op:
119                 current_gc->op = collect_growing_heap_op;
120                 break;
121         default:
122                 critical_error("Bad GC op",current_gc->op);
123                 break;
124         }
125
126         current_gc->event = new gc_event(current_gc->op,this);
127 }
128
/* Run one garbage collection of the given kind, retrying with
progressively larger collections (via the must_start_gc_again
exception and start_gc_again()) until one succeeds.
requested_bytes is only consulted by collect_growing_heap_op. */
void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p)
{
	/* Save and reset FPU state before, restore it after, so that
	nano_count() doesn't bomb on Windows if inexact traps are enabled
	(fun huh?) */
	cell fpu_state = get_fpu_state();

	/* GC must not be disabled, and must not be re-entered */
	assert(!gc_off);
	assert(!current_gc);

	current_gc = new gc_state(op,this);

	/* Keep trying to GC higher and higher generations until we don't run out
	of space */
	for(;;)
	{
		try
		{
			/* The op may have been escalated by start_gc_again() on a
			previous iteration; keep the event in sync */
			current_gc->event->op = current_gc->op;

			switch(current_gc->op)
			{
			case collect_nursery_op:
				collect_nursery();
				break;
			case collect_aging_op:
				collect_aging();
				/* Upgrade to a full collection on the spot if tenured
				space is badly fragmented afterwards */
				if(data->high_fragmentation_p())
				{
					current_gc->op = collect_full_op;
					current_gc->event->op = collect_full_op;
					collect_full(trace_contexts_p);
				}
				break;
			case collect_to_tenured_op:
				collect_to_tenured();
				if(data->high_fragmentation_p())
				{
					current_gc->op = collect_full_op;
					current_gc->event->op = collect_full_op;
					collect_full(trace_contexts_p);
				}
				break;
			case collect_full_op:
				collect_full(trace_contexts_p);
				break;
			case collect_compact_op:
				collect_compact(trace_contexts_p);
				break;
			case collect_growing_heap_op:
				collect_growing_heap(requested_bytes,trace_contexts_p);
				break;
			default:
				critical_error("Bad GC op",current_gc->op);
				break;
			}

			/* Collection succeeded; leave the retry loop */
			break;
		}
		catch(const must_start_gc_again &)
		{
			/* We come back here if a generation is full */
			start_gc_again();
			continue;
		}
	}

	end_gc();

	delete current_gc;
	current_gc = NULL;

	set_fpu_state(fpu_state);
}
203
204 /* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
205 uninitialized stack locations before actually calling the GC. See the comment
206 in compiler.cfg.stacks.uninitialized for details. */
207
/* Callstack visitor used before a minor GC: for each compiled frame,
looks up the frame's gc_info by return address and asks the context to
scrub the stack locations the compiler marked as uninitialized (see
the comment above and compiler.cfg.stacks.uninitialized) */
struct call_frame_scrubber {
	factor_vm *parent;
	context *ctx;

	explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) :
		parent(parent_), ctx(ctx_) {}

	void operator()(stack_frame *frame)
	{
		cell return_address = parent->frame_offset(frame);
		/* (cell)-1 appears to mark a frame with no usable return
		address offset — nothing to scrub */
		if(return_address == (cell)-1)
			return;

		code_block *compiled = parent->frame_code(frame);
		gc_info *info = compiled->block_gc_info();

		assert(return_address < compiled->size());
		/* -1 means this return address has no scrub data recorded */
		int index = info->return_address_index(return_address);
		if(index != -1)
			ctx->scrub_stacks(info,index);
	}
};
230
/* Apply the call frame scrubber to every frame of one context's
callstack */
void factor_vm::scrub_context(context *ctx)
{
	call_frame_scrubber scrubber(this,ctx);
	iterate_callstack(ctx,scrubber);
}
236
237 void factor_vm::scrub_contexts()
238 {
239         std::set<context *>::const_iterator begin = active_contexts.begin();
240         std::set<context *>::const_iterator end = active_contexts.end();
241         while(begin != end)
242         {
243                 scrub_context(*begin);
244                 begin++;
245         }
246 }
247
/* Nursery-only collection, invoked by inline GC checks. Uninitialized
stack locations are scrubbed first (see the comment above). */
void factor_vm::primitive_minor_gc()
{
	scrub_contexts();

	gc(collect_nursery_op,
		0, /* requested size */
		true /* trace contexts? */);
}
256
/* Run a full collection over the whole data heap */
void factor_vm::primitive_full_gc()
{
	gc(collect_full_op,
		0, /* requested size */
		true /* trace contexts? */);
}
263
/* Run a compacting collection */
void factor_vm::primitive_compact_gc()
{
	gc(collect_compact_op,
		0, /* requested size */
		true /* trace contexts? */);
}
270
271 /*
272  * It is up to the caller to fill in the object's fields in a meaningful
273  * fashion!
274  */
/* Allocate an object of the given type and size directly in tenured
space, compacting and, if necessary, growing the heap to make room */
object *factor_vm::allot_large_object(cell type, cell size)
{
	/* If tenured space does not have enough room, collect and compact */
	if(!data->tenured->can_allot_p(size))
	{
		primitive_compact_gc();

		/* If it still won't fit, grow the heap */
		if(!data->tenured->can_allot_p(size))
		{
			gc(collect_growing_heap_op,
				size, /* requested size */
				true /* trace contexts? */);
		}
	}

	object *obj = data->tenured->allot(size);

	/* Allows initialization code to store old->new pointers
	without hitting the write barrier in the common case of
	a nursery allocation */
	write_barrier(obj,size);

	obj->initialize(type);
	return obj;
}
301
302 void factor_vm::primitive_enable_gc_events()
303 {
304         gc_events = new std::vector<gc_event>();
305 }
306
307 void factor_vm::primitive_disable_gc_events()
308 {
309         if(gc_events)
310         {
311                 growable_array result(this);
312
313                 std::vector<gc_event> *gc_events = this->gc_events;
314                 this->gc_events = NULL;
315
316                 std::vector<gc_event>::const_iterator iter = gc_events->begin();
317                 std::vector<gc_event>::const_iterator end = gc_events->end();
318
319                 for(; iter != end; iter++)
320                 {
321                         gc_event event = *iter;
322                         byte_array *obj = byte_array_from_value(&event);
323                         result.add(tag<byte_array>(obj));
324                 }
325
326                 result.trim();
327                 ctx->push(result.elements.value());
328
329                 delete this->gc_events;
330         }
331         else
332                 ctx->push(false_object);
333 }
334
335 }