diff --git a/vm/gc.cpp b/vm/gc.cpp
index 6dce41083d6cffcaf8662008f8c89f0e74173230..58e39d8d6274adc992164b1abae7902e25651436 100644
--- a/vm/gc.cpp
+++ b/vm/gc.cpp
@@ -8,52 +8,21 @@ gc_event::gc_event(gc_op op, factor_vm* parent)
       decks_scanned(0),
       code_blocks_scanned(0),
       start_time(nano_count()),
-      card_scan_time(0),
-      code_scan_time(0),
-      data_sweep_time(0),
-      code_sweep_time(0),
-      compaction_time(0) {
+      times{0} {
   data_heap_before = parent->data_room();
-  code_heap_before = parent->code_room();
+  code_heap_before = parent->code->allocator->as_allocator_room();
   start_time = nano_count();
 }
 
-void gc_event::started_card_scan() { temp_time = nano_count(); }
+void gc_event::reset_timer() { temp_time = nano_count(); }
 
-void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_) {
-  cards_scanned += cards_scanned_;
-  decks_scanned += decks_scanned_;
-  card_scan_time = (cell)(nano_count() - temp_time);
-}
-
-void gc_event::started_code_scan() { temp_time = nano_count(); }
-
-void gc_event::ended_code_scan(cell code_blocks_scanned_) {
-  code_blocks_scanned += code_blocks_scanned_;
-  code_scan_time = (cell)(nano_count() - temp_time);
-}
-
-void gc_event::started_data_sweep() { temp_time = nano_count(); }
-
-void gc_event::ended_data_sweep() {
-  data_sweep_time = (cell)(nano_count() - temp_time);
-}
-
-void gc_event::started_code_sweep() { temp_time = nano_count(); }
-
-void gc_event::ended_code_sweep() {
-  code_sweep_time = (cell)(nano_count() - temp_time);
-}
-
-void gc_event::started_compaction() { temp_time = nano_count(); }
-
-void gc_event::ended_compaction() {
-  compaction_time = (cell)(nano_count() - temp_time);
+void gc_event::ended_phase(gc_phase phase) {
+  times[phase] = (cell)(nano_count() - temp_time);
 }
 
 void gc_event::ended_gc(factor_vm* parent) {
   data_heap_after = parent->data_room();
-  code_heap_after = parent->code_room();
+  code_heap_after = parent->code->allocator->as_allocator_room();
   total_time = (cell)(nano_count() - start_time);
 }
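
The first hunk folds the five start/end timer pairs into a single `reset_timer`/`ended_phase` pair that writes into a `times` array indexed by a `gc_phase` enum. The enum itself is defined outside this diff; the following is a minimal sketch of the new call pattern, where the phase names and the struct shape are assumptions for illustration and only the `reset_timer()`/`ended_phase()`/`times[]` pattern comes from the diff:

```cpp
// Hypothetical sketch: phase names are assumed, not the repository's actual
// identifiers. Only the reset_timer()/ended_phase() pattern is from the diff.
#include <chrono>
#include <cstdint>

typedef uintptr_t cell;

// Stand-in for the VM's nano_count() monotonic clock.
static uint64_t nano_count() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}

// Assumed phase enum; the real one lives outside this hunk.
enum gc_phase {
  PHASE_CARD_SCAN,
  PHASE_CODE_SCAN,
  PHASE_DATA_SWEEP,
  PHASE_CODE_SWEEP,
  PHASE_COMPACTION,
  PHASE_COUNT
};

struct gc_event_sketch {
  uint64_t temp_time = 0;
  cell times[PHASE_COUNT] = {0};

  void reset_timer() { temp_time = nano_count(); }
  void ended_phase(gc_phase phase) {
    // Each phase records elapsed time since the last reset_timer() call.
    times[phase] = (cell)(nano_count() - temp_time);
  }
};

// A collector phase would now be bracketed like this:
//   event->reset_timer();
//   /* ... scan cards ... */
//   event->ended_phase(PHASE_CARD_SCAN);
```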
 
@@ -72,37 +41,21 @@ gc_state::~gc_state() {
   }
 }
 
-void factor_vm::end_gc() {
-  if (gc_events) {
-    current_gc->event->ended_gc(this);
-    gc_events->push_back(*current_gc->event);
-  }
-}
-
 void factor_vm::start_gc_again() {
-  end_gc();
-
-  switch (current_gc->op) {
-    case collect_nursery_op:
-      /* Nursery collection can fail if aging does not have enough
-         free space to fit all live objects from nursery. */
-      current_gc->op = collect_aging_op;
-      break;
-    case collect_aging_op:
-      /* Aging collection can fail if the aging semispace cannot fit
-         all the live objects from the other aging semispace and the
-         nursery. */
-      current_gc->op = collect_to_tenured_op;
-      break;
-    default:
-      /* Nothing else should fail mid-collection due to insufficient
-         space in the target generation. */
-      critical_error("Bad GC op", current_gc->op);
-      break;
+  if (current_gc->op == COLLECT_NURSERY_OP) {
+    // Nursery collection can fail if aging does not have enough
+    // free space to fit all live objects from nursery.
+    current_gc->op = COLLECT_AGING_OP;
+  } else if (current_gc->op == COLLECT_AGING_OP) {
+    // Aging collection can fail if the aging semispace cannot fit
+    // all the live objects from the other aging semispace and the
+    // nursery.
+    current_gc->op = COLLECT_TO_TENURED_OP;
+  } else {
+    // Nothing else should fail mid-collection due to insufficient
+    // space in the target generation.
+    critical_error("in start_gc_again, bad GC op", current_gc->op);
   }
-
-  if (gc_events)
-    current_gc->event = new gc_event(current_gc->op, this);
 }
 
 void factor_vm::set_current_gc_op(gc_op op) {
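
`start_gc_again` keeps its escalation order but now reads as a plain if/else chain, and event bookkeeping moves out of it into `gc()` below. Together the two hunks describe the retry protocol: a collector that runs out of room in its target generation throws `must_start_gc_again`, and the op is promoted one generation up. A self-contained model of that control flow, where the stub collector and `main()` are illustrative and only the op order and the exception-driven loop come from the diff:

```cpp
// Minimal model of the escalation loop in factor_vm::gc(). The op names and
// must_start_gc_again come from the diff; the collector here is a stub.
#include <cstdio>

enum gc_op { COLLECT_NURSERY_OP, COLLECT_AGING_OP, COLLECT_TO_TENURED_OP };

struct must_start_gc_again {};

// Stub: pretend the first two target generations are too full to fit the
// live objects being promoted into them.
static void collect(gc_op op) {
  if (op != COLLECT_TO_TENURED_OP)
    throw must_start_gc_again();
}

static gc_op escalate(gc_op op) {
  // Same promotion order as start_gc_again: nursery -> aging -> tenured.
  // (In the real VM, escalating from any other op is a critical_error.)
  return op == COLLECT_NURSERY_OP ? COLLECT_AGING_OP : COLLECT_TO_TENURED_OP;
}

int main() {
  gc_op op = COLLECT_NURSERY_OP;
  for (;;) {
    try {
      collect(op);
      break; // the collection fit in the target generation
    }
    catch (const must_start_gc_again&) {
      op = escalate(op); // retry one generation higher
    }
  }
  std::printf("settled on op %d\n", op);
}
```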
@@ -111,170 +64,105 @@ void factor_vm::set_current_gc_op(gc_op op) {
     current_gc->event->op = op;
 }
 
-void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p) {
+void factor_vm::gc(gc_op op, cell requested_size) {
   FACTOR_ASSERT(!gc_off);
   FACTOR_ASSERT(!current_gc);
 
-  /* Important invariant: tenured space must have enough contiguous free
-     space to fit the entire contents of the aging space and nursery. This is
-     because when doing a full collection, objects from younger generations
-     are promoted before any unreachable tenured objects are freed. */
+  // Important invariant: tenured space must have enough contiguous free
+  // space to fit the entire contents of the aging space and nursery. This is
+  // because when doing a full collection, objects from younger generations
+  // are promoted before any unreachable tenured objects are freed.
   FACTOR_ASSERT(!data->high_fragmentation_p());
 
   current_gc = new gc_state(op, this);
+  if (ctx)
+    ctx->callstack_seg->set_border_locked(false);
   atomic::store(&current_gc_p, true);
 
-  /* Keep trying to GC higher and higher generations until we don't run
-     out of space in the target generation. */
+  // Keep trying to GC higher and higher generations until we don't run
+  // out of space in the target generation.
   for (;;) {
     try {
       if (gc_events)
         current_gc->event->op = current_gc->op;
 
       switch (current_gc->op) {
-        case collect_nursery_op:
+        case COLLECT_NURSERY_OP:
           collect_nursery();
           break;
-        case collect_aging_op:
-          /* We end up here if the above fails. */
+        case COLLECT_AGING_OP:
+          // We end up here if the above fails.
           collect_aging();
           if (data->high_fragmentation_p()) {
-            /* Change GC op so that if we fail again, we crash. */
-            set_current_gc_op(collect_full_op);
-            collect_full(trace_contexts_p);
+            // Change GC op so that if we fail again, we crash.
+            set_current_gc_op(COLLECT_FULL_OP);
+            collect_full();
           }
           break;
-        case collect_to_tenured_op:
-          /* We end up here if the above fails. */
+        case COLLECT_TO_TENURED_OP:
+          // We end up here if the above fails.
           collect_to_tenured();
           if (data->high_fragmentation_p()) {
-            /* Change GC op so that if we fail again, we crash. */
-            set_current_gc_op(collect_full_op);
-            collect_full(trace_contexts_p);
+            // Change GC op so that if we fail again, we crash.
+            set_current_gc_op(COLLECT_FULL_OP);
+            collect_full();
           }
           break;
-        case collect_full_op:
-          collect_full(trace_contexts_p);
+        case COLLECT_FULL_OP:
+          collect_full();
           break;
-        case collect_compact_op:
-          collect_compact(trace_contexts_p);
+        case COLLECT_COMPACT_OP:
+          collect_compact();
           break;
-        case collect_growing_heap_op:
-          collect_growing_heap(requested_size, trace_contexts_p);
+        case COLLECT_GROWING_DATA_HEAP_OP:
+          collect_growing_data_heap(requested_size);
           break;
         default:
-          critical_error("Bad GC op", current_gc->op);
+          critical_error("in gc, bad GC op", current_gc->op);
           break;
       }
 
       break;
     }
     catch (const must_start_gc_again&) {
-      /* We come back here if the target generation is full. */
+      // We come back here if the target generation is full.
       start_gc_again();
-      continue;
     }
   }
 
-  end_gc();
+  if (gc_events) {
+    current_gc->event->ended_gc(this);
+    gc_events->push_back(*current_gc->event);
+  }
 
   atomic::store(&current_gc_p, false);
+  if (ctx)
+    ctx->callstack_seg->set_border_locked(true);
   delete current_gc;
   current_gc = NULL;
 
-  /* Check the invariant again, just in case. */
+  // Check the invariant again, just in case.
   FACTOR_ASSERT(!data->high_fragmentation_p());
 }
 
-/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
-   uninitialized stack locations before actually calling the GC. See the
-   comment in compiler.cfg.stacks.uninitialized for details. */
-
-struct call_frame_scrubber {
-  factor_vm* parent;
-  context* ctx;
-
-  call_frame_scrubber(factor_vm* parent, context* ctx)
-      : parent(parent), ctx(ctx) {}
-
-  void operator()(void* frame_top, cell frame_size, code_block* owner,
-                  void* addr) {
-    cell return_address = owner->offset(addr);
-
-    gc_info* info = owner->block_gc_info();
-
-    FACTOR_ASSERT(return_address < owner->size());
-    cell index = info->return_address_index(return_address);
-    if (index != (cell)-1)
-      ctx->scrub_stacks(info, index);
-  }
-};
-
-void factor_vm::scrub_context(context* ctx) {
-  call_frame_scrubber scrubber(this, ctx);
-  iterate_callstack(ctx, scrubber);
-}
-
-void factor_vm::scrub_contexts() {
-  std::set<context*>::const_iterator begin = active_contexts.begin();
-  std::set<context*>::const_iterator end = active_contexts.end();
-  while (begin != end) {
-    scrub_context(*begin);
-    begin++;
-  }
-}
-
 void factor_vm::primitive_minor_gc() {
-  scrub_contexts();
-
-  gc(collect_nursery_op, 0, /* requested size */
-     true /* trace contexts? */);
+  gc(COLLECT_NURSERY_OP, 0);
 }
 
 void factor_vm::primitive_full_gc() {
-  gc(collect_full_op, 0, /* requested size */
-     true /* trace contexts? */);
+  gc(COLLECT_FULL_OP, 0);
 }
 
 void factor_vm::primitive_compact_gc() {
-  gc(collect_compact_op, 0, /* requested size */
-     true /* trace contexts? */);
-}
-
-/*
- * It is up to the caller to fill in the object's fields in a meaningful
- * fashion!
- */
-/* Allocates memory */
-object* factor_vm::allot_large_object(cell type, cell size) {
-  /* If tenured space does not have enough room, collect and compact */
-  cell requested_size = size + data->high_water_mark();
-  if (!data->tenured->can_allot_p(requested_size)) {
-    primitive_compact_gc();
-
-    /* If it still won't fit, grow the heap */
-    if (!data->tenured->can_allot_p(requested_size)) {
-      gc(collect_growing_heap_op, size, /* requested size */
-         true /* trace contexts? */);
-    }
-  }
-
-  object* obj = data->tenured->allot(size);
-
-  /* Allows initialization code to store old->new pointers
-     without hitting the write barrier in the common case of
-     a nursery allocation */
-  write_barrier(obj, size);
-
-  obj->initialize(type);
-  return obj;
+  gc(COLLECT_COMPACT_OP, 0);
 }
 
 void factor_vm::primitive_enable_gc_events() {
   gc_events = new std::vector<gc_event>();
 }
 
-/* Allocates memory (byte_array_from_value, result.add) */
+// Allocates memory (byte_array_from_value, result.add)
+// XXX: Remember that growable_array has a data_root already
 void factor_vm::primitive_disable_gc_events() {
   if (gc_events) {
     growable_array result(this);
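
The hunk below nulls `this->gc_events` before building the result, which, as the comment above flags, means the allocating calls (`byte_array_from_value`, `result.add`) can trigger a collection without appending new events to the vector being drained. A minimal sketch of this detach-then-drain pattern, with all names illustrative:

```cpp
// Sketch of the detach-then-drain pattern: take ownership of the container,
// null the member, then iterate freely even if the per-item work re-enters
// the producer. All names here are illustrative.
#include <cstdio>
#include <vector>

struct recorder {
  std::vector<int>* events = new std::vector<int>{1, 2, 3};

  void drain() {
    std::vector<int>* local = events;
    events = nullptr; // re-entrant calls now see "recording disabled"
    for (int e : *local)
      std::printf("event %d\n", e); // may itself record events; safe now
    delete local;
  }
};

int main() {
  recorder r;
  r.drain();
}
```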
@@ -282,10 +170,7 @@ void factor_vm::primitive_disable_gc_events() {
     std::vector<gc_event>* gc_events = this->gc_events;
     this->gc_events = NULL;
 
-    std::vector<gc_event>::const_iterator iter = gc_events->begin();
-    std::vector<gc_event>::const_iterator end = gc_events->end();
-
-    for (; iter != end; iter++) {
+    FACTOR_FOR_EACH(*gc_events) {
       gc_event event = *iter;
       byte_array* obj = byte_array_from_value(&event);
       result.add(tag<byte_array>(obj));
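
Finally, the hand-rolled `const_iterator` loop becomes `FACTOR_FOR_EACH`. The macro's definition is not part of this diff; since the loop body still dereferences `*iter`, it presumably expands to a for loop that introduces a variable named `iter`, roughly like this assumed sketch:

```cpp
// Assumed shape of FACTOR_FOR_EACH; the real definition lives elsewhere in
// the tree. Whatever its exact form, it must bind a variable named `iter`,
// because the loop body uses `*iter` directly.
#include <cstdio>
#include <vector>

#define FACTOR_FOR_EACH(seq) \
  for (auto iter = (seq).begin(), _end = (seq).end(); iter != _end; ++iter)

int main() {
  std::vector<int> events = {1, 2, 3};
  FACTOR_FOR_EACH(events) { std::printf("%d\n", *iter); }
}
```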