data(data_),
growing_data_heap(growing_data_heap_),
collecting_gen(collecting_gen_),
+ collecting_aging_again(false),
start_time(current_micros()) { }
gc_state::~gc_state() { }
-/* If a generation fills up, throw this error. It is caught in garbage_collection() */
-struct generation_full_condition { };
-
/* Given a pointer to oldspace, copy it to newspace */
object *factor_vm::copy_untagged_object_impl(object *pointer, cell size)
{
if(current_gc->newspace->here + size >= current_gc->newspace->end)
- throw generation_full_condition();
+ longjmp(current_gc->gc_unwind,1);
object *newpointer = allot_zone(current_gc->newspace,size);
void factor_vm::end_gc()
{
-
gc_stats *s = &stats[current_gc->collecting_gen];
cell gc_elapsed = (current_micros() - current_gc->start_time);
/* Keep trying to GC higher and higher generations until we don't run out
of space */
- for(;;)
- {
- try
- {
- begin_gc(requested_bytes);
-
- /* Initialize chase pointer */
- cell scan = current_gc->newspace->here;
-
- /* Trace objects referenced from global environment */
- trace_roots();
-
- /* Trace objects referenced from stacks, unless we're doing
- save-image-and-exit in which case stack objects are irrelevant */
- if(trace_contexts_) trace_contexts();
-
- /* Trace objects referenced from older generations */
- trace_cards();
-
- /* On minor GC, trace code heap roots if it has pointers
- to this generation or younger. Otherwise, tracing data heap objects
- will mark all reachable code blocks, and we free the unmarked ones
- after. */
- if(!current_gc->collecting_tenured_p() && current_gc->collecting_gen >= last_code_heap_scan)
- {
- update_code_heap_roots();
- }
-
- /* do some copying -- this is where most of the work is done */
- copy_reachable_objects(scan,&current_gc->newspace->here);
-
- /* On minor GC, update literal references in code blocks, now that all
- data heap objects are in their final location. On a major GC,
- free all code blocks that did not get marked during tracing. */
- if(current_gc->collecting_tenured_p())
- free_unmarked_code_blocks();
- else
- update_dirty_code_blocks();
-
- /* GC completed without any generations filling up; finish up */
- break;
- }
- catch(const generation_full_condition &c)
- {
- /* We come back here if a generation is full */
-
- /* We have no older generations we can try collecting, so we
- resort to growing the data heap */
- if(current_gc->collecting_tenured_p())
- {
- current_gc->growing_data_heap = true;
-
- /* see the comment in unmark_marked() */
- code->unmark_marked();
- }
- /* we try collecting aging space twice before going on to
- collect tenured */
- else if(data->have_aging_p()
- && current_gc->collecting_gen == data->aging()
- && !current_gc->collecting_aging_again)
- {
- current_gc->collecting_aging_again = true;
- }
- /* Collect the next oldest generation */
- else
- {
- current_gc->collecting_gen++;
- }
- }
- }
-
+ if(setjmp(current_gc->gc_unwind))
+ {
+ /* We come back here if a generation is full */
+
+ /* We have no older generations we can try collecting, so we
+ resort to growing the data heap */
+ if(current_gc->collecting_tenured_p())
+ {
+ current_gc->growing_data_heap = true;
+
+ /* see the comment in unmark_marked() */
+ code->unmark_marked();
+ }
+ /* we try collecting aging space twice before going on to
+ collect tenured */
+ else if(data->have_aging_p()
+ && current_gc->collecting_gen == data->aging()
+ && !current_gc->collecting_aging_again)
+ {
+ current_gc->collecting_aging_again = true;
+ }
+ /* Collect the next oldest generation */
+ else
+ {
+ current_gc->collecting_gen++;
+ }
+ }
+
+ begin_gc(requested_bytes);
+
+ /* Initialize chase pointer */
+ cell scan = current_gc->newspace->here;
+
+ /* Trace objects referenced from global environment */
+ trace_roots();
+
+ /* Trace objects referenced from stacks, unless we're doing
+ save-image-and-exit in which case stack objects are irrelevant */
+ if(trace_contexts_) trace_contexts();
+
+ /* Trace objects referenced from older generations */
+ trace_cards();
+
+ /* On minor GC, trace code heap roots if it has pointers
+ to this generation or younger. Otherwise, tracing data heap objects
+ will mark all reachable code blocks, and we free the unmarked ones
+ after. */
+ if(!current_gc->collecting_tenured_p() && current_gc->collecting_gen >= last_code_heap_scan)
+ {
+ update_code_heap_roots();
+ }
+
+ /* do some copying -- this is where most of the work is done */
+ copy_reachable_objects(scan,&current_gc->newspace->here);
+
+ /* On minor GC, update literal references in code blocks, now that all
+ data heap objects are in their final location. On a major GC,
+ free all code blocks that did not get marked during tracing. */
+ if(current_gc->collecting_tenured_p())
+ free_unmarked_code_blocks();
+ else
+ update_dirty_code_blocks();
+
+ /* GC completed without any generations filling up; finish up */
end_gc();
delete current_gc;
namespace factor
{
-struct factor_vm
+struct factor_vm
{
// First five fields accessed directly by assembler. See vm.factor
- context *stack_chain;
+ context *stack_chain;
zone nursery; /* new objects are allocated here */
cell cards_offset;
cell decks_offset;
bignum *bignum_multiply_unsigned_small_factor(bignum * x, bignum_digit_type y,int negative_p);
void bignum_destructive_add(bignum * bignum, bignum_digit_type n);
void bignum_destructive_scale_up(bignum * bignum, bignum_digit_type factor);
- void bignum_divide_unsigned_large_denominator(bignum * numerator, bignum * denominator,
+ void bignum_divide_unsigned_large_denominator(bignum * numerator, bignum * denominator,
bignum * * quotient, bignum * * remainder, int q_negative_p, int r_negative_p);
void bignum_divide_unsigned_normalized(bignum * u, bignum * v, bignum * q);
- bignum_digit_type bignum_divide_subtract(bignum_digit_type * v_start, bignum_digit_type * v_end,
+ bignum_digit_type bignum_divide_subtract(bignum_digit_type * v_start, bignum_digit_type * v_end,
bignum_digit_type guess, bignum_digit_type * u_start);
- void bignum_divide_unsigned_medium_denominator(bignum * numerator,bignum_digit_type denominator,
+ void bignum_divide_unsigned_medium_denominator(bignum * numerator,bignum_digit_type denominator,
bignum * * quotient, bignum * * remainder,int q_negative_p, int r_negative_p);
void bignum_destructive_normalization(bignum * source, bignum * target, int shift_left);
void bignum_destructive_unnormalization(bignum * bignum, int shift_right);
- bignum_digit_type bignum_digit_divide(bignum_digit_type uh, bignum_digit_type ul,
+ bignum_digit_type bignum_digit_divide(bignum_digit_type uh, bignum_digit_type ul,
bignum_digit_type v, bignum_digit_type * q) /* return value */;
- bignum_digit_type bignum_digit_divide_subtract(bignum_digit_type v1, bignum_digit_type v2,
+ bignum_digit_type bignum_digit_divide_subtract(bignum_digit_type v1, bignum_digit_type v2,
bignum_digit_type guess, bignum_digit_type * u);
- void bignum_divide_unsigned_small_denominator(bignum * numerator, bignum_digit_type denominator,
+ void bignum_divide_unsigned_small_denominator(bignum * numerator, bignum_digit_type denominator,
bignum * * quotient, bignum * * remainder,int q_negative_p, int r_negative_p);
bignum_digit_type bignum_destructive_scale_down(bignum * bignum, bignum_digit_type denominator);
bignum * bignum_remainder_unsigned_small_denominator(bignum * n, bignum_digit_type d, int negative_p);
template<typename Iterator> void each_object(Iterator &iterator);
cell find_all_words();
cell object_size(cell tagged);
-
+
//write barrier
cell allot_markers_offset;
{
return ((cell)c - cards_offset) << card_bits;
}
-
+
inline cell card_offset(card *c)
{
return *(c - (cell)data->cards + (cell)data->allot_markers);
}
-
+
inline card_deck *addr_to_deck(cell a)
{
return (card_deck *)(((cell)a >> deck_bits) + decks_offset);
}
-
+
inline cell deck_to_addr(card_deck *c)
{
return ((cell)c - decks_offset) << deck_bits;
}
-
+
inline card *deck_to_card(card_deck *d)
{
return (card *)((((cell)d - decks_offset) << (deck_bits - card_bits)) + cards_offset);
}
-
+
inline card *addr_to_allot_marker(object *a)
{
return (card *)(((cell)a >> card_bits) + allot_markers_offset);
//math
cell bignum_zero;
cell bignum_pos_one;
- cell bignum_neg_one;
+ cell bignum_neg_one;
void primitive_bignum_to_fixnum();
void primitive_float_to_fixnum();
inline double fixnum_to_float(cell tagged);
template<typename Type> Type *untag_check(cell value);
template<typename Type> Type *untag(cell value);
-
+
//io
void init_c_io();
void io_error();
//code_heap
heap *code;
unordered_map<heap_block *, char *> forwarding;
- typedef void (factor_vm::*code_heap_iterator)(code_block *compiled);
void init_code_heap(cell size);
bool in_code_heap_p(cell ptr);
template<typename Iterator> void iterate_code_heap(Iterator &iter)
{
heap_block *scan = code->first_block();
-
+
while(scan)
{
if(scan->status != B_FREE)
void primitive_set_innermost_stack_frame_quot();
void save_callstack_bottom(stack_frame *callstack_bottom);
template<typename Iterator> void iterate_callstack(cell top, cell bottom, Iterator &iterator);
-
+
/* Every object has a regular representation in the runtime, which makes GC
much simpler. Every slot of the object until binary_payload_start is a pointer
to some other object. */
cell scan = obj;
cell payload_start = binary_payload_start((object *)obj);
cell end = obj + payload_start;
-
+
scan += sizeof(cell);
-
+
while(scan < end)
{
iter((cell *)scan);
const vm_char *default_image_path();
void windows_image_path(vm_char *full_path, vm_char *temp_path, unsigned int length);
bool windows_stat(vm_char *path);
-
+
#if defined(WINNT)
void open_console();
LONG exception_handler(PEXCEPTION_POINTERS pe);
- // next method here:
+ // next method here:
#endif
#else // UNIX
void memory_signal_handler(int signal, siginfo_t *siginfo, void *uap);
#ifdef __APPLE__
void call_fault_handler(exception_type_t exception, exception_data_type_t code, MACH_EXC_STATE_TYPE *exc_state, MACH_THREAD_STATE_TYPE *thread_state, MACH_FLOAT_STATE_TYPE *float_state);
#endif
-
- factor_vm()
- : profiling_p(false),
- secure_gc(false),
- gc_off(false),
- fep_disabled(false),
- full_output(false),
- max_pic_size(0)
- {
- memset(this,0,sizeof(this)); // just to make sure
- }
+
+ factor_vm();
};
#ifndef FACTOR_REENTRANT
- #define FACTOR_SINGLE_THREADED_TESTING
+ #define FACTOR_SINGLE_THREADED_TESTING
#endif
#ifdef FACTOR_SINGLE_THREADED_SINGLETON
/* calls are dispatched using the singleton vm ptr */
- extern factor_vm *vm;
- #define PRIMITIVE_GETVM() vm
- #define PRIMITIVE_OVERFLOW_GETVM() vm
- #define VM_PTR vm
- #define ASSERTVM()
- #define SIGNAL_VM_PTR() vm
+ extern factor_vm *vm;
+ #define PRIMITIVE_GETVM() vm
+ #define PRIMITIVE_OVERFLOW_GETVM() vm
+ #define VM_PTR vm
+ #define ASSERTVM()
+ #define SIGNAL_VM_PTR() vm
#endif
#ifdef FACTOR_SINGLE_THREADED_TESTING
/* calls are dispatched as per multithreaded, but checked against singleton */
- extern factor_vm *vm;
- #define ASSERTVM() assert(vm==myvm)
- #define PRIMITIVE_GETVM() ((factor_vm*)myvm)
- #define PRIMITIVE_OVERFLOW_GETVM() ASSERTVM(); myvm
- #define VM_PTR myvm
- #define SIGNAL_VM_PTR() tls_vm()
+ extern factor_vm *vm;
+ #define ASSERTVM() assert(vm==myvm)
+ #define PRIMITIVE_GETVM() ((factor_vm*)myvm)
+ #define PRIMITIVE_OVERFLOW_GETVM() ASSERTVM(); myvm
+ #define VM_PTR myvm
+ #define SIGNAL_VM_PTR() tls_vm()
#endif
#ifdef FACTOR_REENTRANT_TLS
/* uses thread local storage to obtain vm ptr */
- #define PRIMITIVE_GETVM() tls_vm()
- #define PRIMITIVE_OVERFLOW_GETVM() tls_vm()
- #define VM_PTR tls_vm()
- #define ASSERTVM()
- #define SIGNAL_VM_PTR() tls_vm()
+ #define PRIMITIVE_GETVM() tls_vm()
+ #define PRIMITIVE_OVERFLOW_GETVM() tls_vm()
+ #define VM_PTR tls_vm()
+ #define ASSERTVM()
+ #define SIGNAL_VM_PTR() tls_vm()
#endif
#ifdef FACTOR_REENTRANT
- #define PRIMITIVE_GETVM() ((factor_vm*)myvm)
- #define PRIMITIVE_OVERFLOW_GETVM() ((factor_vm*)myvm)
- #define VM_PTR myvm
- #define ASSERTVM()
- #define SIGNAL_VM_PTR() tls_vm()
+ #define PRIMITIVE_GETVM() ((factor_vm*)myvm)
+ #define PRIMITIVE_OVERFLOW_GETVM() ((factor_vm*)myvm)
+ #define VM_PTR myvm
+ #define ASSERTVM()
+ #define SIGNAL_VM_PTR() tls_vm()
#endif
extern unordered_map<THREADHANDLE, factor_vm *> thread_vms;