From: Slava Pestov Date: Sat, 12 Jun 2010 00:06:00 +0000 (-0400) Subject: GC maps for more compact inline GC checks X-Git-Tag: 0.97~4669^2~24^2~6 X-Git-Url: https://gitweb.factorcode.org/gitweb.cgi?p=factor.git;a=commitdiff_plain;h=806e54630a8645af6463b3654ab65de24d473b56 GC maps for more compact inline GC checks --- diff --git a/GNUmakefile b/GNUmakefile index 300a62f71c..89f7ae1446 100755 --- a/GNUmakefile +++ b/GNUmakefile @@ -46,6 +46,7 @@ ifdef CONFIG vm/free_list.o \ vm/full_collector.o \ vm/gc.o \ + vm/gc_info.o \ vm/image.o \ vm/inline_cache.o \ vm/instruction_operands.o \ diff --git a/Nmakefile b/Nmakefile index 6d9afa1aca..a8b7e103ec 100755 --- a/Nmakefile +++ b/Nmakefile @@ -48,6 +48,7 @@ DLL_OBJS = $(PLAF_DLL_OBJS) \ vm\free_list.obj \ vm\full_collector.obj \ vm\gc.obj \ + vm/gc_info.obj \ vm\image.obj \ vm\inline_cache.obj \ vm\instruction_operands.obj \ diff --git a/basis/bootstrap/stage2.factor b/basis/bootstrap/stage2.factor index da4fbc444b..e3e8b5ddbc 100644 --- a/basis/bootstrap/stage2.factor +++ b/basis/bootstrap/stage2.factor @@ -58,7 +58,6 @@ SYMBOL: bootstrap-time original-error set-global error set-global ; inline - [ ! We time bootstrap nano-count diff --git a/basis/compiler/cfg/gc-checks/gc-checks-tests.factor b/basis/compiler/cfg/gc-checks/gc-checks-tests.factor index 496954de2c..698caa5e68 100644 --- a/basis/compiler/cfg/gc-checks/gc-checks-tests.factor +++ b/basis/compiler/cfg/gc-checks/gc-checks-tests.factor @@ -29,14 +29,6 @@ V{ 2 \ vreg-counter set-global -[ - V{ - T{ ##load-tagged f 3 0 } - T{ ##replace f 3 D 0 } - T{ ##replace f 3 R 3 } - } -] [ [ { D 0 R 3 } wipe-locs ] V{ } make ] unit-test - : gc-check? ( bb -- ? ) instructions>> { @@ -50,15 +42,13 @@ V{ [ V{ - T{ ##load-tagged f 5 0 } - T{ ##replace f 5 D 0 } - T{ ##replace f 5 R 3 } - T{ ##call-gc f { 0 1 2 } } + T{ ##gc-map f V{ 0 } V{ 3 } { 0 1 2 } } + T{ ##call-gc } T{ ##branch } } ] [ - { D 0 R 3 } { 0 1 2 } instructions>> + V{ D 0 R 3 } { 0 1 2 } instructions>> ] unit-test 30 \ vreg-counter set-global @@ -156,11 +146,8 @@ H{ [ V{ - T{ ##load-tagged f 31 0 } - T{ ##replace f 31 D 0 } - T{ ##replace f 31 D 1 } - T{ ##replace f 31 D 2 } - T{ ##call-gc f { 2 } } + T{ ##gc-map f V{ 0 1 2 } V{ } { 2 } } + T{ ##call-gc } T{ ##branch } } ] [ 2 get predecessors>> second instructions>> ] unit-test diff --git a/basis/compiler/cfg/gc-checks/gc-checks.factor b/basis/compiler/cfg/gc-checks/gc-checks.factor index 255e5476e6..60f81f77d9 100644 --- a/basis/compiler/cfg/gc-checks/gc-checks.factor +++ b/basis/compiler/cfg/gc-checks/gc-checks.factor @@ -50,16 +50,12 @@ IN: compiler.cfg.gc-checks ] bi* ] V{ } make >>instructions ; -: wipe-locs ( uninitialized-locs -- ) - '[ - int-rep next-vreg-rep - [ 0 ##load-tagged ] - [ '[ [ _ ] dip ##replace ] each ] bi - ] unless-empty ; +: scrubbed ( uninitialized-locs -- scrub-d scrub-r ) + [ ds-loc? ] partition [ [ n>> ] map ] bi@ ; : ( uninitialized-locs gc-roots -- bb ) [ ] 2dip - [ [ wipe-locs ] [ ##call-gc ] bi* ##branch ] V{ } make + [ [ scrubbed ] dip ##gc-map ##call-gc ##branch ] V{ } make >>instructions t >>unlikely? 
; :: insert-guard ( body check bb -- ) diff --git a/basis/compiler/cfg/instructions/instructions.factor b/basis/compiler/cfg/instructions/instructions.factor index e05335b06c..b46a42d8d5 100644 --- a/basis/compiler/cfg/instructions/instructions.factor +++ b/basis/compiler/cfg/instructions/instructions.factor @@ -819,8 +819,10 @@ INSN: ##check-nursery-branch literal: size cc temp: temp1/int-rep temp2/int-rep ; -INSN: ##call-gc -literal: gc-roots ; +INSN: ##call-gc ; + +INSN: ##gc-map +literal: scrub-d scrub-r gc-roots ; ! Spills and reloads, inserted by register allocator TUPLE: spill-slot { n integer } ; diff --git a/basis/compiler/cfg/linear-scan/assignment/assignment.factor b/basis/compiler/cfg/linear-scan/assignment/assignment.factor index 1780a1c907..e6d220a90c 100644 --- a/basis/compiler/cfg/linear-scan/assignment/assignment.factor +++ b/basis/compiler/cfg/linear-scan/assignment/assignment.factor @@ -142,8 +142,7 @@ RENAMING: assign [ vreg>reg ] [ vreg>reg ] [ vreg>reg ] M: vreg-insn assign-registers-in-insn [ assign-insn-defs ] [ assign-insn-uses ] [ assign-insn-temps ] tri ; -M: ##call-gc assign-registers-in-insn - dup call-next-method +M: ##gc-map assign-registers-in-insn [ [ vreg>reg ] map ] change-gc-roots drop ; M: insn assign-registers-in-insn drop ; diff --git a/basis/compiler/cfg/save-contexts/save-contexts.factor b/basis/compiler/cfg/save-contexts/save-contexts.factor index e5edd7cdff..e2ccf943ad 100644 --- a/basis/compiler/cfg/save-contexts/save-contexts.factor +++ b/basis/compiler/cfg/save-contexts/save-contexts.factor @@ -10,7 +10,6 @@ IN: compiler.cfg.save-contexts : needs-save-context? ( insns -- ? ) [ { - [ ##call-gc? ] [ ##unary-float-function? ] [ ##binary-float-function? ] [ ##alien-invoke? ] diff --git a/basis/compiler/cfg/stacks/uninitialized/uninitialized.factor b/basis/compiler/cfg/stacks/uninitialized/uninitialized.factor index 3d7519e14b..982e9b872c 100644 --- a/basis/compiler/cfg/stacks/uninitialized/uninitialized.factor +++ b/basis/compiler/cfg/stacks/uninitialized/uninitialized.factor @@ -77,5 +77,5 @@ M: uninitialized-analysis join-sets ( sets analysis -- pair ) first2 [ [ ] (uninitialized-locs) ] [ [ ] (uninitialized-locs) ] - bi* append + bi* append f like ] when ; diff --git a/basis/compiler/codegen/codegen.factor b/basis/compiler/codegen/codegen.factor index 68b01beed9..f33999ab89 100755 --- a/basis/compiler/codegen/codegen.factor +++ b/basis/compiler/codegen/codegen.factor @@ -258,6 +258,7 @@ CODEGEN: ##restore-context %restore-context CODEGEN: ##vm-field %vm-field CODEGEN: ##set-vm-field %set-vm-field CODEGEN: ##alien-global %alien-global +CODEGEN: ##gc-map %gc-map CODEGEN: ##call-gc %call-gc CODEGEN: ##spill %spill CODEGEN: ##reload %reload diff --git a/basis/compiler/codegen/fixup/fixup-tests.factor b/basis/compiler/codegen/fixup/fixup-tests.factor new file mode 100644 index 0000000000..fcb33e4937 --- /dev/null +++ b/basis/compiler/codegen/fixup/fixup-tests.factor @@ -0,0 +1,67 @@ +USING: namespaces byte-arrays make compiler.codegen.fixup +bit-arrays accessors classes.struct tools.test kernel math +sequences alien.c-types specialized-arrays boxes ; +SPECIALIZED-ARRAY: uint +IN: compiler.codegen.fixup.tests + +STRUCT: gc-info +{ scrub-d-count uint } +{ scrub-r-count uint } +{ gc-root-count uint } +{ return-address-count uint } ; + +[ ] [ + [ + init-fixup + + 50 % + + { { } { } { } } set-next-gc-map + gc-map-here + + 50 % + + { { 0 4 } { 1 } { 1 3 } } set-next-gc-map + gc-map-here + + emit-gc-info + ] B{ } make + "result" set +] unit-test + +[ 0 ] [ 
"result" get length 16 mod ] unit-test + +[ ] [ + [ + 100 % + + ! The below data is 22 bytes -- 6 bytes padding needed to + ! align + 6 % + + ! Bitmap - 2 bytes + ?{ + ! scrub-d + t f f f t + ! scrub-r + f t + ! gc-roots + f t f t + } underlying>> % + + ! Return addresses - 4 bytes + uint-array{ 100 } underlying>> % + + ! GC info footer - 16 bytes + S{ gc-info + { scrub-d-count 5 } + { scrub-r-count 2 } + { gc-root-count 4 } + { return-address-count 1 } + } (underlying)>> % + ] B{ } make + "expect" set +] unit-test + +[ ] [ "result" get length "expect" get length assert= ] unit-test +[ ] [ "result" get "expect" get assert= ] unit-test diff --git a/basis/compiler/codegen/fixup/fixup.factor b/basis/compiler/codegen/fixup/fixup.factor index 9e366cd408..f0730e91d8 100644 --- a/basis/compiler/codegen/fixup/fixup.factor +++ b/basis/compiler/codegen/fixup/fixup.factor @@ -1,10 +1,11 @@ ! Copyright (C) 2007, 2010 Slava Pestov. ! See http://factorcode.org/license.txt for BSD license. -USING: arrays byte-arrays byte-vectors generic assocs hashtables -io.binary kernel kernel.private math namespaces make sequences -words quotations strings alien.accessors alien.strings layouts -system combinators math.bitwise math.order combinators.smart -accessors growable fry compiler.constants memoize ; +USING: arrays bit-arrays byte-arrays byte-vectors generic assocs +hashtables io.binary kernel kernel.private math namespaces make +sequences words quotations strings alien.accessors alien.strings +layouts system combinators math.bitwise math.order +combinators.smart accessors growable fry compiler.constants +memoize boxes ; IN: compiler.codegen.fixup ! Utilities @@ -95,7 +96,7 @@ MEMO: cached-string>symbol ( symbol -- obj ) string>symbol ; : rel-decks-offset ( class -- ) rt-decks-offset rel-fixup ; -! And the rest +! Labels : compute-target ( label-fixup -- offset ) label>> offset>> [ "Unresolved label" throw ] unless* ; @@ -112,13 +113,7 @@ MEMO: cached-string>symbol ( symbol -- obj ) string>symbol ; [ [ compute-relative-label ] map concat ] bi* ; -: init-fixup ( -- ) - V{ } clone parameter-table set - V{ } clone literal-table set - V{ } clone label-table set - BV{ } clone relocation-table set - V{ } clone binary-literal-table set ; - +! Binary literals : alignment ( align -- n ) [ compiled-offset dup ] dip align swap - ; @@ -136,16 +131,102 @@ MEMO: cached-string>symbol ( symbol -- obj ) string>symbol ; : emit-binary-literals ( -- ) binary-literal-table get [ emit-data ] assoc-each ; +! GC info + +! Every code block either ends with +! +! uint 0 +! +! or +! +! bitmap, byte aligned, three subsequences: +! - +! - +! - +! uint[] +! uint +! uint +! uint +! uint + +SYMBOLS: next-gc-map return-addresses gc-maps ; + +: gc-map? ( triple -- ? ) + ! If there are no stack locations to scrub and no GC roots, + ! there's no point storing the GC map. + [ empty? not ] any? ; + +: gc-map-here ( -- ) + next-gc-map get box> dup gc-map? [ + gc-maps get push + compiled-offset return-addresses get push + ] [ drop ] if ; + +: set-next-gc-map ( gc-map -- ) next-gc-map get >box ; + +: integers>bits ( seq n -- bit-array ) + [ '[ [ t ] dip _ set-nth ] each ] keep ; + +: emit-bitmap ( seqs -- n ) + ! seqs is a sequence of sequences of integers 0..n-1 + [ 0 ] [ + dup [ [ 0 ] [ supremum 1 + ] if-empty ] [ max ] map-reduce + [ '[ _ integers>bits % ] each ] keep + ] if-empty ; + +: emit-uint ( n -- ) + building get push-uint ; + +: gc-info ( -- byte-array ) + [ + return-addresses get empty? 
[ 0 emit-uint ] [ + gc-maps get + [ + [ [ first ] map emit-bitmap ] + [ [ second ] map emit-bitmap ] + [ [ third ] map emit-bitmap ] tri + ] ?{ } make underlying>> % + return-addresses get [ emit-uint ] each + [ emit-uint ] tri@ + return-addresses get length emit-uint + ] if + ] B{ } make ; + +: emit-gc-info ( -- ) + ! We want to place the GC info so that the end is aligned + ! on a 16-byte boundary. + gc-info [ + length compiled-offset + + [ data-alignment get align ] keep - + (align-code) + ] [ % ] bi ; + +: init-fixup ( -- ) + V{ } clone parameter-table set + V{ } clone literal-table set + V{ } clone label-table set + BV{ } clone relocation-table set + V{ } clone binary-literal-table set + V{ } clone return-addresses set + V{ } clone gc-maps set + next-gc-map set ; + +: check-fixup ( seq -- ) + length data-alignment get mod 0 assert= + next-gc-map get occupied>> f assert= ; + : with-fixup ( quot -- code ) '[ + init-fixup [ - init-fixup @ emit-binary-literals + emit-gc-info label-table [ compute-labels ] change parameter-table get >array literal-table get >array relocation-table get >byte-array label-table get ] B{ } make + dup check-fixup ] output>array ; inline diff --git a/basis/cpu/architecture/architecture.factor b/basis/cpu/architecture/architecture.factor index e2a7bdab10..279947bd43 100644 --- a/basis/cpu/architecture/architecture.factor +++ b/basis/cpu/architecture/architecture.factor @@ -488,7 +488,8 @@ HOOK: %write-barrier-imm cpu ( src slot tag temp1 temp2 -- ) ! GC checks HOOK: %check-nursery-branch cpu ( label size cc temp1 temp2 -- ) -HOOK: %call-gc cpu ( gc-roots -- ) +HOOK: %gc-map cpu ( scrub-d scrub-r gc-roots -- ) +HOOK: %call-gc cpu ( -- ) HOOK: %prologue cpu ( n -- ) HOOK: %epilogue cpu ( n -- ) diff --git a/basis/cpu/x86/32/32.factor b/basis/cpu/x86/32/32.factor index 4812937597..b640008f3a 100755 --- a/basis/cpu/x86/32/32.factor +++ b/basis/cpu/x86/32/32.factor @@ -239,11 +239,6 @@ M:: x86.32 stack-cleanup ( stack-size return abi -- n ) M: x86.32 %cleanup ( n -- ) [ ESP swap SUB ] unless-zero ; -M:: x86.32 %call-gc ( gc-roots -- ) - 4 save-vm-ptr - 0 stack@ gc-roots gc-root-offsets %load-reference - "inline_gc" f %alien-invoke ; - M: x86.32 dummy-stack-params? f ; M: x86.32 dummy-int-params? f ; diff --git a/basis/cpu/x86/64/64.factor b/basis/cpu/x86/64/64.factor index bde0507af9..fb53b6fcbb 100644 --- a/basis/cpu/x86/64/64.factor +++ b/basis/cpu/x86/64/64.factor @@ -154,11 +154,6 @@ M:: x86.64 %binary-float-function ( dst src1 src2 func -- ) func "libm" load-library %alien-invoke dst double-rep %load-return ; -M:: x86.64 %call-gc ( gc-roots -- ) - param-reg-0 gc-roots gc-root-offsets %load-reference - param-reg-1 %mov-vm-ptr - "inline_gc" f %alien-invoke ; - M: x86.64 long-long-on-stack? f ; M: x86.64 float-on-stack? 
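The emit-gc-info word above pads the code so that the GC info trailer ends exactly on a data-alignment (16 byte) boundary, which is what lets the VM find the footer at a fixed distance from the end of a code block. A minimal sketch of that padding arithmetic, using the figures from fixup-tests.factor (100 bytes of code followed by 22 bytes of GC info); align_up and gc_info_padding are illustrative helpers, not VM functions:

    #include <cstddef>

    // Mirrors align() in vm/layouts.hpp; data_alignment is 16 in the VM.
    static const size_t data_alignment = 16;

    static size_t align_up(size_t a, size_t b)
    {
        return (a + (b - 1)) & ~(b - 1);
    }

    // How much padding is emitted before the GC info block so that the
    // block's end lands on a data_alignment boundary.
    static size_t gc_info_padding(size_t code_bytes, size_t info_bytes)
    {
        size_t end = code_bytes + info_bytes;
        return align_up(end, data_alignment) - end;
    }

    // From the test: 100 bytes of code, then 2 bytes of bitmap + 4 bytes of
    // return addresses + 16 bytes of footer = 22 bytes of GC info.
    // gc_info_padding(100, 22) == 6, so the code block ends at offset 128.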
f ; diff --git a/basis/cpu/x86/x86.factor b/basis/cpu/x86/x86.factor index 58343a4eee..b8911f9127 100644 --- a/basis/cpu/x86/x86.factor +++ b/basis/cpu/x86/x86.factor @@ -35,9 +35,6 @@ HOOK: reserved-stack-space cpu ( -- n ) : spill@ ( n -- op ) spill-offset special-offset stack@ ; -: gc-root-offsets ( seq -- seq' ) - [ n>> spill-offset special-offset cell + ] map f like ; - : decr-stack-reg ( n -- ) dup 0 = [ drop ] [ stack-reg swap SUB ] if ; @@ -483,8 +480,18 @@ M:: x86 %check-nursery-branch ( label size cc temp1 temp2 -- ) { cc/<= [ label JG ] } } case ; +: gc-root-offsets ( seq -- seq' ) + [ n>> spill-offset special-offset cell + cell /i ] map f like ; + +M: x86 %gc-map ( scrub-d scrub-r gc-roots -- ) + gc-root-offsets 3array set-next-gc-map ; + +M: x86 %call-gc + \ minor-gc %call + gc-map-here ; + M: x86 %alien-global ( dst symbol library -- ) - [ 0 MOV ] 2dip rc-absolute-cell rel-dlsym ; + [ 0 MOV ] 2dip rc-absolute-cell rel-dlsym ; M: x86 %epilogue ( n -- ) cell - incr-stack-reg ; diff --git a/vm/bitwise_hacks.hpp b/vm/bitwise_hacks.hpp index 162d9272c6..ddff576bef 100755 --- a/vm/bitwise_hacks.hpp +++ b/vm/bitwise_hacks.hpp @@ -60,4 +60,11 @@ inline cell popcount(cell x) return x; } +inline bool bitmap_p(u8 *bitmap, cell index) +{ + cell byte = index >> 3; + cell bit = index & 7; + return (bitmap[byte] & (1 << bit)) != 0; +} + } diff --git a/vm/byte_arrays.cpp b/vm/byte_arrays.cpp index d59563d81c..fb1b44c91e 100644 --- a/vm/byte_arrays.cpp +++ b/vm/byte_arrays.cpp @@ -35,16 +35,18 @@ void factor_vm::primitive_resize_byte_array() ctx->push(tag(reallot_array(array.untagged(),capacity))); } -void growable_byte_array::append_bytes(void *elts, cell len) +void growable_byte_array::grow_bytes(cell len) { - cell new_size = count + len; - factor_vm *parent = elements.parent; - if(new_size >= array_capacity(elements.untagged())) - elements = parent->reallot_array(elements.untagged(),new_size * 2); - - memcpy(&elements->data()[count],elts,len); - count += len; + if(count >= array_capacity(elements.untagged())) + elements = elements.parent->reallot_array(elements.untagged(),count * 2); +} + +void growable_byte_array::append_bytes(void *elts, cell len) +{ + cell old_count = count; + grow_bytes(len); + memcpy(&elements->data()[old_count],elts,len); } void growable_byte_array::append_byte_array(cell byte_array_) diff --git a/vm/byte_arrays.hpp b/vm/byte_arrays.hpp index 2da036709f..f0faac248c 100755 --- a/vm/byte_arrays.hpp +++ b/vm/byte_arrays.hpp @@ -7,6 +7,7 @@ struct growable_byte_array { explicit growable_byte_array(factor_vm *parent,cell capacity = 40) : count(0), elements(parent->allot_byte_array(capacity),parent) { } + void grow_bytes(cell len); void append_bytes(void *elts, cell len); void append_byte_array(cell elts); diff --git a/vm/callstack.cpp b/vm/callstack.cpp index bb716cbc6d..e7892405ad 100755 --- a/vm/callstack.cpp +++ b/vm/callstack.cpp @@ -138,9 +138,6 @@ cell factor_vm::frame_scan(stack_frame *frame) } } -namespace -{ - struct stack_frame_accumulator { factor_vm *parent; growable_array frames; @@ -159,8 +156,6 @@ struct stack_frame_accumulator { } }; -} - void factor_vm::primitive_callstack_to_array() { data_root callstack(ctx->pop(),this); diff --git a/vm/code_block_visitor.hpp b/vm/code_block_visitor.hpp index deaa41e4b8..b6581b8c8f 100644 --- a/vm/code_block_visitor.hpp +++ b/vm/code_block_visitor.hpp @@ -12,12 +12,12 @@ Iteration is driven by visit_*() methods. 
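bitmap_p in vm/bitwise_hacks.hpp reads one bit of the byte-aligned GC info bitmap: index >> 3 selects the byte and index & 7 the bit within it, lowest index in the least significant bit. A small self-contained check against the bitmap from fixup-tests.factor (scrub-d bits t f f f t, scrub-r bits f t, gc-root bits f t f t), assuming Factor bit-arrays pack bits in that same LSB-first order; only the bit values come from the patch, the main() driver and the two byte constants are illustrative:

    #include <cassert>

    typedef unsigned char u8;
    typedef unsigned long cell;

    // Same definition as the one added to vm/bitwise_hacks.hpp.
    inline bool bitmap_p(u8 *bitmap, cell index)
    {
        cell byte = index >> 3;
        cell bit = index & 7;
        return (bitmap[byte] & (1 << bit)) != 0;
    }

    int main()
    {
        // ?{ t f f f t  f t  f t f t } from fixup-tests.factor,
        // packed LSB-first into two bytes.
        u8 bitmap[2] = { 0x51, 0x05 };

        assert(bitmap_p(bitmap, 0));   // first scrub-d location is live
        assert(!bitmap_p(bitmap, 1));
        assert(bitmap_p(bitmap, 4));   // last scrub-d location
        assert(bitmap_p(bitmap, 6));   // second scrub-r location
        assert(bitmap_p(bitmap, 10));  // last gc-root bit
        return 0;
    }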
Some of them define GC roots: - visit_context_code_blocks() - visit_callback_code_blocks() */ -template struct code_block_visitor { +template struct code_block_visitor { factor_vm *parent; - Visitor visitor; + Fixup fixup; - explicit code_block_visitor(factor_vm *parent_, Visitor visitor_) : - parent(parent_), visitor(visitor_) {} + explicit code_block_visitor(factor_vm *parent_, Fixup fixup_) : + parent(parent_), fixup(fixup_) {} code_block *visit_code_block(code_block *compiled); void visit_object_code_block(object *obj); @@ -26,33 +26,34 @@ template struct code_block_visitor { void visit_uninitialized_code_blocks(); }; -template -code_block *code_block_visitor::visit_code_block(code_block *compiled) +template +code_block *code_block_visitor::visit_code_block(code_block *compiled) { - return visitor(compiled); + return fixup.fixup_code(compiled); } -template +template struct call_frame_code_block_visitor { factor_vm *parent; - Visitor visitor; + Fixup fixup; - explicit call_frame_code_block_visitor(factor_vm *parent_, Visitor visitor_) : - parent(parent_), visitor(visitor_) {} + explicit call_frame_code_block_visitor(factor_vm *parent_, Fixup fixup_) : + parent(parent_), fixup(fixup_) {} void operator()(stack_frame *frame) { - cell offset = (cell)FRAME_RETURN_ADDRESS(frame,parent) - (cell)frame->entry_point; + code_block *old_block = parent->frame_code(frame); + cell offset = (char *)FRAME_RETURN_ADDRESS(frame,parent) - (char *)old_block; - code_block *new_block = visitor(parent->frame_code(frame)); + const code_block *new_block = fixup.fixup_code(old_block); frame->entry_point = new_block->entry_point(); - FRAME_RETURN_ADDRESS(frame,parent) = (void *)((cell)frame->entry_point + offset); + FRAME_RETURN_ADDRESS(frame,parent) = (char *)new_block + offset; } }; -template -void code_block_visitor::visit_object_code_block(object *obj) +template +void code_block_visitor::visit_object_code_block(object *obj) { switch(obj->type()) { @@ -60,9 +61,9 @@ void code_block_visitor::visit_object_code_block(object *obj) { word *w = (word *)obj; if(w->code) - w->code = visitor(w->code); + w->code = visit_code_block(w->code); if(w->profiling) - w->profiling = visitor(w->profiling); + w->profiling = visit_code_block(w->profiling); parent->update_word_entry_point(w); break; @@ -71,24 +72,24 @@ void code_block_visitor::visit_object_code_block(object *obj) { quotation *q = (quotation *)obj; if(q->code) - parent->set_quot_entry_point(q,visitor(q->code)); + parent->set_quot_entry_point(q,visit_code_block(q->code)); break; } case CALLSTACK_TYPE: { callstack *stack = (callstack *)obj; - call_frame_code_block_visitor call_frame_visitor(parent,visitor); + call_frame_code_block_visitor call_frame_visitor(parent,fixup); parent->iterate_callstack_object(stack,call_frame_visitor); break; } } } -template +template struct embedded_code_pointers_visitor { - Visitor visitor; + Fixup fixup; - explicit embedded_code_pointers_visitor(Visitor visitor_) : visitor(visitor_) {} + explicit embedded_code_pointers_visitor(Fixup fixup_) : fixup(fixup_) {} void operator()(instruction_operand op) { @@ -96,29 +97,29 @@ struct embedded_code_pointers_visitor { if(type == RT_ENTRY_POINT || type == RT_ENTRY_POINT_PIC || type == RT_ENTRY_POINT_PIC_TAIL) - op.store_code_block(visitor(op.load_code_block())); + op.store_code_block(fixup.fixup_code(op.load_code_block())); } }; -template -void code_block_visitor::visit_embedded_code_pointers(code_block *compiled) +template +void code_block_visitor::visit_embedded_code_pointers(code_block 
*compiled) { if(!parent->code->uninitialized_p(compiled)) { - embedded_code_pointers_visitor visitor(this->visitor); - compiled->each_instruction_operand(visitor); + embedded_code_pointers_visitor operand_visitor(fixup); + compiled->each_instruction_operand(operand_visitor); } } -template -void code_block_visitor::visit_context_code_blocks() +template +void code_block_visitor::visit_context_code_blocks() { - call_frame_code_block_visitor call_frame_visitor(parent,visitor); + call_frame_code_block_visitor call_frame_visitor(parent,fixup); parent->iterate_active_callstacks(call_frame_visitor); } -template -void code_block_visitor::visit_uninitialized_code_blocks() +template +void code_block_visitor::visit_uninitialized_code_blocks() { std::map *uninitialized_blocks = &parent->code->uninitialized_blocks; std::map::const_iterator iter = uninitialized_blocks->begin(); @@ -128,7 +129,7 @@ void code_block_visitor::visit_uninitialized_code_blocks() for(; iter != end; iter++) { new_uninitialized_blocks.insert(std::make_pair( - visitor(iter->first), + fixup.fixup_code(iter->first), iter->second)); } diff --git a/vm/code_blocks.hpp b/vm/code_blocks.hpp index baf763357c..f20e2da372 100644 --- a/vm/code_blocks.hpp +++ b/vm/code_blocks.hpp @@ -43,11 +43,22 @@ struct code_block return size; } + template cell size(Fixup fixup) const + { + return size(); + } + void *entry_point() const { return (void *)(this + 1); } + /* GC info is stored at the end of the block */ + gc_info *block_gc_info() const + { + return (gc_info *)((u8 *)this + size() - sizeof(gc_info)); + } + void flush_icache() { factor::flush_icache((cell)this,size()); diff --git a/vm/collector.hpp b/vm/collector.hpp index 0b8b473e8b..400e15b974 100644 --- a/vm/collector.hpp +++ b/vm/collector.hpp @@ -3,15 +3,17 @@ namespace factor struct must_start_gc_again {}; -template struct data_workhorse { +template struct gc_workhorse : no_fixup { factor_vm *parent; TargetGeneration *target; Policy policy; + code_heap *code; - explicit data_workhorse(factor_vm *parent_, TargetGeneration *target_, Policy policy_) : + explicit gc_workhorse(factor_vm *parent_, TargetGeneration *target_, Policy policy_) : parent(parent_), target(target_), - policy(policy_) {} + policy(policy_), + code(parent->code) {} object *resolve_forwarding(object *untagged) { @@ -39,7 +41,7 @@ template struct data_workhorse { return newpointer; } - object *operator()(object *obj) + object *fixup_data(object *obj) { if(!policy.should_copy_p(obj)) { @@ -59,17 +61,18 @@ template struct data_workhorse { return forwarding; } } -}; -template -inline static slot_visitor > make_data_visitor( - factor_vm *parent, - TargetGeneration *target, - Policy policy) -{ - return slot_visitor >(parent, - data_workhorse(parent,target,policy)); -} + code_block *fixup_code(code_block *compiled) + { + if(!code->marked_p(compiled)) + { + code->set_marked_p(compiled); + parent->mark_stack.push_back((cell)compiled + 1); + } + + return compiled; + } +}; struct dummy_unmarker { void operator()(card *ptr) {} @@ -92,7 +95,8 @@ struct collector { data_heap *data; code_heap *code; TargetGeneration *target; - slot_visitor > data_visitor; + gc_workhorse workhorse; + slot_visitor > data_visitor; cell cards_scanned; cell decks_scanned; cell code_blocks_scanned; @@ -102,7 +106,8 @@ struct collector { data(parent_->data), code(parent_->code), target(target_), - data_visitor(make_data_visitor(parent_,target_,policy_)), + workhorse(parent,target,policy_), + data_visitor(parent,workhorse), cards_scanned(0), decks_scanned(0), 
code_blocks_scanned(0) {} diff --git a/vm/compaction.cpp b/vm/compaction.cpp index 5e52c70b0c..3deb0afc2f 100644 --- a/vm/compaction.cpp +++ b/vm/compaction.cpp @@ -2,105 +2,99 @@ namespace factor { -template struct forwarder { - mark_bits *forwarding_map; +struct compaction_fixup { + mark_bits *data_forwarding_map; + mark_bits *code_forwarding_map; + const object **data_finger; + const code_block **code_finger; - explicit forwarder(mark_bits *forwarding_map_) : - forwarding_map(forwarding_map_) {} + explicit compaction_fixup( + mark_bits *data_forwarding_map_, + mark_bits *code_forwarding_map_, + const object **data_finger_, + const code_block **code_finger_) : + data_forwarding_map(data_forwarding_map_), + code_forwarding_map(code_forwarding_map_), + data_finger(data_finger_), + code_finger(code_finger_) {} - Block *operator()(Block *block) + object *fixup_data(object *obj) { - return forwarding_map->forward_block(block); + return data_forwarding_map->forward_block(obj); } -}; - -static inline cell tuple_size_with_forwarding(mark_bits *forwarding_map, object *obj) -{ - /* The tuple layout may or may not have been forwarded already. Tricky. */ - object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout); - tuple_layout *layout; - if(layout_obj < obj) + code_block *fixup_code(code_block *compiled) { - /* It's already been moved up; dereference through forwarding - map to get the size */ - layout = (tuple_layout *)forwarding_map->forward_block(layout_obj); + return code_forwarding_map->forward_block(compiled); } - else + + object *translate_data(const object *obj) { - /* It hasn't been moved up yet; dereference directly */ - layout = (tuple_layout *)layout_obj; + if(obj < *data_finger) + return fixup_data((object *)obj); + else + return (object *)obj; } - return tuple_size(layout); -} - -struct compaction_sizer { - mark_bits *forwarding_map; + code_block *translate_code(const code_block *compiled) + { + if(compiled < *code_finger) + return fixup_code((code_block *)compiled); + else + return (code_block *)compiled; + } - explicit compaction_sizer(mark_bits *forwarding_map_) : - forwarding_map(forwarding_map_) {} + cell size(object *obj) + { + if(data_forwarding_map->marked_p(obj)) + return obj->size(*this); + else + return data_forwarding_map->unmarked_block_size(obj); + } - cell operator()(object *obj) + cell size(code_block *compiled) { - if(!forwarding_map->marked_p(obj)) - return forwarding_map->unmarked_block_size(obj); - else if(obj->type() == TUPLE_TYPE) - return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment); + if(code_forwarding_map->marked_p(compiled)) + return compiled->size(*this); else - return obj->size(); + return code_forwarding_map->unmarked_block_size(compiled); } }; struct object_compaction_updater { factor_vm *parent; - mark_bits *code_forwarding_map; - mark_bits *data_forwarding_map; + compaction_fixup fixup; object_start_map *starts; - explicit object_compaction_updater(factor_vm *parent_, - mark_bits *data_forwarding_map_, - mark_bits *code_forwarding_map_) : + explicit object_compaction_updater(factor_vm *parent_, compaction_fixup fixup_) : parent(parent_), - code_forwarding_map(code_forwarding_map_), - data_forwarding_map(data_forwarding_map_), + fixup(fixup_), starts(&parent->data->tenured->starts) {} void operator()(object *old_address, object *new_address, cell size) { - cell payload_start; - if(old_address->type() == TUPLE_TYPE) - payload_start = tuple_size_with_forwarding(data_forwarding_map,old_address); - else - payload_start = 
old_address->binary_payload_start(); - - memmove(new_address,old_address,size); + slot_visitor slot_forwarder(parent,fixup); + slot_forwarder.visit_slots(new_address); - slot_visitor > slot_forwarder(parent,forwarder(data_forwarding_map)); - slot_forwarder.visit_slots(new_address,payload_start); - - code_block_visitor > code_forwarder(parent,forwarder(code_forwarding_map)); + code_block_visitor code_forwarder(parent,fixup); code_forwarder.visit_object_code_block(new_address); starts->record_object_start_offset(new_address); } }; -template +template struct code_block_compaction_relocation_visitor { factor_vm *parent; code_block *old_address; - slot_visitor slot_forwarder; - code_block_visitor > code_forwarder; + Fixup fixup; explicit code_block_compaction_relocation_visitor(factor_vm *parent_, code_block *old_address_, - slot_visitor slot_forwarder_, - code_block_visitor > code_forwarder_) : + Fixup fixup_) : parent(parent_), old_address(old_address_), - slot_forwarder(slot_forwarder_), - code_forwarder(code_forwarder_) {} + fixup(fixup_) {} void operator()(instruction_operand op) { @@ -109,16 +103,24 @@ struct code_block_compaction_relocation_visitor { switch(op.rel_type()) { case RT_LITERAL: - op.store_value(slot_forwarder.visit_pointer(op.load_value(old_offset))); - break; + { + cell value = op.load_value(old_offset); + if(immediate_p(value)) + op.store_value(value); + else + op.store_value(RETAG(fixup.fixup_data(untag(value)),TAG(value))); + break; + } case RT_ENTRY_POINT: case RT_ENTRY_POINT_PIC: case RT_ENTRY_POINT_PIC_TAIL: - op.store_code_block(code_forwarder.visit_code_block(op.load_code_block(old_offset))); - break; case RT_HERE: - op.store_value(op.load_value(old_offset) - (cell)old_address + (cell)op.parent_code_block()); - break; + { + cell value = op.load_value(old_offset); + cell offset = value & (data_alignment - 1); + op.store_value((cell)fixup.fixup_code((code_block *)value) + offset); + break; + } case RT_THIS: case RT_CARDS_OFFSET: case RT_DECKS_OFFSET: @@ -131,26 +133,27 @@ struct code_block_compaction_relocation_visitor { } }; -template +template struct code_block_compaction_updater { factor_vm *parent; - slot_visitor slot_forwarder; - code_block_visitor > code_forwarder; + Fixup fixup; + slot_visitor data_forwarder; + code_block_visitor code_forwarder; explicit code_block_compaction_updater(factor_vm *parent_, - slot_visitor slot_forwarder_, - code_block_visitor > code_forwarder_) : + Fixup fixup_, + slot_visitor data_forwarder_, + code_block_visitor code_forwarder_) : parent(parent_), - slot_forwarder(slot_forwarder_), + fixup(fixup_), + data_forwarder(data_forwarder_), code_forwarder(code_forwarder_) {} void operator()(code_block *old_address, code_block *new_address, cell size) { - memmove(new_address,old_address,size); + data_forwarder.visit_code_block_objects(new_address); - slot_forwarder.visit_code_block_objects(new_address); - - code_block_compaction_relocation_visitor visitor(parent,old_address,slot_forwarder,code_forwarder); + code_block_compaction_relocation_visitor visitor(parent,old_address,fixup); new_address->each_instruction_operand(visitor); } }; @@ -196,8 +199,12 @@ void factor_vm::collect_compact_impl(bool trace_contexts_p) data_forwarding_map->compute_forwarding(); code_forwarding_map->compute_forwarding(); - slot_visitor > slot_forwarder(this,forwarder(data_forwarding_map)); - code_block_visitor > code_forwarder(this,forwarder(code_forwarding_map)); + const object *data_finger = tenured->first_block(); + const code_block *code_finger = 
code->allocator->first_block(); + + compaction_fixup fixup(data_forwarding_map,code_forwarding_map,&data_finger,&code_finger); + slot_visitor data_forwarder(this,fixup); + code_block_visitor code_forwarder(this,fixup); code_forwarder.visit_uninitialized_code_blocks(); @@ -206,20 +213,18 @@ void factor_vm::collect_compact_impl(bool trace_contexts_p) /* Slide everything in tenured space up, and update data and code heap pointers inside objects. */ - object_compaction_updater object_updater(this,data_forwarding_map,code_forwarding_map); - compaction_sizer object_sizer(data_forwarding_map); - tenured->compact(object_updater,object_sizer); + object_compaction_updater object_updater(this,fixup); + tenured->compact(object_updater,fixup,&data_finger); /* Slide everything in the code heap up, and update data and code heap pointers inside code blocks. */ - code_block_compaction_updater > code_block_updater(this,slot_forwarder,code_forwarder); - standard_sizer code_block_sizer; - code->allocator->compact(code_block_updater,code_block_sizer); + code_block_compaction_updater code_block_updater(this,fixup,data_forwarder,code_forwarder); + code->allocator->compact(code_block_updater,fixup,&code_finger); - slot_forwarder.visit_roots(); + data_forwarder.visit_roots(); if(trace_contexts_p) { - slot_forwarder.visit_contexts(); + data_forwarder.visit_contexts(); code_forwarder.visit_context_code_blocks(); } @@ -229,10 +234,56 @@ void factor_vm::collect_compact_impl(bool trace_contexts_p) current_gc->event->ended_compaction(); } +struct code_compaction_fixup { + mark_bits *code_forwarding_map; + const code_block **code_finger; + + explicit code_compaction_fixup(mark_bits *code_forwarding_map_, + const code_block **code_finger_) : + code_forwarding_map(code_forwarding_map_), + code_finger(code_finger_) {} + + object *fixup_data(object *obj) + { + return obj; + } + + code_block *fixup_code(code_block *compiled) + { + return code_forwarding_map->forward_block(compiled); + } + + object *translate_data(const object *obj) + { + return fixup_data((object *)obj); + } + + code_block *translate_code(const code_block *compiled) + { + if(compiled >= *code_finger) + return fixup_code((code_block *)compiled); + else + return (code_block *)compiled; + } + + cell size(object *obj) + { + return obj->size(); + } + + cell size(code_block *compiled) + { + if(code_forwarding_map->marked_p(compiled)) + return compiled->size(*this); + else + return code_forwarding_map->unmarked_block_size(compiled); + } +}; + struct object_grow_heap_updater { - code_block_visitor > code_forwarder; + code_block_visitor code_forwarder; - explicit object_grow_heap_updater(code_block_visitor > code_forwarder_) : + explicit object_grow_heap_updater(code_block_visitor code_forwarder_) : code_forwarder(code_forwarder_) {} void operator()(object *obj) @@ -241,10 +292,6 @@ struct object_grow_heap_updater { } }; -struct dummy_slot_forwarder { - object *operator()(object *obj) { return obj; } -}; - /* Compact just the code heap, after growing the data heap */ void factor_vm::collect_compact_code_impl(bool trace_contexts_p) { @@ -252,8 +299,11 @@ void factor_vm::collect_compact_code_impl(bool trace_contexts_p) mark_bits *code_forwarding_map = &code->allocator->state; code_forwarding_map->compute_forwarding(); - slot_visitor slot_forwarder(this,dummy_slot_forwarder()); - code_block_visitor > code_forwarder(this,forwarder(code_forwarding_map)); + const code_block *code_finger = code->allocator->first_block(); + + code_compaction_fixup 
fixup(code_forwarding_map,&code_finger); + slot_visitor data_forwarder(this,fixup); + code_block_visitor code_forwarder(this,fixup); code_forwarder.visit_uninitialized_code_blocks(); @@ -261,14 +311,13 @@ void factor_vm::collect_compact_code_impl(bool trace_contexts_p) code_forwarder.visit_context_code_blocks(); /* Update code heap references in data heap */ - object_grow_heap_updater updater(code_forwarder); - each_object(updater); + object_grow_heap_updater object_updater(code_forwarder); + each_object(object_updater); /* Slide everything in the code heap up, and update code heap pointers inside code blocks. */ - code_block_compaction_updater code_block_updater(this,slot_forwarder,code_forwarder); - standard_sizer code_block_sizer; - code->allocator->compact(code_block_updater,code_block_sizer); + code_block_compaction_updater code_block_updater(this,fixup,data_forwarder,code_forwarder); + code->allocator->compact(code_block_updater,fixup,&code_finger); update_code_roots_for_compaction(); callbacks->update(); diff --git a/vm/contexts.cpp b/vm/contexts.cpp index 25fe0e5280..94bbe7d508 100644 --- a/vm/contexts.cpp +++ b/vm/contexts.cpp @@ -55,6 +55,31 @@ void context::fix_stacks() reset_retainstack(); } +void context::scrub_stacks(gc_info *info, cell index) +{ + u8 *bitmap = info->gc_info_bitmap(); + + { + cell base = info->scrub_d_base(index); + + for(cell loc = 0; loc < info->scrub_d_count; loc++) + { + if(bitmap_p(bitmap,base + loc)) + ((cell *)datastack)[-loc] = 0; + } + } + + { + cell base = info->scrub_r_base(index); + + for(cell loc = 0; loc < info->scrub_r_count; loc++) + { + if(bitmap_p(bitmap,base + loc)) + printf("scrub retainstack %ld\n",loc); + } + } +} + context::~context() { delete datastack_seg; diff --git a/vm/contexts.hpp b/vm/contexts.hpp index 582fab173f..4aa7d7c221 100644 --- a/vm/contexts.hpp +++ b/vm/contexts.hpp @@ -45,6 +45,7 @@ struct context { void reset_context_objects(); void reset(); void fix_stacks(); + void scrub_stacks(gc_info *info, cell index); cell peek() { diff --git a/vm/data_heap.cpp b/vm/data_heap.cpp index 9b28215bb8..3648ba7f48 100755 --- a/vm/data_heap.cpp +++ b/vm/data_heap.cpp @@ -126,85 +126,6 @@ void factor_vm::init_data_heap(cell young_size, cell aging_size, cell tenured_si set_data_heap(new data_heap(young_size,aging_size,tenured_size)); } -/* Size of the object pointed to by an untagged pointer */ -cell object::size() const -{ - if(free_p()) return ((free_heap_block *)this)->size(); - - switch(type()) - { - case ARRAY_TYPE: - return align(array_size((array*)this),data_alignment); - case BIGNUM_TYPE: - return align(array_size((bignum*)this),data_alignment); - case BYTE_ARRAY_TYPE: - return align(array_size((byte_array*)this),data_alignment); - case STRING_TYPE: - return align(string_size(string_capacity((string*)this)),data_alignment); - case TUPLE_TYPE: - { - tuple_layout *layout = (tuple_layout *)UNTAG(((tuple *)this)->layout); - return align(tuple_size(layout),data_alignment); - } - case QUOTATION_TYPE: - return align(sizeof(quotation),data_alignment); - case WORD_TYPE: - return align(sizeof(word),data_alignment); - case FLOAT_TYPE: - return align(sizeof(boxed_float),data_alignment); - case DLL_TYPE: - return align(sizeof(dll),data_alignment); - case ALIEN_TYPE: - return align(sizeof(alien),data_alignment); - case WRAPPER_TYPE: - return align(sizeof(wrapper),data_alignment); - case CALLSTACK_TYPE: - return align(callstack_object_size(untag_fixnum(((callstack *)this)->length)),data_alignment); - default: - critical_error("Invalid 
header",(cell)this); - return 0; /* can't happen */ - } -} - -/* The number of cells from the start of the object which should be scanned by -the GC. Some types have a binary payload at the end (string, word, DLL) which -we ignore. */ -cell object::binary_payload_start() const -{ - if(free_p()) return 0; - - switch(type()) - { - /* these objects do not refer to other objects at all */ - case FLOAT_TYPE: - case BYTE_ARRAY_TYPE: - case BIGNUM_TYPE: - case CALLSTACK_TYPE: - return 0; - /* these objects have some binary data at the end */ - case WORD_TYPE: - return sizeof(word) - sizeof(cell) * 3; - case ALIEN_TYPE: - return sizeof(cell) * 3; - case DLL_TYPE: - return sizeof(cell) * 2; - case QUOTATION_TYPE: - return sizeof(quotation) - sizeof(cell) * 2; - case STRING_TYPE: - return sizeof(string); - /* everything else consists entirely of pointers */ - case ARRAY_TYPE: - return array_size(array_capacity((array*)this)); - case TUPLE_TYPE: - return tuple_size(untag(((tuple *)this)->layout)); - case WRAPPER_TYPE: - return sizeof(wrapper); - default: - critical_error("Invalid header",(cell)this); - return 0; /* can't happen */ - } -} - data_heap_room factor_vm::data_room() { data_heap_room room; diff --git a/vm/fixup.hpp b/vm/fixup.hpp new file mode 100644 index 0000000000..c92661a03b --- /dev/null +++ b/vm/fixup.hpp @@ -0,0 +1,44 @@ +namespace factor +{ + +template +struct identity { + T operator()(T t) + { + return t; + } +}; + +struct no_fixup { + object *fixup_data(object *obj) + { + return obj; + } + + code_block *fixup_code(code_block *compiled) + { + return compiled; + } + + object *translate_data(const object *obj) + { + return fixup_data((object *)obj); + } + + code_block *translate_code(const code_block *compiled) + { + return fixup_code((code_block *)compiled); + } + + cell size(object *obj) + { + return obj->size(); + } + + cell size(code_block *compiled) + { + return compiled->size(); + } +}; + +} diff --git a/vm/free_list_allocator.hpp b/vm/free_list_allocator.hpp index 4c725bcf4f..7d7807ef9a 100644 --- a/vm/free_list_allocator.hpp +++ b/vm/free_list_allocator.hpp @@ -23,8 +23,8 @@ template struct free_list_allocator { cell largest_free_block(); cell free_block_count(); void sweep(); - template void compact(Iterator &iter, Sizer &sizer); - template void iterate(Iterator &iter, Sizer &sizer); + template void compact(Iterator &iter, Fixup fixup, const Block **finger); + template void iterate(Iterator &iter, Fixup fixup); template void iterate(Iterator &iter); }; @@ -155,14 +155,17 @@ template struct heap_compactor { mark_bits *state; char *address; Iterator &iter; + const Block **finger; - explicit heap_compactor(mark_bits *state_, Block *address_, Iterator &iter_) : - state(state_), address((char *)address_), iter(iter_) {} + explicit heap_compactor(mark_bits *state_, Block *address_, Iterator &iter_, const Block **finger_) : + state(state_), address((char *)address_), iter(iter_), finger(finger_) {} void operator()(Block *block, cell size) { if(this->state->marked_p(block)) { + *finger = block; + memmove((Block *)address,block,size); iter(block,(Block *)address,size); address += size; } @@ -172,11 +175,11 @@ template struct heap_compactor { /* The forwarding map must be computed first by calling state.compute_forwarding(). 
*/ template -template -void free_list_allocator::compact(Iterator &iter, Sizer &sizer) +template +void free_list_allocator::compact(Iterator &iter, Fixup fixup, const Block **finger) { - heap_compactor compactor(&state,first_block(),iter); - iterate(compactor,sizer); + heap_compactor compactor(&state,first_block(),iter,finger); + iterate(compactor,fixup); /* Now update the free list; there will be a single free block at the end */ @@ -185,34 +188,26 @@ void free_list_allocator::compact(Iterator &iter, Sizer &sizer) /* During compaction we have to be careful and measure object sizes differently */ template -template -void free_list_allocator::iterate(Iterator &iter, Sizer &sizer) +template +void free_list_allocator::iterate(Iterator &iter, Fixup fixup) { Block *scan = first_block(); Block *end = last_block(); while(scan != end) { - cell size = sizer(scan); + cell size = fixup.size(scan); Block *next = (Block *)((cell)scan + size); if(!scan->free_p()) iter(scan,size); scan = next; } } -template struct standard_sizer { - cell operator()(Block *block) - { - return block->size(); - } -}; - template template void free_list_allocator::iterate(Iterator &iter) { - standard_sizer sizer; - iterate(iter,sizer); + iterate(iter,no_fixup()); } } diff --git a/vm/full_collector.cpp b/vm/full_collector.cpp index 849ef07084..19d8b576a5 100644 --- a/vm/full_collector.cpp +++ b/vm/full_collector.cpp @@ -3,17 +3,9 @@ namespace factor { -inline static code_block_visitor make_code_visitor(factor_vm *parent) -{ - return code_block_visitor(parent,code_workhorse(parent)); -} - full_collector::full_collector(factor_vm *parent_) : - collector( - parent_, - parent_->data->tenured, - full_policy(parent_)), - code_visitor(make_code_visitor(parent_)) {} + collector(parent_,parent_->data->tenured,full_policy(parent_)), + code_visitor(parent,workhorse) {} void full_collector::trace_code_block(code_block *compiled) { diff --git a/vm/full_collector.hpp b/vm/full_collector.hpp index ba859e28c9..82a057ddbf 100644 --- a/vm/full_collector.hpp +++ b/vm/full_collector.hpp @@ -25,26 +25,8 @@ struct full_policy { } }; -struct code_workhorse { - factor_vm *parent; - code_heap *code; - - explicit code_workhorse(factor_vm *parent_) : parent(parent_), code(parent->code) {} - - code_block *operator()(code_block *compiled) - { - if(!code->marked_p(compiled)) - { - code->set_marked_p(compiled); - parent->mark_stack.push_back((cell)compiled + 1); - } - - return compiled; - } -}; - struct full_collector : collector { - code_block_visitor code_visitor; + code_block_visitor > code_visitor; explicit full_collector(factor_vm *parent_); void trace_code_block(code_block *compiled); diff --git a/vm/gc.cpp b/vm/gc.cpp index 599ed3cd31..224da82a98 100755 --- a/vm/gc.cpp +++ b/vm/gc.cpp @@ -194,8 +194,51 @@ void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p) current_gc = NULL; } +/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in +uninitialized stack locations before actually calling the GC. See the comment +in compiler.cfg.stacks.uninitialized for details. 
*/ + +struct call_frame_scrubber { + factor_vm *parent; + context *ctx; + + explicit call_frame_scrubber(factor_vm *parent_, context *ctx_) : + parent(parent_), ctx(ctx_) {} + + void operator()(stack_frame *frame) + { + const code_block *compiled = parent->frame_code(frame); + gc_info *info = compiled->block_gc_info(); + + u32 return_address = (cell)FRAME_RETURN_ADDRESS(frame,parent) - (cell)compiled->entry_point(); + int index = info->return_address_index(return_address); + + if(index != -1) + ctx->scrub_stacks(info,index); + } +}; + +void factor_vm::scrub_context(context *ctx) +{ + call_frame_scrubber scrubber(this,ctx); + iterate_callstack(ctx,scrubber); +} + +void factor_vm::scrub_contexts() +{ + std::set::const_iterator begin = active_contexts.begin(); + std::set::const_iterator end = active_contexts.end(); + while(begin != end) + { + scrub_context(*begin); + begin++; + } +} + void factor_vm::primitive_minor_gc() { + scrub_contexts(); + gc(collect_nursery_op, 0, /* requested size */ true /* trace contexts? */); @@ -215,36 +258,6 @@ void factor_vm::primitive_compact_gc() true /* trace contexts? */); } -void factor_vm::inline_gc(cell gc_roots_) -{ - cell stack_pointer = (cell)ctx->callstack_top; - - if(to_boolean(gc_roots_)) - { - tagged gc_roots(gc_roots_); - - cell capacity = array_capacity(gc_roots.untagged()); - for(cell i = 0; i < capacity; i++) - { - cell spill_slot = untag_fixnum(array_nth(gc_roots.untagged(),i)); - cell *address = (cell *)(spill_slot + stack_pointer); - data_roots.push_back(data_root_range(address,1)); - } - - primitive_minor_gc(); - - for(cell i = 0; i < capacity; i++) - data_roots.pop_back(); - } - else - primitive_minor_gc(); -} - -VM_C_API void inline_gc(cell gc_roots, factor_vm *parent) -{ - parent->inline_gc(gc_roots); -} - /* * It is up to the caller to fill in the object's fields in a meaningful * fashion! 
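call_frame_scrubber above ties the pieces together: it turns a frame's return address into an offset from the code block's entry point, asks gc_info (declared in vm/gc_info.hpp below) for the matching GC map index, and context::scrub_stacks then zeroes exactly the stack slots whose bits are set. A compressed, self-contained walk through that arithmetic using the data from fixup-tests.factor (counts 5/2/4, one return address, scrub-d locations {0, 4}); the bitmap helper and the offset formulas mirror the VM headers, while scrub_datastack and main() are illustrative only:

    #include <cassert>

    typedef unsigned char u8;
    typedef unsigned long cell;

    inline bool bitmap_p(u8 *bitmap, cell index)
    {
        return (bitmap[index >> 3] & (1 << (index & 7))) != 0;
    }

    // Zero the datastack slots recorded for GC map 'index'; same indexing
    // as context::scrub_stacks above, with scrub_d_base(index) inlined.
    static void scrub_datastack(cell *datastack, u8 *bitmap,
                                cell scrub_d_count, cell index)
    {
        cell base = index * scrub_d_count;
        for(cell loc = 0; loc < scrub_d_count; loc++)
        {
            if(bitmap_p(bitmap, base + loc))
                datastack[-(long)loc] = 0;
        }
    }

    int main()
    {
        // Layout from fixup-tests.factor: 5 scrub-d bits, 2 scrub-r bits,
        // 4 gc-root bits, 1 return address, bitmap bytes 0x51 0x05.
        cell scrub_d_count = 5, scrub_r_count = 2, gc_root_count = 4;
        cell return_address_count = 1;
        u8 bitmap[2] = { 0x51, 0x05 };

        // total_bitmap_size()/total_bitmap_bytes() from vm/gc_info.hpp.
        cell bits = return_address_count *
            (scrub_d_count + scrub_r_count + gc_root_count);
        assert(bits == 11 && (bits + 7) / 8 == 2);

        // Base indices for GC map 0: scrub-d at 0, scrub-r at 5, roots at 7.
        assert(return_address_count * scrub_d_count == 5);
        assert(return_address_count * (scrub_d_count + scrub_r_count) == 7);

        // Scrubbing: bits 0 and 4 are set, so slots 0 and 4 below the
        // datastack pointer are zeroed.
        cell stack[5] = { 11, 22, 33, 44, 55 };
        scrub_datastack(&stack[4], bitmap, scrub_d_count, 0);
        assert(stack[4] == 0 && stack[0] == 0 && stack[3] == 44);
        return 0;
    }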
diff --git a/vm/gc.hpp b/vm/gc.hpp index 39a69e34f4..f6e9a875a6 100755 --- a/vm/gc.hpp +++ b/vm/gc.hpp @@ -52,6 +52,4 @@ struct gc_state { void start_again(gc_op op_, factor_vm *parent); }; -VM_C_API void inline_gc(cell gc_roots, factor_vm *parent); - } diff --git a/vm/gc_info.cpp b/vm/gc_info.cpp new file mode 100644 index 0000000000..6ffe138f94 --- /dev/null +++ b/vm/gc_info.cpp @@ -0,0 +1,19 @@ +#include "master.hpp" + +namespace factor +{ + +int gc_info::return_address_index(u32 return_address) +{ + u32 *return_address_array = return_addresses(); + + for(cell i = 0; i < return_address_count; i++) + { + if(return_address == return_address_array[i]) + return i; + } + + return -1; +} + +} diff --git a/vm/gc_info.hpp b/vm/gc_info.hpp new file mode 100644 index 0000000000..0e641de0eb --- /dev/null +++ b/vm/gc_info.hpp @@ -0,0 +1,51 @@ +namespace factor +{ + +struct gc_info { + u32 scrub_d_count; + u32 scrub_r_count; + u32 gc_root_count; + u32 return_address_count; + + cell total_bitmap_size() + { + return return_address_count * (scrub_d_count + scrub_r_count + gc_root_count); + } + + cell total_bitmap_bytes() + { + return ((total_bitmap_size() + 7) / 8); + } + + u32 *return_addresses() + { + return (u32 *)((u8 *)this - return_address_count * sizeof(u32)); + } + + u8 *gc_info_bitmap() + { + return (u8 *)return_addresses() - total_bitmap_bytes(); + } + + cell scrub_d_base(cell index) + { + return index * scrub_d_count; + } + + cell scrub_r_base(cell index) + { + return return_address_count * scrub_d_count + + index * scrub_r_count; + } + + cell spill_slot_base(cell index) + { + return return_address_count * scrub_d_count + + return_address_count * scrub_r_count + + index * gc_root_count; + } + + int return_address_index(u32 return_address); +}; + +} diff --git a/vm/image.cpp b/vm/image.cpp index ccce96a952..c9682ce192 100755 --- a/vm/image.cpp +++ b/vm/image.cpp @@ -55,70 +55,66 @@ void factor_vm::load_code_heap(FILE *file, image_header *h, vm_parameters *p) code->allocator->initial_free_list(h->code_size); } -struct data_fixupper { - cell offset; +struct startup_fixup { + cell data_offset; + cell code_offset; - explicit data_fixupper(cell offset_) : offset(offset_) {} + explicit startup_fixup(cell data_offset_, cell code_offset_) : + data_offset(data_offset_), code_offset(code_offset_) {} - object *operator()(object *obj) + object *fixup_data(object *obj) { - return (object *)((char *)obj + offset); + return (object *)((cell)obj + data_offset); } -}; - -struct code_fixupper { - cell offset; - - explicit code_fixupper(cell offset_) : offset(offset_) {} - code_block *operator()(code_block *compiled) + code_block *fixup_code(code_block *obj) { - return (code_block *)((char *)compiled + offset); + return (code_block *)((cell)obj + code_offset); } -}; -static inline cell tuple_size_with_fixup(cell offset, object *obj) -{ - tuple_layout *layout = (tuple_layout *)((char *)UNTAG(((tuple *)obj)->layout) + offset); - return tuple_size(layout); -} + object *translate_data(const object *obj) + { + return fixup_data((object *)obj); + } -struct fixup_sizer { - cell offset; + code_block *translate_code(const code_block *compiled) + { + return fixup_code((code_block *)compiled); + } - explicit fixup_sizer(cell offset_) : offset(offset_) {} + cell size(const object *obj) + { + return obj->size(*this); + } - cell operator()(object *obj) + cell size(code_block *compiled) { - if(obj->type() == TUPLE_TYPE) - return align(tuple_size_with_fixup(offset,obj),data_alignment); - else - return obj->size(); + return 
compiled->size(*this); } }; -struct object_fixupper { +struct start_object_updater { factor_vm *parent; - cell data_offset; - slot_visitor data_visitor; - code_block_visitor code_visitor; + startup_fixup fixup; + slot_visitor data_visitor; + code_block_visitor code_visitor; - object_fixupper(factor_vm *parent_, cell data_offset_, cell code_offset_) : + start_object_updater(factor_vm *parent_, startup_fixup fixup_) : parent(parent_), - data_offset(data_offset_), - data_visitor(slot_visitor(parent_,data_fixupper(data_offset_))), - code_visitor(code_block_visitor(parent_,code_fixupper(code_offset_))) {} + fixup(fixup_), + data_visitor(slot_visitor(parent_,fixup_)), + code_visitor(code_block_visitor(parent_,fixup_)) {} void operator()(object *obj, cell size) { parent->data->tenured->starts.record_object_start_offset(obj); + data_visitor.visit_slots(obj); + switch(obj->type()) { case ALIEN_TYPE: { - cell payload_start = obj->binary_payload_start(); - data_visitor.visit_slots(obj,payload_start); alien *ptr = (alien *)obj; @@ -130,22 +126,11 @@ struct object_fixupper { } case DLL_TYPE: { - cell payload_start = obj->binary_payload_start(); - data_visitor.visit_slots(obj,payload_start); - parent->ffi_dlopen((dll *)obj); break; } - case TUPLE_TYPE: - { - cell payload_start = tuple_size_with_fixup(data_offset,obj); - data_visitor.visit_slots(obj,payload_start); - break; - } default: { - cell payload_start = obj->binary_payload_start(); - data_visitor.visit_slots(obj,payload_start); code_visitor.visit_object_code_block(obj); break; } @@ -155,44 +140,50 @@ struct object_fixupper { void factor_vm::fixup_data(cell data_offset, cell code_offset) { - slot_visitor data_workhorse(this,data_fixupper(data_offset)); + startup_fixup fixup(data_offset,code_offset); + slot_visitor data_workhorse(this,fixup); data_workhorse.visit_roots(); - object_fixupper fixupper(this,data_offset,code_offset); - fixup_sizer sizer(data_offset); - data->tenured->iterate(fixupper,sizer); + start_object_updater updater(this,fixup); + data->tenured->iterate(updater,fixup); } -struct code_block_fixup_relocation_visitor { +struct startup_code_block_relocation_visitor { factor_vm *parent; - cell code_offset; - slot_visitor data_visitor; - code_fixupper code_visitor; + startup_fixup fixup; + slot_visitor data_visitor; - code_block_fixup_relocation_visitor(factor_vm *parent_, cell data_offset_, cell code_offset_) : + startup_code_block_relocation_visitor(factor_vm *parent_, startup_fixup fixup_) : parent(parent_), - code_offset(code_offset_), - data_visitor(slot_visitor(parent_,data_fixupper(data_offset_))), - code_visitor(code_fixupper(code_offset_)) {} + fixup(fixup_), + data_visitor(slot_visitor(parent_,fixup_)) {} void operator()(instruction_operand op) { code_block *compiled = op.parent_code_block(); - cell old_offset = op.rel_offset() + (cell)compiled->entry_point() - code_offset; + cell old_offset = op.rel_offset() + (cell)compiled->entry_point() - fixup.code_offset; switch(op.rel_type()) { case RT_LITERAL: - op.store_value(data_visitor.visit_pointer(op.load_value(old_offset))); - break; + { + cell value = op.load_value(old_offset); + if(immediate_p(value)) + op.store_value(value); + else + op.store_value(RETAG(fixup.fixup_data(untag(value)),TAG(value))); + break; + } case RT_ENTRY_POINT: case RT_ENTRY_POINT_PIC: case RT_ENTRY_POINT_PIC_TAIL: - op.store_code_block(code_visitor(op.load_code_block(old_offset))); - break; case RT_HERE: - op.store_value(op.load_value(old_offset) + code_offset); - break; + { + cell value = 
op.load_value(old_offset); + cell offset = value & (data_alignment - 1); + op.store_value((cell)fixup.fixup_code((code_block *)value) + offset); + break; + } case RT_UNTAGGED: break; default: @@ -202,30 +193,28 @@ struct code_block_fixup_relocation_visitor { } }; -struct code_block_fixupper { +struct startup_code_block_updater { factor_vm *parent; - cell data_offset; - cell code_offset; + startup_fixup fixup; - code_block_fixupper(factor_vm *parent_, cell data_offset_, cell code_offset_) : - parent(parent_), - data_offset(data_offset_), - code_offset(code_offset_) {} + startup_code_block_updater(factor_vm *parent_, startup_fixup fixup_) : + parent(parent_), fixup(fixup_) {} void operator()(code_block *compiled, cell size) { - slot_visitor data_visitor(parent,data_fixupper(data_offset)); + slot_visitor data_visitor(parent,fixup); data_visitor.visit_code_block_objects(compiled); - code_block_fixup_relocation_visitor code_visitor(parent,data_offset,code_offset); + startup_code_block_relocation_visitor code_visitor(parent,fixup); compiled->each_instruction_operand(code_visitor); } }; void factor_vm::fixup_code(cell data_offset, cell code_offset) { - code_block_fixupper fixupper(this,data_offset,code_offset); - code->allocator->iterate(fixupper); + startup_fixup fixup(data_offset,code_offset); + startup_code_block_updater updater(this,fixup); + code->allocator->iterate(updater,fixup); } /* Read an image file from disk, only done once during startup */ diff --git a/vm/jit.cpp b/vm/jit.cpp index 3324cfb366..b98c6f54ff 100644 --- a/vm/jit.cpp +++ b/vm/jit.cpp @@ -116,6 +116,11 @@ void jit::compute_position(cell offset_) /* Allocates memory */ code_block *jit::to_code_block() { + /* Emit dummy GC info */ + code.grow_bytes(alignment_for(code.count + 4,data_alignment)); + u32 dummy_gc_info = 0; + code.append_bytes(&dummy_gc_info,sizeof(u32)); + code.trim(); relocation.trim(); parameters.trim(); diff --git a/vm/layouts.hpp b/vm/layouts.hpp index 5e7ca0279f..b0edb4be16 100644 --- a/vm/layouts.hpp +++ b/vm/layouts.hpp @@ -23,6 +23,11 @@ inline static cell align(cell a, cell b) return (a + (b-1)) & ~(b-1); } +inline static cell alignment_for(cell a, cell b) +{ + return align(a,b) - a; +} + static const cell data_alignment = 16; #define WORD_SIZE (signed)(sizeof(cell)*8) @@ -98,7 +103,10 @@ struct object { cell header; cell size() const; + template cell size(Fixup fixup) const; + cell binary_payload_start() const; + template cell binary_payload_start(Fixup fixup) const; cell *slots() const { return (cell *)this; } diff --git a/vm/mark_bits.hpp b/vm/mark_bits.hpp index 5115f9a821..b3b73ba1ea 100644 --- a/vm/mark_bits.hpp +++ b/vm/mark_bits.hpp @@ -40,7 +40,7 @@ template struct mark_bits { forwarding = NULL; } - cell block_line(Block *address) + cell block_line(const Block *address) { return (((cell)address - start) / data_alignment); } @@ -50,7 +50,7 @@ template struct mark_bits { return (Block *)(line * data_alignment + start); } - std::pair bitmap_deref(Block *address) + std::pair bitmap_deref(const Block *address) { cell line_number = block_line(address); cell word_index = (line_number / mark_bits_granularity); @@ -58,18 +58,18 @@ template struct mark_bits { return std::make_pair(word_index,word_shift); } - bool bitmap_elt(cell *bits, Block *address) + bool bitmap_elt(cell *bits, const Block *address) { std::pair position = bitmap_deref(address); return (bits[position.first] & ((cell)1 << position.second)) != 0; } - Block *next_block_after(Block *block) + Block *next_block_after(const Block *block) { 
return (Block *)((cell)block + block->size()); } - void set_bitmap_range(cell *bits, Block *address) + void set_bitmap_range(cell *bits, const Block *address) { std::pair start = bitmap_deref(address); std::pair end = bitmap_deref(next_block_after(address)); @@ -99,12 +99,12 @@ template struct mark_bits { } } - bool marked_p(Block *address) + bool marked_p(const Block *address) { return bitmap_elt(marked,address); } - void set_marked_p(Block *address) + void set_marked_p(const Block *address) { set_bitmap_range(marked,address); } @@ -123,7 +123,7 @@ template struct mark_bits { /* We have the popcount for every mark_bits_granularity entries; look up and compute the rest */ - Block *forward_block(Block *original) + Block *forward_block(const Block *original) { #ifdef FACTOR_DEBUG assert(marked_p(original)); @@ -141,7 +141,7 @@ template struct mark_bits { return new_block; } - Block *next_unmarked_block_after(Block *original) + Block *next_unmarked_block_after(const Block *original) { std::pair position = bitmap_deref(original); cell bit_index = position.second; @@ -168,7 +168,7 @@ template struct mark_bits { return (Block *)(this->start + this->size); } - Block *next_marked_block_after(Block *original) + Block *next_marked_block_after(const Block *original) { std::pair position = bitmap_deref(original); cell bit_index = position.second; diff --git a/vm/master.hpp b/vm/master.hpp index a111a86b69..b8ababeb2d 100755 --- a/vm/master.hpp +++ b/vm/master.hpp @@ -75,6 +75,7 @@ namespace factor #include "platform.hpp" #include "primitives.hpp" #include "segments.hpp" +#include "gc_info.hpp" #include "contexts.hpp" #include "run.hpp" #include "objects.hpp" @@ -89,6 +90,8 @@ namespace factor #include "bitwise_hacks.hpp" #include "mark_bits.hpp" #include "free_list.hpp" +#include "fixup.hpp" +#include "tuples.hpp" #include "free_list_allocator.hpp" #include "write_barrier.hpp" #include "object_start_map.hpp" @@ -100,7 +103,6 @@ namespace factor #include "gc.hpp" #include "debug.hpp" #include "strings.hpp" -#include "tuples.hpp" #include "words.hpp" #include "float_bits.hpp" #include "io.hpp" @@ -115,6 +117,7 @@ namespace factor #include "data_roots.hpp" #include "code_roots.hpp" #include "generic_arrays.hpp" +#include "callstack.hpp" #include "slot_visitor.hpp" #include "collector.hpp" #include "copying_collector.hpp" @@ -124,7 +127,6 @@ namespace factor #include "code_block_visitor.hpp" #include "compaction.hpp" #include "full_collector.hpp" -#include "callstack.hpp" #include "arrays.hpp" #include "math.hpp" #include "byte_arrays.hpp" diff --git a/vm/objects.cpp b/vm/objects.cpp index 6b007f5d42..a370e3f712 100644 --- a/vm/objects.cpp +++ b/vm/objects.cpp @@ -82,13 +82,13 @@ void factor_vm::primitive_size() ctx->push(allot_cell(object_size(ctx->pop()))); } -struct slot_become_visitor { +struct slot_become_fixup : no_fixup { std::map *become_map; - explicit slot_become_visitor(std::map *become_map_) : + explicit slot_become_fixup(std::map *become_map_) : become_map(become_map_) {} - object *operator()(object *old) + object *fixup_data(object *old) { std::map::const_iterator iter = become_map->find(old); if(iter != become_map->end()) @@ -99,9 +99,9 @@ struct slot_become_visitor { }; struct object_become_visitor { - slot_visitor *workhorse; + slot_visitor *workhorse; - explicit object_become_visitor(slot_visitor *workhorse_) : + explicit object_become_visitor(slot_visitor *workhorse_) : workhorse(workhorse_) {} void operator()(object *obj) @@ -111,9 +111,9 @@ struct object_become_visitor { }; struct 
diff --git a/vm/master.hpp b/vm/master.hpp
index a111a86b69..b8ababeb2d 100755
--- a/vm/master.hpp
+++ b/vm/master.hpp
@@ -75,6 +75,7 @@ namespace factor
 #include "platform.hpp"
 #include "primitives.hpp"
 #include "segments.hpp"
+#include "gc_info.hpp"
 #include "contexts.hpp"
 #include "run.hpp"
 #include "objects.hpp"
@@ -89,6 +90,8 @@ namespace factor
 #include "bitwise_hacks.hpp"
 #include "mark_bits.hpp"
 #include "free_list.hpp"
+#include "fixup.hpp"
+#include "tuples.hpp"
 #include "free_list_allocator.hpp"
 #include "write_barrier.hpp"
 #include "object_start_map.hpp"
@@ -100,7 +103,6 @@ namespace factor
 #include "gc.hpp"
 #include "debug.hpp"
 #include "strings.hpp"
-#include "tuples.hpp"
 #include "words.hpp"
 #include "float_bits.hpp"
 #include "io.hpp"
@@ -115,6 +117,7 @@ namespace factor
 #include "data_roots.hpp"
 #include "code_roots.hpp"
 #include "generic_arrays.hpp"
+#include "callstack.hpp"
 #include "slot_visitor.hpp"
 #include "collector.hpp"
 #include "copying_collector.hpp"
@@ -124,7 +127,6 @@ namespace factor
 #include "code_block_visitor.hpp"
 #include "compaction.hpp"
 #include "full_collector.hpp"
-#include "callstack.hpp"
 #include "arrays.hpp"
 #include "math.hpp"
 #include "byte_arrays.hpp"
diff --git a/vm/objects.cpp b/vm/objects.cpp
index 6b007f5d42..a370e3f712 100644
--- a/vm/objects.cpp
+++ b/vm/objects.cpp
@@ -82,13 +82,13 @@ void factor_vm::primitive_size()
 	ctx->push(allot_cell(object_size(ctx->pop())));
 }
 
-struct slot_become_visitor {
+struct slot_become_fixup : no_fixup {
 	std::map<object *,object *> *become_map;
 
-	explicit slot_become_visitor(std::map<object *,object *> *become_map_) :
+	explicit slot_become_fixup(std::map<object *,object *> *become_map_) :
 		become_map(become_map_) {}
 
-	object *operator()(object *old)
+	object *fixup_data(object *old)
 	{
 		std::map<object *,object *>::const_iterator iter = become_map->find(old);
 		if(iter != become_map->end())
@@ -99,9 +99,9 @@ struct slot_become_visitor {
 };
 
 struct object_become_visitor {
-	slot_visitor<slot_become_visitor> *workhorse;
+	slot_visitor<slot_become_fixup> *workhorse;
 
-	explicit object_become_visitor(slot_visitor<slot_become_visitor> *workhorse_) :
+	explicit object_become_visitor(slot_visitor<slot_become_fixup> *workhorse_) :
 		workhorse(workhorse_) {}
 
 	void operator()(object *obj)
@@ -111,9 +111,9 @@ struct object_become_visitor {
 };
 
 struct code_block_become_visitor {
-	slot_visitor<slot_become_visitor> *workhorse;
+	slot_visitor<slot_become_fixup> *workhorse;
 
-	explicit code_block_become_visitor(slot_visitor<slot_become_visitor> *workhorse_) :
+	explicit code_block_become_visitor(slot_visitor<slot_become_fixup> *workhorse_) :
 		workhorse(workhorse_) {}
 
 	void operator()(code_block *compiled, cell size)
@@ -160,7 +160,7 @@ void factor_vm::primitive_become()
 
 	/* Update all references to old objects to point to new objects */
 	{
-		slot_visitor<slot_become_visitor> workhorse(this,slot_become_visitor(&become_map));
+		slot_visitor<slot_become_fixup> workhorse(this,slot_become_fixup(&become_map));
 		workhorse.visit_roots();
 		workhorse.visit_contexts();
 
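The objects.cpp hunk above converts the old slot_become_visitor functor into a fixup policy (slot_become_fixup : no_fixup) whose fixup_data method is what the templated slot_visitor now calls. A minimal sketch of that policy pattern under assumed shapes: an identity no_fixup, a map-driven fixup in the spirit of slot_become_fixup, and a visitor function templated on the policy. The object type and visit_handle helper here are invented for illustration:

#include <cassert>
#include <map>

struct object { int tag; };

/* identity policy: the default used outside of become/startup fixup */
struct no_fixup {
    object *fixup_data(object *obj) const { return obj; }
};

/* map-based policy: forward old objects to their replacements,
   leave everything else alone */
struct become_fixup : no_fixup {
    std::map<object *, object *> *become_map;

    explicit become_fixup(std::map<object *, object *> *become_map_) :
        become_map(become_map_) {}

    object *fixup_data(object *old) const
    {
        std::map<object *, object *>::const_iterator iter = become_map->find(old);
        return iter != become_map->end() ? iter->second : old;
    }
};

/* a visitor parameterized on the policy, as slot_visitor<Fixup> is */
template<typename Fixup>
void visit_handle(object **handle, Fixup fixup)
{
    *handle = fixup.fixup_data(*handle);
}

int main()
{
    object a = { 1 }, b = { 2 };
    std::map<object *, object *> become_map;
    become_map[&a] = &b;

    object *slot = &a;
    visit_handle(&slot, become_fixup(&become_map)); /* a is forwarded to b */
    assert(slot == &b);

    visit_handle(&slot, no_fixup());                /* identity leaves it alone */
    assert(slot == &b);
    return 0;
}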
diff --git a/vm/slot_visitor.hpp b/vm/slot_visitor.hpp
index d4dd44bed1..148c05df1f 100644
--- a/vm/slot_visitor.hpp
+++ b/vm/slot_visitor.hpp
@@ -1,6 +1,100 @@
 namespace factor
 {
 
+/* Size of the object pointed to by an untagged pointer */
+template<typename Fixup>
+cell object::size(Fixup fixup) const
+{
+	if(free_p()) return ((free_heap_block *)this)->size();
+
+	switch(type())
+	{
+	case ARRAY_TYPE:
+		return align(array_size((array*)this),data_alignment);
+	case BIGNUM_TYPE:
+		return align(array_size((bignum*)this),data_alignment);
+	case BYTE_ARRAY_TYPE:
+		return align(array_size((byte_array*)this),data_alignment);
+	case STRING_TYPE:
+		return align(string_size(string_capacity((string*)this)),data_alignment);
+	case TUPLE_TYPE:
+	{
+		tuple_layout *layout = (tuple_layout *)fixup.translate_data(untag<object>(((tuple *)this)->layout));
+		return align(tuple_size(layout),data_alignment);
+	}
+	case QUOTATION_TYPE:
+		return align(sizeof(quotation),data_alignment);
+	case WORD_TYPE:
+		return align(sizeof(word),data_alignment);
+	case FLOAT_TYPE:
+		return align(sizeof(boxed_float),data_alignment);
+	case DLL_TYPE:
+		return align(sizeof(dll),data_alignment);
+	case ALIEN_TYPE:
+		return align(sizeof(alien),data_alignment);
+	case WRAPPER_TYPE:
+		return align(sizeof(wrapper),data_alignment);
+	case CALLSTACK_TYPE:
+		return align(callstack_object_size(untag_fixnum(((callstack *)this)->length)),data_alignment);
+	default:
+		critical_error("Invalid header in size",(cell)this);
+		return 0; /* can't happen */
+	}
+}
+
+inline cell object::size() const
+{
+	return size(no_fixup());
+}
+
+/* The number of cells from the start of the object which should be scanned by
+the GC. Some types have a binary payload at the end (string, word, DLL) which
+we ignore. */
+template<typename Fixup>
+cell object::binary_payload_start(Fixup fixup) const
+{
+	if(free_p()) return 0;
+
+	switch(type())
+	{
+	/* these objects do not refer to other objects at all */
+	case FLOAT_TYPE:
+	case BYTE_ARRAY_TYPE:
+	case BIGNUM_TYPE:
+	case CALLSTACK_TYPE:
+		return 0;
+	/* these objects have some binary data at the end */
+	case WORD_TYPE:
+		return sizeof(word) - sizeof(cell) * 3;
+	case ALIEN_TYPE:
+		return sizeof(cell) * 3;
+	case DLL_TYPE:
+		return sizeof(cell) * 2;
+	case QUOTATION_TYPE:
+		return sizeof(quotation) - sizeof(cell) * 2;
+	case STRING_TYPE:
+		return sizeof(string);
+	/* everything else consists entirely of pointers */
+	case ARRAY_TYPE:
+		return array_size(array_capacity((array*)this));
+	case TUPLE_TYPE:
+	{
+		tuple_layout *layout = (tuple_layout *)fixup.translate_data(untag<object>(((tuple *)this)->layout));
+		return tuple_size(layout);
+	}
+	case WRAPPER_TYPE:
+		return sizeof(wrapper);
+	default:
+		critical_error("Invalid header in binary_payload_start",(cell)this);
+		return 0; /* can't happen */
+	}
+}
+
+inline cell object::binary_payload_start() const
+{
+	return binary_payload_start(no_fixup());
+}
+
 /* Slot visitors iterate over the slots of an object, applying a functor to
 each one that is a non-immediate slot. The pointer is untagged first. The
 functor returns a new untagged object pointer. The return value may or may not equal the old one,
@@ -17,12 +111,12 @@ Iteration is driven by visit_*() methods. Some of them define GC roots:
 - visit_roots()
 - visit_contexts()
 */
-template<typename Visitor> struct slot_visitor {
+template<typename Fixup> struct slot_visitor {
 	factor_vm *parent;
-	Visitor visitor;
+	Fixup fixup;
 
-	explicit slot_visitor(factor_vm *parent_, Visitor visitor_) :
-		parent(parent_), visitor(visitor_) {}
+	explicit slot_visitor(factor_vm *parent_, Fixup fixup_) :
+		parent(parent_), fixup(fixup_) {}
 
 	cell visit_pointer(cell pointer);
 	void visit_handle(cell *handle);
@@ -35,35 +129,36 @@ template<typename Visitor> struct slot_visitor {
 	void visit_callback_roots();
 	void visit_literal_table_roots();
 	void visit_roots();
+	void visit_callstack_object(callstack *stack);
+	void visit_callstack(context *ctx);
 	void visit_contexts();
 
 	void visit_code_block_objects(code_block *compiled);
 	void visit_embedded_literals(code_block *compiled);
 };
 
-template<typename Visitor>
-cell slot_visitor<Visitor>::visit_pointer(cell pointer)
+template<typename Fixup>
+cell slot_visitor<Fixup>::visit_pointer(cell pointer)
 {
 	if(immediate_p(pointer)) return pointer;
 
-	object *untagged = untag<object>(pointer);
-	untagged = visitor(untagged);
+	object *untagged = fixup.fixup_data(untag<object>(pointer));
 	return RETAG(untagged,TAG(pointer));
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_handle(cell *handle)
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_handle(cell *handle)
 {
 	*handle = visit_pointer(*handle);
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_object_array(cell *start, cell *end)
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_object_array(cell *start, cell *end)
 {
 	while(start < end) visit_handle(start++);
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_slots(object *ptr, cell payload_start)
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_slots(object *ptr, cell payload_start)
 {
 	cell *slot = (cell *)ptr;
 	cell *end = (cell *)((cell)ptr + payload_start);
@@ -75,20 +170,23 @@ void slot_visitor<Visitor>::visit_slots(object *ptr, cell payload_start)
 	}
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_slots(object *ptr)
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_slots(object *obj)
 {
-	visit_slots(ptr,ptr->binary_payload_start());
+	if(obj->type() == CALLSTACK_TYPE)
+		visit_callstack_object((callstack *)obj);
+	else
+		visit_slots(obj,obj->binary_payload_start(fixup));
 }
 
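visit_pointer above strips the tag bits from a cell, lets the fixup policy translate the untagged object pointer, then reapplies the original tag with RETAG, so a moved object keeps its type tag. A self-contained sketch of that tag-preserving rewrite; the 4-bit tag mask and the shift_by fixup are assumptions made for the example rather than the VM's actual definitions:

#include <cassert>
#include <cstdint>

typedef uintptr_t cell;

/* Illustrative tagged-pointer arithmetic: the low bits of a cell hold a type
   tag, the rest is an aligned heap address. The mask value is an assumption
   for this sketch. */
static const cell TAG_MASK = 15;

inline cell TAG(cell tagged) { return tagged & TAG_MASK; }
inline cell UNTAG(cell tagged) { return tagged & ~TAG_MASK; }
inline cell RETAG(cell untagged, cell tag) { return UNTAG(untagged) | tag; }

/* visit_pointer-style rewrite: move the payload, keep the tag */
template<typename Fixup>
cell visit_pointer(cell pointer, Fixup fixup)
{
    cell untagged = fixup(UNTAG(pointer));
    return RETAG(untagged, TAG(pointer));
}

struct shift_by {
    cell offset;
    cell operator()(cell address) const { return address + offset; }
};

int main()
{
    cell tagged = 0x1000 | 2;      /* object at 0x1000 with tag 2 */
    shift_by fixup = { 0x100 };    /* pretend the object moved by 0x100 */
    cell moved = visit_pointer(tagged, fixup);
    assert(moved == (0x1100 | 2)); /* new address, same tag */
    return 0;
}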
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_stack_elements(segment *region, cell *top)
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_stack_elements(segment *region, cell *top)
 {
 	visit_object_array((cell *)region->start,top + 1);
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_data_roots()
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_data_roots()
 {
 	std::vector<data_root_range>::const_iterator iter = parent->data_roots.begin();
 	std::vector<data_root_range>::const_iterator end = parent->data_roots.end();
@@ -97,8 +195,8 @@ void slot_visitor<Visitor>::visit_data_roots()
 		visit_object_array(iter->start,iter->start + iter->len);
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_bignum_roots()
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_bignum_roots()
 {
 	std::vector<cell>::const_iterator iter = parent->bignum_roots.begin();
 	std::vector<cell>::const_iterator end = parent->bignum_roots.end();
@@ -108,16 +206,16 @@ void slot_visitor<Visitor>::visit_bignum_roots()
 		cell *handle = (cell *)(*iter);
 
 		if(*handle)
-			*handle = (cell)visitor(*(object **)handle);
+			*handle = (cell)fixup.fixup_data(*(object **)handle);
 	}
 }
 
-template<typename Visitor>
+template<typename Fixup>
 struct callback_slot_visitor {
 	callback_heap *callbacks;
-	slot_visitor<Visitor> *visitor;
+	slot_visitor<Fixup> *visitor;
 
-	explicit callback_slot_visitor(callback_heap *callbacks_, slot_visitor<Visitor> *visitor_) :
+	explicit callback_slot_visitor(callback_heap *callbacks_, slot_visitor<Fixup> *visitor_) :
 		callbacks(callbacks_), visitor(visitor_) {}
 
 	void operator()(code_block *stub)
@@ -126,15 +224,15 @@ struct callback_slot_visitor {
 	}
 };
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_callback_roots()
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_callback_roots()
 {
-	callback_slot_visitor<Visitor> callback_visitor(parent->callbacks,this);
+	callback_slot_visitor<Fixup> callback_visitor(parent->callbacks,this);
 	parent->callbacks->each_callback(callback_visitor);
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_literal_table_roots()
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_literal_table_roots()
 {
 	std::map<code_block *,cell> *uninitialized_blocks = &parent->code->uninitialized_blocks;
 	std::map<code_block *,cell>::const_iterator iter = uninitialized_blocks->begin();
@@ -151,8 +249,8 @@ void slot_visitor<Visitor>::visit_literal_table_roots()
 	parent->code->uninitialized_blocks = new_uninitialized_blocks;
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_roots()
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_roots()
 {
 	visit_handle(&parent->true_object);
 	visit_handle(&parent->bignum_zero);
@@ -167,8 +265,62 @@ void slot_visitor<Visitor>::visit_roots()
 	visit_object_array(parent->special_objects,parent->special_objects + special_object_count);
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_contexts()
+template<typename Fixup>
+struct call_frame_slot_visitor {
+	factor_vm *parent;
+	slot_visitor<Fixup> *visitor;
+
+	explicit call_frame_slot_visitor(factor_vm *parent_, slot_visitor<Fixup> *visitor_) :
+		parent(parent_), visitor(visitor_) {}
+
+	/*
+	next -> [entry_point]
+	        [size]
+	        [return address] -- x86 only, backend adds 1 to each spill location
+	        [spill area]
+	        ...
+	frame -> [entry_point]
+	         [size]
+	*/
+	void operator()(stack_frame *frame)
+	{
+		const code_block *compiled = visitor->fixup.translate_code(parent->frame_code(frame));
+		gc_info *info = compiled->block_gc_info();
+
+		u32 return_address = (cell)FRAME_RETURN_ADDRESS(frame,parent) - (cell)compiled->entry_point();
+		int index = info->return_address_index(return_address);
+
+		if(index != -1)
+		{
+			u8 *bitmap = info->gc_info_bitmap();
+			cell base = info->spill_slot_base(index);
+			cell *stack_pointer = (cell *)(parent->frame_successor(frame) + 1);
+
+			for(cell spill_slot = 0; spill_slot < info->gc_root_count; spill_slot++)
+			{
+				if(bitmap_p(bitmap,base + spill_slot))
+					visitor->visit_handle(&stack_pointer[spill_slot]);
+			}
+		}
+	}
+};
+
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_callstack_object(callstack *stack)
+{
+	call_frame_slot_visitor<Fixup> call_frame_visitor(parent,this);
+	parent->iterate_callstack_object(stack,call_frame_visitor);
+}
+
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_callstack(context *ctx)
+{
+	call_frame_slot_visitor<Fixup> call_frame_visitor(parent,this);
+	parent->iterate_callstack(ctx,call_frame_visitor);
+}
+
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_contexts()
 {
 	std::set<context *>::const_iterator begin = parent->active_contexts.begin();
 	std::set<context *>::const_iterator end = parent->active_contexts.end();
@@ -179,16 +331,16 @@ void slot_visitor<Visitor>::visit_contexts()
 		visit_stack_elements(ctx->datastack_seg,(cell *)ctx->datastack);
 		visit_stack_elements(ctx->retainstack_seg,(cell *)ctx->retainstack);
 		visit_object_array(ctx->context_objects,ctx->context_objects + context_object_count);
-
+		visit_callstack(ctx);
 		begin++;
 	}
 }
 
-template<typename Visitor>
+template<typename Fixup>
 struct literal_references_visitor {
-	slot_visitor<Visitor> *visitor;
+	slot_visitor<Fixup> *visitor;
 
-	explicit literal_references_visitor(slot_visitor<Visitor> *visitor_) : visitor(visitor_) {}
+	explicit literal_references_visitor(slot_visitor<Fixup> *visitor_) : visitor(visitor_) {}
 
 	void operator()(instruction_operand op)
 	{
@@ -197,20 +349,20 @@ struct literal_references_visitor {
 	}
 };
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_code_block_objects(code_block *compiled)
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_code_block_objects(code_block *compiled)
 {
 	visit_handle(&compiled->owner);
 	visit_handle(&compiled->parameters);
 	visit_handle(&compiled->relocation);
 }
 
-template<typename Visitor>
-void slot_visitor<Visitor>::visit_embedded_literals(code_block *compiled)
+template<typename Fixup>
+void slot_visitor<Fixup>::visit_embedded_literals(code_block *compiled)
 {
 	if(!parent->code->uninitialized_p(compiled))
 	{
-		literal_references_visitor<Visitor> visitor(this);
+		literal_references_visitor<Fixup> visitor(this);
 		compiled->each_instruction_operand(visitor);
 	}
 }
diff --git a/vm/vm.hpp b/vm/vm.hpp
index 645e748ea4..14a00e9d2a 100755
--- a/vm/vm.hpp
+++ b/vm/vm.hpp
@@ -317,10 +317,11 @@ struct factor_vm
 	void collect_compact(bool trace_contexts_p);
 	void collect_growing_heap(cell requested_bytes, bool trace_contexts_p);
 	void gc(gc_op op, cell requested_bytes, bool trace_contexts_p);
+	void scrub_context(context *ctx);
+	void scrub_contexts();
 	void primitive_minor_gc();
 	void primitive_full_gc();
 	void primitive_compact_gc();
-	void inline_gc(cell gc_roots);
 	void primitive_enable_gc_events();
 	void primitive_disable_gc_events();
 	object *allot_object(cell type, cell size);
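call_frame_slot_visitor is where the GC maps introduced by this commit are consumed: for each call frame it locates the frame's return address in the owning code block's gc_info, and the bitmap row recorded for that safepoint says which spill slots hold tagged pointers, so only those slots are traced as roots. A toy model of that lookup follows; the encoding and the names return_address_index, spill_slot_base and bitmap_p only mimic the calls in the diff, since the real layout is defined in the new vm/gc_info.hpp, which is not shown here:

#include <cassert>
#include <cstdint>
#include <vector>

typedef uintptr_t cell;
typedef uint8_t u8;
typedef uint32_t u32;

/* One row of bits per safepoint (return address), one bit per spill slot,
   rows packed end to end in a byte array. Invented layout for illustration. */
struct toy_gc_map {
    std::vector<u32> return_addresses; /* safepoint offsets within the block */
    cell gc_root_count;                /* bits per safepoint row */
    std::vector<u8> bitmap;            /* packed rows */

    int return_address_index(u32 offset) const
    {
        for(cell i = 0; i < return_addresses.size(); i++)
            if(return_addresses[i] == offset) return (int)i;
        return -1; /* not a safepoint: no roots to trace in this frame */
    }

    cell spill_slot_base(int index) const { return index * gc_root_count; }

    bool bitmap_p(cell index) const
    {
        return (bitmap[index / 8] >> (index % 8)) & 1;
    }
};

int main()
{
    /* two safepoints, three spill slots each: at offset 0x10 slots 0 and 2
       hold roots, at offset 0x24 only slot 1 does */
    toy_gc_map map;
    map.return_addresses.push_back(0x10);
    map.return_addresses.push_back(0x24);
    map.gc_root_count = 3;
    map.bitmap.push_back(0x15); /* bits 0, 2, 4 set */

    int index = map.return_address_index(0x24);
    assert(index == 1);
    cell base = map.spill_slot_base(index); /* row starts at bit 3 */
    assert(!map.bitmap_p(base + 0));
    assert(map.bitmap_p(base + 1));
    assert(!map.bitmap_p(base + 2));
    return 0;
}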