}
};
-void factor_vm::update_code_block_for_full_gc(code_block *compiled)
+void factor_vm::update_code_block_words_and_literals(code_block *compiled)
{
if(code->needs_fixup_p(compiled))
relocate_code_block(compiled);
factor_vm *parent;
explicit word_updater(factor_vm *parent_) : parent(parent_) {}
- void operator()(code_block *compiled)
+ void operator()(code_block *compiled, cell size)
{
parent->update_word_references(compiled);
}
iterate_code_heap(updater);
}
+/* After a full GC that did not grow the heap, we have to update references
+to literals and other words. */
+struct word_and_literal_code_heap_updater {
+	factor_vm *parent;
+
+	word_and_literal_code_heap_updater(factor_vm *parent_) : parent(parent_) {}
+
+	/* Invoked by sweep_heap() for each live block; every live block in the
+	code heap is a code_block. The size argument is unused here. */
+	void operator()(heap_block *block, cell size)
+	{
+		parent->update_code_block_words_and_literals((code_block *)block);
+	}
+};
+
+void factor_vm::update_code_heap_words_and_literals()
+{
+	word_and_literal_code_heap_updater updater(this);
+	/* sweep_heap() frees unmarked blocks and applies the updater to each
+	surviving (marked) block */
+	code->sweep_heap(updater);
+}
+
+/* After growing the heap, we have to perform a full relocation to update
+references to card and deck arrays. */
+struct code_heap_relocator {
+	factor_vm *parent;
+
+	code_heap_relocator(factor_vm *parent_) : parent(parent_) {}
+
+	/* Invoked once per live code block; the size argument is unused here */
+	void operator()(code_block *block, cell size)
+	{
+		parent->relocate_code_block(block);
+	}
+};
+
+void factor_vm::relocate_code_heap()
+{
+	code_heap_relocator relocator(this);
+	/* code_heap_iterator adapts the heap_block interface of sweep_heap()
+	to the code_block interface of the relocator */
+	code_heap_iterator<code_heap_relocator> iter(relocator);
+	code->sweep_heap(iter);
+}
+
void factor_vm::primitive_modify_code_heap()
{
gc_root<array> alist(dpop(),this);
code_block *code_heap::forward_code_block(code_block *compiled)
{
- return (code_block *)forwarding[compiled];
+ return (code_block *)state->forward_block(compiled);
}
struct callframe_forwarder {
function returns. */
void factor_vm::compact_code_heap(bool trace_contexts_p)
{
- code->compact_heap();
+ /* Figure out where blocks are going to go */
+ code->state->compute_forwarding();
+
+ /* Update references to the code heap from the data heap */
forward_object_xts();
if(trace_contexts_p)
{
forward_context_xts();
forward_callback_xts();
}
+
+ /* Move code blocks and update references amongst them (this requires
+ that the data heap is up to date since relocation looks up object XTs) */
+ code_heap_relocator relocator(this);
+ code_heap_iterator<code_heap_relocator> iter(relocator);
+ code->compact_heap(iter);
}
struct stack_trace_stripper {
explicit stack_trace_stripper() {}
- void operator()(code_block *compiled)
+ void operator()(code_block *compiled, cell size)
{
compiled->owner = false_object;
}
cell reloc_size = 0, literal_size = 0;
heap_block *scan = code->first_block();
+ heap_block *end = code->last_block();
- while(scan)
+ while(scan != end)
{
const char *status;
if(scan->type() == FREE_BLOCK_TYPE)
status = "free";
- else if(scan->marked_p())
+ else if(code->state->is_marked_p(scan))
{
reloc_size += object_size(((code_block *)scan)->relocation);
literal_size += object_size(((code_block *)scan)->literals);
print_cell_hex(scan->size()); print_string(" ");
print_string(status); print_string("\n");
- scan = code->next_block(scan);
+ scan = scan->next();
}
print_cell(reloc_size); print_string(" bytes of relocation data\n");
{
factor_vm *vm;
-unordered_map<THREADHANDLE, factor_vm*> thread_vms;
+std::map<THREADHANDLE, factor_vm*> thread_vms;
void init_globals()
{
}
}
-/* After growing the heap, we have to perform a full relocation to update
-references to card and deck arrays. */
-struct big_code_heap_updater {
- factor_vm *parent;
-
- big_code_heap_updater(factor_vm *parent_) : parent(parent_) {}
-
- void operator()(heap_block *block)
- {
- parent->relocate_code_block((code_block *)block);
- }
-};
-
-/* After a full GC that did not grow the heap, we have to update references
-to literals and other words. */
-struct small_code_heap_updater {
- factor_vm *parent;
-
- small_code_heap_updater(factor_vm *parent_) : parent(parent_) {}
-
- void operator()(heap_block *block)
- {
- parent->update_code_block_for_full_gc((code_block *)block);
- }
-};
-
void factor_vm::collect_full_impl(bool trace_contexts_p)
{
full_collector collector(this);
+ code->state->clear_mark_bits();
+
collector.trace_roots();
if(trace_contexts_p)
{
nursery.here = nursery.start;
}
-/* In both cases, compact code heap before updating code blocks so that
-XTs are correct after */
-
-void factor_vm::big_code_heap_update()
-{
- big_code_heap_updater updater(this);
- code->free_unmarked(updater);
- code->clear_remembered_set();
-}
-
void factor_vm::collect_growing_heap(cell requested_bytes,
bool trace_contexts_p,
bool compact_code_heap_p)
collect_full_impl(trace_contexts_p);
delete old;
- if(compact_code_heap_p) compact_code_heap(trace_contexts_p);
-
- big_code_heap_update();
-}
+ if(compact_code_heap_p)
+ compact_code_heap(trace_contexts_p);
+ else
+ relocate_code_heap();
-void factor_vm::small_code_heap_update()
-{
- small_code_heap_updater updater(this);
- code->free_unmarked(updater);
code->clear_remembered_set();
}
collect_full_impl(trace_contexts_p);
if(compact_code_heap_p)
- {
compact_code_heap(trace_contexts_p);
- big_code_heap_update();
- }
else
- small_code_heap_update();
+ update_code_heap_words_and_literals();
+
+ code->clear_remembered_set();
}
}
current_gc->op = collect_full_op;
break;
case collect_full_op:
- /* Since we start tracing again, any previously
- marked code blocks must be re-marked and re-traced */
- code->clear_mark_bits();
current_gc->op = collect_growing_heap_op;
break;
default:
if(size > (1L << (sizeof(cell) * 8 - 6))) fatal_error("Heap too large",size);
seg = new segment(align_page(size),executable_p);
if(!seg) fatal_error("Out of memory in heap allocator",size);
+ state = new mark_bits<heap_block,block_size_increment>(seg->start,size);
clear_free_list();
}
+/* Release the backing memory segment and the mark/forwarding bitmap state.
+Pointers are nulled after deletion as a use-after-free tripwire. */
+heap::~heap()
+{
+	delete seg;
+	seg = NULL;
+	delete state;
+	state = NULL;
+}
+
void heap::add_to_free_list(free_heap_block *block)
{
if(block->size() < free_list_count * block_size_increment)
}
}
-/* Called after reading the code heap from the image file, and after code GC.
-
-In the former case, we must add a large free block from compiling.base + size to
-compiling.limit. */
+/* Called after reading the code heap from the image file, and after code heap
+compaction. Makes a free list consisting of one free block, at the very end. */
void heap::build_free_list(cell size)
{
- heap_block *prev = NULL;
-
clear_free_list();
-
- size = (size + block_size_increment - 1) & ~(block_size_increment - 1);
-
- heap_block *scan = first_block();
free_heap_block *end = (free_heap_block *)(seg->start + size);
-
- /* Add all free blocks to the free list */
- while(scan && scan < (heap_block *)end)
- {
- if(scan->type() == FREE_BLOCK_TYPE)
- add_to_free_list((free_heap_block *)scan);
-
- prev = scan;
- scan = next_block(scan);
- }
-
- /* If there is room at the end of the heap, add a free block. This
- branch is only taken after loading a new image, not after code GC */
- if((cell)(end + 1) <= seg->end)
- {
- end->set_marked_p(false);
- end->set_type(FREE_BLOCK_TYPE);
- end->set_size(seg->end - (cell)end);
-
- /* add final free block */
- add_to_free_list(end);
- }
- /* This branch is taken if the newly loaded image fits exactly, or
- after code GC */
- else
- {
- /* even if there's no room at the end of the heap for a new
- free block, we might have to jigger it up by a few bytes in
- case prev + prev->size */
- if(prev) prev->set_size(seg->end - (cell)prev);
- }
-
+ end->set_type(FREE_BLOCK_TYPE);
+ end->set_size(seg->end - (cell)end);
+ add_to_free_list(end);
}
void heap::assert_free_block(free_heap_block *block)
{
block = split_free_block(block,size);
block->set_type(type);
- block->set_marked_p(false);
return block;
}
else
void heap::mark_block(heap_block *block)
{
- block->set_marked_p(true);
-}
-
-void heap::clear_mark_bits()
-{
- heap_block *scan = first_block();
-
- while(scan)
- {
- scan->set_marked_p(false);
- scan = next_block(scan);
- }
+ state->set_marked_p(block);
}
/* Compute total sum of sizes of free blocks, and size of largest free block */
*max_free = 0;
heap_block *scan = first_block();
+ heap_block *end = last_block();
- while(scan)
+ while(scan != end)
{
cell size = scan->size();
else
*used += size;
- scan = next_block(scan);
+ scan = scan->next();
}
}
-/* The size of the heap, not including the last block if it's free */
+/* The size of the heap after compaction */
cell heap::heap_size()
{
heap_block *scan = first_block();
-
- while(next_block(scan) != NULL)
- scan = next_block(scan);
-
- /* this is the last block in the heap, and it is free */
- if(scan->type() == FREE_BLOCK_TYPE)
- return (cell)scan - seg->start;
- /* otherwise the last block is allocated */
- else
- return seg->size;
-}
-
-void heap::compact_heap()
-{
- forwarding.clear();
-
- heap_block *scan = first_block();
- char *address = (char *)scan;
-
- /* Slide blocks up while building the forwarding hashtable. */
- while(scan)
+ heap_block *end = last_block();
+
+ while(scan != end)
{
- heap_block *next = next_block(scan);
-
- if(scan->type() != FREE_BLOCK_TYPE && scan->marked_p())
- {
- cell size = scan->size();
- memmove(address,scan,size);
- forwarding[scan] = address;
- address += size;
- }
-
- scan = next;
+ if(scan->type() == FREE_BLOCK_TYPE) break;
+ else scan = scan->next();
}
- /* Now update the free list; there will be a single free block at
- the end */
- build_free_list((cell)address - seg->start);
+ assert(scan->type() == FREE_BLOCK_TYPE);
+ assert((cell)scan + scan->size() == seg->end);
+
+ return (cell)scan - (cell)first_block();
}
heap_block *heap::free_allocated(heap_block *prev, heap_block *scan)
bool secure_gc;
segment *seg;
heap_free_list free;
- unordered_map<heap_block *, char *> forwarding;
+ mark_bits<heap_block,block_size_increment> *state;
explicit heap(bool secure_gc_, cell size, bool executable_p);
-
- inline heap_block *next_block(heap_block *block)
- {
- cell next = ((cell)block + block->size());
- if(next == seg->end)
- return NULL;
- else
- return (heap_block *)next;
- }
+ ~heap();
inline heap_block *first_block()
{
}
void clear_free_list();
- void new_heap(cell size);
void add_to_free_list(free_heap_block *block);
void build_free_list(cell size);
void assert_free_block(free_heap_block *block);
heap_block *heap_allot(cell size, cell type);
void heap_free(heap_block *block);
void mark_block(heap_block *block);
- void clear_mark_bits();
void heap_usage(cell *used, cell *total_free, cell *max_free);
cell heap_size();
void compact_heap();
heap_block *free_allocated(heap_block *prev, heap_block *scan);
- /* After code GC, all referenced code blocks have status set to B_MARKED, so any
- which are allocated and not marked can be reclaimed. */
- template<typename Iterator> void free_unmarked(Iterator &iter)
+ template<typename Iterator> void sweep_heap(Iterator &iter);
+ template<typename Iterator> void compact_heap(Iterator &iter);
+
+ template<typename Iterator> void iterate_heap(Iterator &iter)
{
- clear_free_list();
-
- heap_block *prev = NULL;
heap_block *scan = first_block();
-
- while(scan)
+ heap_block *end = last_block();
+
+ while(scan != end)
{
- if(scan->type() == FREE_BLOCK_TYPE)
- {
- if(prev && prev->type() == FREE_BLOCK_TYPE)
- prev->set_size(prev->size() + scan->size());
- else
- prev = scan;
- }
- else if(scan->marked_p())
- {
- if(prev && prev->type() == FREE_BLOCK_TYPE)
- add_to_free_list((free_heap_block *)prev);
- scan->set_marked_p(false);
- prev = scan;
- iter(scan);
- }
- else
- prev = free_allocated(prev,scan);
+ heap_block *next = scan->next();
+ if(scan->type() != FREE_BLOCK_TYPE) iter(scan,scan->size());
+ scan = next;
+ }
+ }
+};
+
+/* After code GC, all live code blocks are marked, so any
+which are not marked can be reclaimed. */
+template<typename Iterator> void heap::sweep_heap(Iterator &iter)
+{
+ this->clear_free_list();
- scan = next_block(scan);
+ heap_block *prev = NULL;
+ heap_block *scan = this->first_block();
+ heap_block *end = this->last_block();
+
+ while(scan != end)
+ {
+ if(scan->type() == FREE_BLOCK_TYPE)
+ {
+ if(prev && prev->type() == FREE_BLOCK_TYPE)
+ prev->set_size(prev->size() + scan->size());
+ else
+ prev = scan;
+ }
+ else if(this->state->is_marked_p(scan))
+ {
+ if(prev && prev->type() == FREE_BLOCK_TYPE)
+ this->add_to_free_list((free_heap_block *)prev);
+ prev = scan;
+ iter(scan,scan->size());
}
+ else
+ prev = this->free_allocated(prev,scan);
- if(prev && prev->type() == FREE_BLOCK_TYPE)
- add_to_free_list((free_heap_block *)prev);
+ scan = scan->next();
}
-};
+
+ if(prev && prev->type() == FREE_BLOCK_TYPE)
+ this->add_to_free_list((free_heap_block *)prev);
+}
+
+/* The forwarding map must be computed first by calling
+state->compute_forwarding(). */
+template<typename Iterator> void heap::compact_heap(Iterator &iter)
+{
+ heap_compacter<heap_block,block_size_increment,Iterator> compacter(state,first_block(),iter);
+ this->iterate_heap(compacter);
+
+ /* Now update the free list; there will be a single free block at
+ the end */
+ this->build_free_list((cell)compacter.address - this->seg->start);
+}
}
code->build_free_list(h->code_size);
}
-/* Save the current image to disk */
-bool factor_vm::save_image(const vm_char *filename)
-{
- FILE* file;
- image_header h;
-
- file = OPEN_WRITE(filename);
- if(file == NULL)
- {
- print_string("Cannot open image file: "); print_native_string(filename); nl();
- print_string(strerror(errno)); nl();
- return false;
- }
-
- h.magic = image_magic;
- h.version = image_version;
- h.data_relocation_base = data->tenured->start;
- h.data_size = data->tenured->here - data->tenured->start;
- h.code_relocation_base = code->seg->start;
- h.code_size = code->heap_size();
-
- h.true_object = true_object;
- h.bignum_zero = bignum_zero;
- h.bignum_pos_one = bignum_pos_one;
- h.bignum_neg_one = bignum_neg_one;
-
- for(cell i = 0; i < USER_ENV; i++)
- h.userenv[i] = (save_env_p(i) ? userenv[i] : false_object);
-
- bool ok = true;
-
- if(fwrite(&h,sizeof(image_header),1,file) != 1) ok = false;
- if(fwrite((void*)data->tenured->start,h.data_size,1,file) != 1) ok = false;
- if(fwrite(code->first_block(),h.code_size,1,file) != 1) ok = false;
- if(fclose(file)) ok = false;
-
- if(!ok)
- {
- print_string("save-image failed: "); print_string(strerror(errno)); nl();
- }
-
- return ok;
-}
-
-void factor_vm::primitive_save_image()
-{
- /* do a full GC to push everything into tenured space */
- primitive_compact_gc();
-
- gc_root<byte_array> path(dpop(),this);
- path.untag_check(this);
- save_image((vm_char *)(path.untagged() + 1));
-}
-
-void factor_vm::primitive_save_image_and_exit()
-{
- /* We unbox this before doing anything else. This is the only point
- where we might throw an error, so we have to throw an error here since
- later steps destroy the current image. */
- gc_root<byte_array> path(dpop(),this);
- path.untag_check(this);
-
- /* strip out userenv data which is set on startup anyway */
- for(cell i = 0; i < USER_ENV; i++)
- {
- if(!save_env_p(i)) userenv[i] = false_object;
- }
-
- gc(collect_full_op,
- 0, /* requested size */
- false, /* discard objects only reachable from stacks */
- true /* compact the code heap */);
-
- /* Save the image */
- if(save_image((vm_char *)(path.untagged() + 1)))
- exit(0);
- else
- exit(1);
-}
-
void factor_vm::data_fixup(cell *handle, cell data_relocation_base)
{
if(immediate_p(*handle))
code_block_fixupper(factor_vm *parent_, cell data_relocation_base_) :
parent(parent_), data_relocation_base(data_relocation_base_) { }
- void operator()(code_block *compiled)
+ void operator()(code_block *compiled, cell size)
{
parent->fixup_code_block(compiled,data_relocation_base);
}
userenv[IMAGE_ENV] = allot_alien(false_object,(cell)p->image_path);
}
+/* Save the current image to disk */
+bool factor_vm::save_image(const vm_char *filename)
+{
+	FILE* file;
+	image_header h;
+
+	file = OPEN_WRITE(filename);
+	if(file == NULL)
+	{
+		print_string("Cannot open image file: "); print_native_string(filename); nl();
+		print_string(strerror(errno)); nl();
+		return false;
+	}
+
+	/* Fill in the header. code_size comes from heap_size(), which measures
+	the heap up to its trailing free block, so the caller must have compacted
+	the code heap first. */
+	h.magic = image_magic;
+	h.version = image_version;
+	h.data_relocation_base = data->tenured->start;
+	h.data_size = data->tenured->here - data->tenured->start;
+	h.code_relocation_base = code->seg->start;
+	h.code_size = code->heap_size();
+
+	h.true_object = true_object;
+	h.bignum_zero = bignum_zero;
+	h.bignum_pos_one = bignum_pos_one;
+	h.bignum_neg_one = bignum_neg_one;
+
+	/* Only user environment slots flagged by save_env_p() are persisted;
+	the rest are written as false_object */
+	for(cell i = 0; i < USER_ENV; i++)
+		h.userenv[i] = (save_env_p(i) ? userenv[i] : false_object);
+
+	bool ok = true;
+
+	/* On-disk layout: header, then the tenured data heap, then the code heap */
+	if(fwrite(&h,sizeof(image_header),1,file) != 1) ok = false;
+	if(fwrite((void*)data->tenured->start,h.data_size,1,file) != 1) ok = false;
+	if(fwrite(code->first_block(),h.code_size,1,file) != 1) ok = false;
+	if(fclose(file)) ok = false;
+
+	if(!ok)
+	{
+		print_string("save-image failed: "); print_string(strerror(errno)); nl();
+	}
+
+	return ok;
+}
+
+void factor_vm::primitive_save_image()
+{
+	/* do a full GC to push everything into tenured space */
+	primitive_compact_gc();
+
+	gc_root<byte_array> path(dpop(),this);
+	path.untag_check(this);
+	save_image((vm_char *)(path.untagged() + 1));
+}
+
+void factor_vm::primitive_save_image_and_exit()
+{
+	/* We unbox this before doing anything else. This is the only point
+	where we might throw an error, so we have to throw an error here since
+	later steps destroy the current image. */
+	gc_root<byte_array> path(dpop(),this);
+	path.untag_check(this);
+
+	/* strip out userenv data which is set on startup anyway */
+	for(cell i = 0; i < USER_ENV; i++)
+		if(!save_env_p(i)) userenv[i] = false_object;
+
+	/* Full GC with code heap compaction, so the code heap is contiguous
+	when save_image() measures and writes it */
+	gc(collect_full_op,
+		0, /* requested size */
+		false, /* discard objects only reachable from stacks */
+		true /* compact the code heap */);
+
+	/* Save the image; process exit status reflects success or failure */
+	if(save_image((vm_char *)(path.untagged() + 1)))
+		exit(0);
+	else
+		exit(1);
+}
+
}
{
cell header;
- bool marked_p() { return header & 1; }
- void set_marked_p(bool marked)
- {
- if(marked)
- header |= 1;
- else
- header &= ~1;
- }
-
cell type() { return (header >> 1) & 0x1f; }
void set_type(cell type)
{
{
header = (header & 0x2f) | (size << 6);
}
+
+ inline heap_block *next()
+ {
+ return (heap_block *)((cell)this + size());
+ }
};
struct free_heap_block : public heap_block
{
THREADHANDLE thread_id = pthread_from_mach_thread_np(thread);
assert(thread_id);
- unordered_map<THREADHANDLE, factor_vm*>::const_iterator vm = thread_vms.find(thread_id);
+ std::map<THREADHANDLE, factor_vm*>::const_iterator vm = thread_vms.find(thread_id);
if (vm != thread_vms.end())
vm->second->call_fault_handler(exception,code,exc_state,thread_state,float_state);
}
--- /dev/null
+namespace factor
+{
+
+const int forwarding_granularity = 64;
+
+/* Bitmap bookkeeping for a mark-compact heap: one mark bit and one
+"allocated" bit per Granularity-byte line of the managed region, plus a
+per-64-line forwarding base used to compute compaction destinations. */
+template<typename Block, int Granularity> struct mark_bits {
+	cell start; /* base address of the managed region */
+	cell size;  /* size of the managed region in bytes */
+	/* number of u64 words in each bitmap; note the truncating division —
+	presumably size is always a multiple of Granularity *
+	forwarding_granularity — TODO confirm */
+	cell bits_size;
+	u64 *marked;
+	u64 *allocated;
+	cell *forwarding; /* running popcount of 'marked' per 64-line group */
+
+	void clear_mark_bits()
+	{
+		memset(marked,0,bits_size * sizeof(u64));
+	}
+
+	void clear_allocated_bits()
+	{
+		memset(allocated,0,bits_size * sizeof(u64));
+	}
+
+	void clear_forwarding()
+	{
+		memset(forwarding,0,bits_size * sizeof(cell));
+	}
+
+	explicit mark_bits(cell start_, cell size_) :
+		start(start_),
+		size(size_),
+		bits_size(size / Granularity / forwarding_granularity),
+		marked(new u64[bits_size]),
+		allocated(new u64[bits_size]),
+		forwarding(new cell[bits_size])
+	{
+		clear_mark_bits();
+		clear_allocated_bits();
+		clear_forwarding();
+	}
+
+	~mark_bits()
+	{
+		delete[] marked;
+		marked = NULL;
+		delete[] allocated;
+		allocated = NULL;
+		delete[] forwarding;
+		forwarding = NULL;
+	}
+
+	/* Line number (Granularity-byte units from 'start') of a block address */
+	cell block_line(Block *address)
+	{
+		return (((cell)address - start) / Granularity);
+	}
+
+	/* Inverse of block_line() */
+	Block *line_block(cell line)
+	{
+		return (Block *)(line * Granularity + start);
+	}
+
+	/* Decompose a block address into (u64 word index, bit index within word) */
+	std::pair<cell,cell> bitmap_deref(Block *address)
+	{
+		cell line_number = block_line(address);
+		cell word_index = (line_number >> 6);
+		cell word_shift = (line_number & 63);
+
+#ifdef FACTOR_DEBUG
+		assert(word_index < bits_size);
+#endif
+
+		return std::make_pair(word_index,word_shift);
+	}
+
+	/* Test the single bit for the line containing 'address' */
+	bool bitmap_elt(u64 *bits, Block *address)
+	{
+		std::pair<cell,cell> pair = bitmap_deref(address);
+		return (bits[pair.first] & ((u64)1 << pair.second)) != 0;
+	}
+
+	/* Set every line bit covered by the block, i.e. the half-open range
+	[address, address->next()) */
+	void set_bitmap_range(u64 *bits, Block *address)
+	{
+		std::pair<cell,cell> start = bitmap_deref(address);
+		std::pair<cell,cell> end = bitmap_deref(address->next());
+
+		u64 start_mask = ((u64)1 << start.second) - 1;
+		u64 end_mask = ((u64)1 << end.second) - 1;
+
+		if(start.first == end.first)
+			bits[start.first] |= start_mask ^ end_mask;
+		else
+		{
+			bits[start.first] |= ~start_mask;
+
+			for(cell index = start.first + 1; index < end.first; index++)
+				bits[index] = (u64)-1;
+
+			/* end_mask is 0 when the range ends on a word boundary,
+			making this a no-op */
+			bits[end.first] |= end_mask;
+		}
+	}
+
+	bool is_marked_p(Block *address)
+	{
+		return bitmap_elt(marked,address);
+	}
+
+	/* Marks the block's entire line range, not just its first line */
+	void set_marked_p(Block *address)
+	{
+		set_bitmap_range(marked,address);
+	}
+
+	bool is_allocated_p(Block *address)
+	{
+		return bitmap_elt(allocated,address);
+	}
+
+	void set_allocated_p(Block *address)
+	{
+		set_bitmap_range(allocated,address);
+	}
+
+	/* From http://chessprogramming.wikispaces.com/Population+Count */
+	cell popcount(u64 x)
+	{
+		u64 k1 = 0x5555555555555555ll;
+		u64 k2 = 0x3333333333333333ll;
+		u64 k4 = 0x0f0f0f0f0f0f0f0fll;
+		u64 kf = 0x0101010101010101ll;
+		x = x - ((x >> 1) & k1); // put count of each 2 bits into those 2 bits
+		x = (x & k2) + ((x >> 2) & k2); // put count of each 4 bits into those 4 bits
+		x = (x + (x >> 4)) & k4 ; // put count of each 8 bits into those 8 bits
+		x = (x * kf) >> 56; // returns 8 most significant bits of x + (x<<8) + (x<<16) + (x<<24) + ...
+
+		return (cell)x;
+	}
+
+	/* The eventual destination of a block after compaction is just the number
+	of marked blocks before it. Live blocks must be marked on entry. */
+	void compute_forwarding()
+	{
+		cell accum = 0;
+		for(cell index = 0; index < bits_size; index++)
+		{
+			forwarding[index] = accum;
+			accum += popcount(marked[index]);
+		}
+	}
+
+	/* We have the popcount for every 64 entries; look up and compute the rest */
+	Block *forward_block(Block *original)
+	{
+		std::pair<cell,cell> pair = bitmap_deref(original);
+
+		cell approx_popcount = forwarding[pair.first];
+		/* mask selects the marked lines strictly before 'original'
+		within its word */
+		u64 mask = ((u64)1 << pair.second) - 1;
+
+		cell new_line_number = approx_popcount + popcount(marked[pair.first] & mask);
+		return line_block(new_line_number);
+	}
+};
+
+/* Sliding-compaction visitor: marked blocks are copied down to 'address'
+in iteration order and the client iterator is invoked on each block at its
+new location; unmarked blocks are simply skipped (reclaimed). Must be
+applied to blocks in ascending address order so that the destination never
+overtakes the source. */
+template<typename Block, int Granularity, typename Iterator> struct heap_compacter {
+	mark_bits<Block,Granularity> *state;
+	char *address; /* next free destination; starts at the heap's first block */
+	Iterator &iter;
+
+	explicit heap_compacter(mark_bits<Block,Granularity> *state_, Block *address_, Iterator &iter_) :
+		state(state_), address((char *)address_), iter(iter_) {}
+
+	void operator()(Block *block, cell size)
+	{
+		if(this->state->is_marked_p(block))
+		{
+			/* memmove, not memcpy: source and destination ranges
+			may overlap when blocks slide down by less than their
+			own size */
+			memmove(address,block,size);
+			iter((Block *)address,size);
+			address += size;
+		}
+	}
+};
+
+}
/* C++ headers */
#include <algorithm>
+#include <map>
#include <set>
#include <vector>
-#if __GNUC__ == 4
- #include <tr1/unordered_map>
-
- namespace factor
- {
- using std::tr1::unordered_map;
- }
-#elif __GNUC__ == 3
- #include <boost/unordered_map.hpp>
-
- namespace factor
- {
- using boost::unordered_map;
- }
-#else
- #error Factor requires GCC 3.x or later
-#endif
-
/* Forward-declare this since it comes up in function prototypes */
namespace factor
{
#include "words.hpp"
#include "float_bits.hpp"
#include "io.hpp"
+#include "mark_bits.hpp"
#include "heap.hpp"
#include "image.hpp"
#include "alien.hpp"
void factor_vm::set_quot_xt(quotation *quot, code_block *code)
{
- assert(code->type() == QUOTATION_TYPE);
quot->code = code;
quot->xt = code->xt();
}
void collect_nursery();
void collect_aging();
void collect_to_tenured();
- void big_code_heap_update();
- void small_code_heap_update();
void collect_full_impl(bool trace_contexts_p);
void collect_growing_heap(cell requested_bytes, bool trace_contexts_p, bool compact_code_heap_p);
void collect_full(bool trace_contexts_p, bool compact_code_heap_p);
void update_literal_references(code_block *compiled);
void relocate_code_block_step(relocation_entry rel, cell index, code_block *compiled);
void update_word_references(code_block *compiled);
- void update_code_block_for_full_gc(code_block *compiled);
+ void update_code_block_words_and_literals(code_block *compiled);
void check_code_address(cell address);
void relocate_code_block(code_block *compiled);
void fixup_labels(array *labels, code_block *compiled);
bool in_code_heap_p(cell ptr);
void jit_compile_word(cell word_, cell def_, bool relocate);
void update_code_heap_words();
+ void update_code_heap_words_and_literals();
+ void relocate_code_heap();
void primitive_modify_code_heap();
void primitive_code_room();
void forward_object_xts();
void primitive_strip_stack_traces();
/* Apply a function to every code block */
- template<typename Iterator> void iterate_code_heap(Iterator &iter)
- {
- heap_block *scan = code->first_block();
-
- while(scan)
+ template<typename Iterator> struct code_heap_iterator {
+ Iterator &iter;
+ explicit code_heap_iterator(Iterator &iter_) : iter(iter_) {}
+ void operator()(heap_block *block, cell size)
{
- if(scan->type() != FREE_BLOCK_TYPE)
- iter((code_block *)scan);
- scan = code->next_block(scan);
+ iter((code_block *)block,size);
}
+ };
+
+ template<typename Iterator> void iterate_code_heap(Iterator &iter_)
+ {
+ code_heap_iterator<Iterator> iter(iter_);
+ code->iterate_heap(iter);
}
//callbacks
};
-extern unordered_map<THREADHANDLE, factor_vm *> thread_vms;
+extern std::map<THREADHANDLE, factor_vm *> thread_vms;
}