namespace factor {
// Constructs the code heap: allocates the backing memory segment of
// align_page(size) bytes and a free-list allocator over the usable
// region. The segment's first page is the safepoint guard page; the
// SEH area begins on the following page.
// NOTE(review): this chunk carries unresolved diff markers ('-'/'+')
// and appears to be missing lines — `start` is used below with no
// visible declaration, and the `if (!seg)` branch has no visible
// body. Confirm against the complete file before relying on it.
code_heap::code_heap(cell size) {
// Reject heaps above the hard limit (limit widened by one bit in this
// change: 2^(bits-6) -> 2^(bits-5) bytes).
- if (size > ((uint64_t)1 << (sizeof(cell) * 8 - 6)))
+ if (size > ((uint64_t)1 << (sizeof(cell) * 8 - 5)))
fatal_error("Heap too large", size);
seg = new segment(align_page(size), true);
if (!seg)
// presumably an out-of-memory fatal_error() is elided here — verify
allocator = new free_list_allocator<code_block>(seg->end - start, start);
- /* See os-windows-x86.64.cpp for seh_area usage */
+ // See os-windows-x86.64.cpp for seh_area usage
safepoint_page = seg->start;
seh_area = (char*)seg->start + getpagesize();
}
// Flushes the CPU instruction cache for the entire code heap segment.
void code_heap::flush_icache() {
  auto begin = seg->start;
  auto length = seg->size;
  factor::flush_icache(begin, length);
}
+void code_heap::set_safepoint_guard(bool locked) {
+ if (!set_memory_locked(safepoint_page, getpagesize(), locked)) {
+ fatal_error("Cannot (un)protect safepoint guard page", safepoint_page);
+ }
+}
+
// Sweeps the code heap, dropping freed blocks from the all_blocks set.
// NOTE(review): this block is truncated in this chunk — the lambda body
// and the remainder of sweep() are not visible here; do not edit from
// this view alone.
void code_heap::sweep() {
auto clear_free_blocks_from_all_blocks = [&](code_block* block, cell size) {
std::set<cell>::iterator erase_from =
// Debug check: every code block reachable through the allocator's
// iteration must be present in the all_blocks set.
// NOTE: resolved from unapplied '+'/'-' diff-marker lines in the file;
// iterate() is called with no_fixup() per the '+' side of the diff.
void code_heap::verify_all_blocks_set() {
  auto all_blocks_set_verifier = [&](code_block* block, cell size) {
    // FACTOR_ASSERT compiles away in release builds; the casts keep
    // the parameters formally "used" so no warnings are emitted.
    (void)block;
    (void)size;
    FACTOR_ASSERT(all_blocks.find((cell)block) != all_blocks.end());
  };
  allocator->iterate(all_blocks_set_verifier, no_fixup());
}
// Returns the code block that contains `address`.
// NOTE(review): truncated in this chunk — the lookup that positions
// `blocki` (presumably a search over all_blocks) is not visible here.
code_block* code_heap::code_block_for_address(cell address) {
--blocki;
code_block* found_block = (code_block*)*blocki;
// Sanity check: the candidate block must start at or before `address`.
// The upper-bound half of the check is commented out (see XXX below).
FACTOR_ASSERT(found_block->entry_point() <=
- address /* XXX this isn't valid during fixup. should store the
- size in the map
- && address - found_block->entry_point() <
- found_block->size()*/);
+ address // XXX this isn't valid during fixup. should store the
+ // size in the map
+ // && address - found_block->entry_point() <
+ // found_block->size()
+ );
return found_block;
}
// Given the top of a stack frame, returns the top of the next-older
// frame: reads the return address stored at frame_top, finds its
// owning code block, and advances by that block's frame size for the
// address.
// NOTE: resolved from diff-marker residue; the '-' side deleted a
// commented-out `FACTOR_ASSERT(addr != 0)` which is omitted here.
cell code_heap::frame_predecessor(cell frame_top) {
  cell addr = *(cell*)frame_top;
  // The return address must point into the code heap segment.
  FACTOR_ASSERT(seg->in_segment_p(addr));
  code_block* owner = code_block_for_address(addr);
  cell frame_size = owner->stack_frame_size_for_address(addr);
  return frame_top + frame_size;
}
-/* Recomputes the all_blocks set of code blocks */
+// Recomputes the all_blocks set of code blocks
void code_heap::initialize_all_blocks_set() {
all_blocks.clear();
auto all_blocks_set_inserter = [&](code_block* block, cell size) {
+ (void)size;
all_blocks.insert((cell)block);
};
- allocator->iterate(all_blocks_set_inserter);
+ allocator->iterate(all_blocks_set_inserter, no_fixup());
#ifdef FACTOR_DEBUG
verify_all_blocks_set();
#endif
}
-/* Update pointers to words referenced from all code blocks.
-Only needed after redefining an existing word.
-If generic words were redefined, inline caches need to be reset. */
+// Update pointers to words referenced from all code blocks.
+// Only needed after redefining an existing word.
+// If generic words were redefined, inline caches need to be reset.
void factor_vm::update_code_heap_words(bool reset_inline_caches) {
auto word_updater = [&](code_block* block, cell size) {
+ (void)size;
update_word_references(block, reset_inline_caches);
};
each_code_block(word_updater);
}
// NOTE(review): the run of '-' lines below records the removal of
// initialize_code_blocks() by this change; its fast-path body was
// inlined into primitive_modify_code_heap. Kept here verbatim until
// the diff residue is resolved for the whole file.
-/* Fix up new words only.
-Fast path for compilation units that only define new words. */
-void factor_vm::initialize_code_blocks() {
-
- FACTOR_FOR_EACH(code->uninitialized_blocks) {
- initialize_code_block(iter->first, iter->second);
- }
- code->uninitialized_blocks.clear();
-}
-
-/* Allocates memory */
+// Allocates memory
// Compiles an optimized code block for a word from data popped off the
// data stack, installs its entry point, and either updates references
// in existing words or (fast path) initializes only the new blocks.
// NOTE(review): truncated in this chunk — the lines that pop/extract
// `compiled_data`, `word`, `code`, `labels`, `relocation`,
// `parameters` and `literals` are not visible here. Diff markers
// ('-'/'+') are also unresolved.
void factor_vm::primitive_modify_code_heap() {
bool reset_inline_caches = to_boolean(ctx->pop());
bool update_existing_words = to_boolean(ctx->pop());
cell frame_size = untag_fixnum(array_nth(compiled_data, 5));
code_block* compiled =
- add_code_block(code_block_optimized, code, labels, word.value(),
+ add_code_block(CODE_BLOCK_OPTIMIZED, code, labels, word.value(),
relocation, parameters, literals, frame_size);
word->entry_point = compiled->entry_point();
if (update_existing_words)
update_code_heap_words(reset_inline_caches);
- else
- initialize_code_blocks();
+ else {
+ // Fast path for compilation units that only define new words.
+ FACTOR_FOR_EACH(code->uninitialized_blocks) {
+ initialize_code_block(iter->first, iter->second);
+ }
+ code->uninitialized_blocks.clear();
+ }
+ FACTOR_ASSERT(code->uninitialized_blocks.size() == 0);
}
-/* Allocates memory */
+// Allocates memory
// Pushes a byte array describing the code heap allocator's room
// (usage/free statistics) onto the data stack.
// NOTE(review): truncated in this chunk — the function's closing brace
// is not visible here.
void factor_vm::primitive_code_room() {
allocator_room room = code->allocator->as_allocator_room();
ctx->push(tag<byte_array>(byte_array_from_value(&room)));
// Clears the owner field of every code block, so the blocks no longer
// reference their word objects (used to strip stack traces).
// NOTE: resolved from an unapplied '+' diff-marker line in the file.
void factor_vm::primitive_strip_stack_traces() {
  auto stack_trace_stripper = [](code_block* block, cell size) {
    (void)size;  // callback signature requires the size; unused here
    block->owner = false_object;
  };
  each_code_block(stack_trace_stripper);
}
-/* Allocates memory */
-cell factor_vm::code_blocks() {
+// Allocates memory
+void factor_vm::primitive_code_blocks() {
std::vector<cell> objects;
-
auto code_block_accumulator = [&](code_block* block, cell size) {
+ (void)size;
objects.push_back(block->owner);
objects.push_back(block->parameters);
objects.push_back(block->relocation);
objects.push_back(tag_fixnum(block->type()));
objects.push_back(tag_fixnum(block->size()));
- /* Note: the entry point is always a multiple of the heap
- alignment (16 bytes). We cannot allocate while iterating
- through the code heap, so it is not possible to call
- from_unsigned_cell() here. It is OK, however, to add it as
- if it were a fixnum, and have library code shift it to the
- left by 4. */
+ // Note: the entry point is always a multiple of the heap
+ // alignment (16 bytes). We cannot allocate while iterating
+ // through the code heap, so it is not possible to call
+ // from_unsigned_cell() here. It is OK, however, to add it as
+ // if it were a fixnum, and have library code shift it to the
+ // left by 4.
cell entry_point = block->entry_point();
FACTOR_ASSERT((entry_point & (data_alignment - 1)) == 0);
FACTOR_ASSERT((entry_point & TAG_MASK) == FIXNUM_TYPE);
objects.push_back(entry_point);
};
each_code_block(code_block_accumulator);
- return std_vector_to_array(objects);
+ ctx->push(std_vector_to_array(objects));
}
// NOTE(review): the '-' lines below record this change removing the
// thin primitive_code_blocks() wrapper; the accumulation logic now
// lives directly in primitive_code_blocks above. Kept verbatim until
// the diff residue is resolved for the whole file.
-/* Allocates memory */
-void factor_vm::primitive_code_blocks() { ctx->push(code_blocks()); }
-
}