5 /* Certain special objects in the image are known to the runtime */
6 void factor_vm::init_objects(image_header* h) {
/* Seed the VM's special-objects table and its canonical constants
   (t, and the cached bignums 0, +1, -1) from the loaded image header. */
7 memcpy(special_objects, h->special_objects, sizeof(special_objects));
9 true_object = h->true_object;
10 bignum_zero = h->bignum_zero;
11 bignum_pos_one = h->bignum_pos_one;
12 bignum_neg_one = h->bignum_neg_one;
/* NOTE(review): the closing brace of this function is not visible in
   this chunk. */
15 void factor_vm::load_data_heap(FILE* file, image_header* h, vm_parameters* p) {
/* Grow the requested tenured size to at least 1.5x the image's data
   section so the loaded heap has headroom, then build the generational
   data heap. */
16 p->tenured_size = std::max((h->data_size * 3) / 2, p->tenured_size);
18 init_data_heap(p->young_size, p->aging_size, p->tenured_size);
/* Read the data section of the image straight into tenured space.
   NOTE(review): the declaration of bytes_read (capturing raw_fread's
   return value) is not visible in this chunk. */
21 raw_fread((void*)data->tenured->start, 1, h->data_size, file);
/* A short read means a truncated/corrupt image; this is fatal. */
23 if ((cell)bytes_read != h->data_size) {
24 std::cout << "truncated image: " << bytes_read << " bytes read, ";
25 std::cout << h->data_size << " bytes expected\n";
26 fatal_error("load_data_heap failed", 0);
/* Everything past the loaded data becomes the initial free list. */
29 data->tenured->initial_free_list(h->data_size);
32 void factor_vm::load_code_heap(FILE* file, image_header* h, vm_parameters* p) {
/* Unlike the data heap, the code heap size is not grown to fit: refuse
   images whose code section exceeds the configured heap size. */
33 if (h->code_size > p->code_size)
34 fatal_error("Code heap too small to fit image", h->code_size);
36 code = new code_heap(p->code_size);
38 if (h->code_size != 0) {
/* Read the code section into the start of the code allocator.
   NOTE(review): the declaration of bytes_read (capturing raw_fread's
   return value) is not visible in this chunk. */
40 raw_fread((void*)code->allocator->start, 1, h->code_size, file);
41 if (bytes_read != h->code_size) {
42 std::cout << "truncated image: " << bytes_read << " bytes read, ";
43 std::cout << h->code_size << " bytes expected\n";
44 fatal_error("load_code_heap failed", 0);
/* Mark the remainder of the heap free and index all loaded blocks. */
48 code->allocator->initial_free_list(h->code_size);
49 code->initialize_all_blocks_set();
52 struct startup_fixup {
/* Visitor policy used while loading an image: relocates saved heap
   addresses to their new locations by adding the data/code relocation
   deltas. NOTE(review): the data_offset / code_offset member
   declarations are not visible in this chunk. */
53 static const bool translated_code_block_map = true;
58 startup_fixup(cell data_offset, cell code_offset)
59 : data_offset(data_offset), code_offset(code_offset) {}
/* Relocate a data-heap pointer by the data delta. */
61 object* fixup_data(object* obj) {
62 return (object*)((cell)obj + data_offset);
/* Relocate a code-heap pointer by the code delta. */
65 code_block* fixup_code(code_block* obj) {
66 return (code_block*)((cell)obj + code_offset);
/* During startup, translation is the same operation as fixup, so these
   simply forward. */
69 object* translate_data(const object* obj) { return fixup_data((object*)obj); }
71 code_block* translate_code(const code_block* compiled) {
72 return fixup_code((code_block*)compiled);
/* Object/code-block sizes are computed through this fixup so embedded
   pointers are translated before being dereferenced. */
75 cell size(const object* obj) { return obj->size(*this); }
77 cell size(code_block* compiled) { return compiled->size(*this); }
80 void factor_vm::fixup_data(cell data_offset, cell code_offset) {
81 startup_fixup fixup(data_offset, code_offset);
82 slot_visitor<startup_fixup> visitor(this, fixup);
/* Relocate all GC roots first (special objects, stacks, etc.). */
83 visitor.visit_all_roots();
/* For every object in tenured space: record its start offset for the
   object-start map, relocate its slots, then apply type-specific
   fixups. */
85 auto start_object_updater = [&](object *obj, cell size) {
86 data->tenured->starts.record_object_start_offset(obj);
87 visitor.visit_slots(obj);
88 switch (obj->type()) {
/* NOTE(review): the case labels of this switch (presumably the alien
   and dll type tags) are not visible in this chunk. */
91 alien* ptr = (alien*)obj;
/* Aliens with a live base object recompute their displaced address;
   baseless aliens hold raw C addresses from the previous session,
   which are meaningless now, so they are marked expired. */
93 if (to_boolean(ptr->base))
94 ptr->update_address();
96 ptr->expired = true_object;
/* Re-open shared libraries referenced by dll objects. */
100 ffi_dlopen((dll*)obj);
/* Relocate the object's associated code block reference, if any. */
104 visitor.visit_object_code_block(obj);
109 data->tenured->iterate(start_object_updater, fixup);
112 void factor_vm::fixup_code(cell data_offset, cell code_offset) {
113 startup_fixup fixup(data_offset, code_offset);
/* Walk every code block, relocating its literal references and the
   operands embedded in its machine code. */
114 auto updater = [&](code_block* compiled, cell size) {
115 slot_visitor<startup_fixup> visitor(this, fixup);
116 visitor.visit_code_block_objects(compiled);
/* Instruction operands were assembled relative to the block's
   pre-relocation entry point, so subtract the code delta to recover
   the original base. */
117 cell rel_base = compiled->entry_point() - fixup.code_offset;
118 visitor.visit_instruction_operands(compiled, rel_base);
120 code->allocator->iterate(updater, fixup);
123 bool factor_vm::read_embedded_image_footer(FILE* file,
124 embedded_image_footer* footer) {
/* The footer lives at the very end of the file; a matching magic number
   indicates an image payload is appended to the executable. */
125 safe_fseek(file, -(off_t)sizeof(embedded_image_footer), SEEK_END);
126 safe_fread(footer, (off_t)sizeof(embedded_image_footer), 1, file);
127 return footer->magic == image_magic;
130 char *threadsafe_strerror(int errnum) {
/* Returns a heap-allocated error message for errnum; the caller owns
   (and must free) the buffer. Uses the thread-safe strerror variant
   rather than plain strerror()'s shared static buffer. */
131 char *buf = (char *) malloc(STRERROR_BUFFER_SIZE);
/* NOTE(review): the NULL check guarding this fatal_error, and the
   trailing `return buf;`, are not visible in this chunk. */
133 fatal_error("Out of memory in threadsafe_strerror, errno", errnum);
135 THREADSAFE_STRERROR(errnum, buf, STRERROR_BUFFER_SIZE);
139 FILE* factor_vm::open_image(vm_parameters* p) {
/* With an embedded image, open the running executable itself, validate
   the footer, and seek to the appended image payload; otherwise open
   the standalone image file. */
140 if (p->embedded_image) {
141 FILE* file = OPEN_READ(p->executable_path);
/* NOTE(review): the failure checks around these error reports (the
   file == NULL test and the exits that follow each message) are not
   visible in this chunk. */
143 std::cout << "Cannot open embedded image" << std::endl;
144 char *msg = threadsafe_strerror(errno);
145 std::cout << "strerror:1: " << msg << std::endl;
149 embedded_image_footer footer;
150 if (!read_embedded_image_footer(file, &footer)) {
151 std::cout << "No embedded image" << std::endl;
/* Position the stream at the start of the appended image so callers
   can read it as if it were a standalone image file. */
154 safe_fseek(file, (off_t)footer.image_offset, SEEK_SET);
157 return OPEN_READ(p->image_path);
160 /* Read an image file from disk, only done once during startup */
161 /* This function also initializes the data and code heaps */
162 void factor_vm::load_image(vm_parameters* p) {
163 FILE* file = open_image(p);
/* NOTE(review): the file == NULL check around this error report (and
   the exit that follows) is not visible in this chunk. */
165 std::cout << "Cannot open image file: " << p->image_path << std::endl;
166 char *msg = threadsafe_strerror(errno);
167 std::cout << "strerror:2: " << msg << std::endl;
/* Validate the header before trusting any of its sizes.
   NOTE(review): the declaration of h (image_header) is not visible in
   this chunk. */
172 if (raw_fread(&h, sizeof(image_header), 1, file) != 1)
173 fatal_error("Cannot read image header", 0);
175 if (h.magic != image_magic)
176 fatal_error("Bad image: magic number check failed", h.magic);
178 if (h.version != image_version)
179 fatal_error("Bad image: version number check failed", h.version);
181 load_data_heap(file, &h, p);
182 load_code_heap(file, &h, p);
/* The heaps rarely land at the addresses they were saved from; compute
   the relocation deltas and fix up every pointer in both heaps. */
188 cell data_offset = data->tenured->start - h.data_relocation_base;
189 cell code_offset = code->allocator->start - h.code_relocation_base;
191 fixup_data(data_offset, code_offset);
192 fixup_code(data_offset, code_offset);
194 /* Store image path name */
195 special_objects[OBJ_IMAGE] = allot_alien(false_object, (cell)p->image_path);
198 /* Save the current image to disk. We don't throw any exceptions here
199 because if the 'then-die' argument is t it is not safe to do
200 so. Instead we signal failure by returning false. */
201 bool factor_vm::save_image(const vm_char* saving_filename,
202 const vm_char* filename) {
/* Build a header describing the current heaps so the loader can
   relocate against these base addresses.
   NOTE(review): the declaration of h (image_header) is not visible in
   this chunk. */
205 h.magic = image_magic;
206 h.version = image_version;
207 h.data_relocation_base = data->tenured->start;
208 h.data_size = data->tenured->occupied_space();
209 h.code_relocation_base = code->allocator->start;
210 h.code_size = code->allocator->occupied_space();
212 h.true_object = true_object;
213 h.bignum_zero = bignum_zero;
214 h.bignum_pos_one = bignum_pos_one;
215 h.bignum_neg_one = bignum_neg_one;
/* Persist only the special objects that should survive a save; the
   rest are written out as f so no stale state leaks into the image. */
217 for (cell i = 0; i < special_object_count; i++)
218 h.special_objects[i] =
219 (save_special_p(i) ? special_objects[i] : false_object);
/* Write header, data heap, and code heap to the temporary name, then
   move it over the real filename so a failed save never clobbers an
   existing image.
   NOTE(review): the `return false` bodies of these failure checks are
   not visible in this chunk. */
221 FILE* file = OPEN_WRITE(saving_filename);
224 if (safe_fwrite(&h, sizeof(image_header), 1, file) != 1)
226 if (safe_fwrite((void*)data->tenured->start, h.data_size, 1, file) != 1)
228 if (safe_fwrite((void*)code->allocator->start, h.code_size, 1, file) != 1)
230 if (raw_fclose(file) == -1)
232 if (!move_file(saving_filename, filename))
237 /* Allocates memory */
238 void factor_vm::primitive_save_image() {
239 /* We unbox this before doing anything else. This is the only point
240 where we might throw an error, so we have to throw an error here since
241 later steps destroy the current image. */
242 bool then_die = to_boolean(ctx->pop());
243 byte_array* path2 = untag_check<byte_array>(ctx->pop());
244 byte_array* path1 = untag_check<byte_array>(ctx->pop());
246 /* Copy the paths to non-gc memory to avoid them hanging around in
/* NOTE(review): the rest of this comment (original line 247) is not
   visible in this chunk. */
248 vm_char* path1_saved = safe_strdup(path1->data<vm_char>());
249 vm_char* path2_saved = safe_strdup(path2->data<vm_char>());
252 /* strip out special_objects data which is set on startup anyway */
253 for (cell i = 0; i < special_object_count; i++)
254 if (!save_special_p(i))
255 special_objects[i] = false_object;
257 /* dont trace objects only reachable from context stacks so we don't
258 get volatile data saved in the image. */
259 active_contexts.clear();
260 code->uninitialized_blocks.clear();
263 /* do a full GC to push everything remaining into tenured space */
264 primitive_compact_gc();
/* Write the image, then report failure to the caller as an I/O error.
   NOTE(review): the intervening lines (then_die handling, freeing the
   saved path strings, the `if (!ret ...)` guard) are not visible in
   this chunk. */
267 bool ret = save_image(path1_saved, path2_saved);
275 general_error(ERROR_IO, tag_fixnum(errno), false_object);
279 bool factor_vm::embedded_image_p() {
/* Reports whether the running executable has an image footer appended.
   NOTE(review): this definition continues past the end of this chunk
   (the file == NULL check, fclose, and return are not visible). */
280 const vm_char* vm_path = vm_executable_path();
283 FILE* file = OPEN_READ(vm_path);
286 embedded_image_footer footer;
287 bool embedded_p = read_embedded_image_footer(file, &footer);