.*.swp
.DS_Store
.gdb_history
+.vs
/factor
/logs
/work
# Batch-mode inference rules use a double colon (::) so nmake passes all
# outdated source files to one cl invocation (required for /MP parallelism).
.cpp.obj::
- cl /EHsc $(CL_FLAGS) /MP /Fovm/ /c $<
+ cl /EHsc $(CL_FLAGS) /W4 /MP /Fovm/ /c $<
.c.obj::
cl /EHsc $(CL_FLAGS) /MP /Fovm/ /c $<
aging_space(cell size, cell start)
: bump_allocator(size, start), starts(size, start) {}
- object* allot(cell size) {
- if (here + size > end)
+ object* allot(cell dsize) {
+ if (here + dsize > end)
return NULL;
- object* obj = bump_allocator::allot(size);
+ object* obj = bump_allocator::allot(dsize);
starts.record_object_start_offset(obj);
return obj;
}
cell next_object_after(cell scan) {
- cell size = ((object*)scan)->size();
- if (scan + size < here)
- return scan + size;
+ cell dsize = ((object*)scan)->size();
+ if (scan + dsize < here)
+ return scan + dsize;
return 0;
}
inline object* factor_vm::allot_object(cell type, cell size) {
FACTOR_ASSERT(!current_gc);
- bump_allocator *nursery = data->nursery;
+ bump_allocator *dnursery = data->nursery;
// If the object is bigger than the nursery, allocate it in tenured space
- if (size >= nursery->size)
+ if (size >= dnursery->size)
return allot_large_object(type, size);
// If the object is smaller than the nursery, allocate it in the nursery,
// after a GC if needed
- if (nursery->here + size > nursery->end)
+ if (dnursery->here + size > dnursery->end)
primitive_minor_gc();
- object* obj = nursery->allot(size);
+ object* obj = dnursery->allot(size);
obj->initialize(type);
return obj;
return (cell)obj >= start && (cell)obj < end;
}
- object* allot(cell size) {
+ object* allot(cell dsize) {
cell h = here;
- here = h + align(size, data_alignment);
+ here = h + align(dsize, data_alignment);
return (object*)h;
}
return (bits[position.first] & ((cell)1 << position.second)) != 0;
}
- void set_bitmap_range(cell* bits, const cell address, const cell size) {
- std::pair<cell, cell> start = bitmap_deref(address);
- std::pair<cell, cell> end = bitmap_deref(address + size);
+ void set_bitmap_range(cell* bits, const cell address, const cell dsize) {
+ std::pair<cell, cell> bstart = bitmap_deref(address);
+ std::pair<cell, cell> end = bitmap_deref(address + dsize);
- cell start_mask = ((cell)1 << start.second) - 1;
+ cell start_mask = ((cell)1 << bstart.second) - 1;
cell end_mask = ((cell)1 << end.second) - 1;
- if (start.first == end.first)
- bits[start.first] |= start_mask ^ end_mask;
+ if (bstart.first == end.first)
+ bits[bstart.first] |= start_mask ^ end_mask;
else {
- FACTOR_ASSERT(start.first < bits_size);
- bits[start.first] |= ~start_mask;
+ FACTOR_ASSERT(bstart.first < bits_size);
+ bits[bstart.first] |= ~start_mask;
- for (cell index = start.first + 1; index < end.first; index++)
+ for (cell index = bstart.first + 1; index < end.first; index++)
bits[index] = (cell)-1;
if (end_mask != 0) {
bool marked_p(const cell address) { return bitmap_elt(marked, address); }
- void set_marked_p(const cell address, const cell size) {
- set_bitmap_range(marked, address, size);
+ void set_marked_p(const cell address, const cell dsize) {
+ set_bitmap_range(marked, address, dsize);
}
// The eventual destination of a block after compaction is just the number
tenured_space(cell size, cell start)
: free_list_allocator<object>(size, start), starts(size, start) {}
- object* allot(cell size) {
- object* obj = free_list_allocator<object>::allot(size);
+ object* allot(cell dsize) {
+ object* obj = free_list_allocator<object>::allot(dsize);
if (obj) {
starts.record_object_start_offset(obj);
return obj;
}
cell next_object_after(cell scan) {
- cell size = ((object*)scan)->size();
- return next_allocated_object_after(scan + size);
+ cell dsize = ((object*)scan)->size();
+ return next_allocated_object_after(scan + dsize);
}
void sweep() {