]> gitweb.factorcode.org Git - factor.git/blob - vm/code_gc.cpp
Merge branch 'master' of git://factorcode.org/git/factor
[factor.git] / vm / code_gc.cpp
1 #include "master.hpp"
2
3 namespace factor
4 {
5
6 static void clear_free_list(heap *heap)
7 {
8         memset(&heap->free,0,sizeof(heap_free_list));
9 }
10
11 /* This malloc-style heap code is reasonably generic. Maybe in the future, it
12 will be used for the data heap too, if we ever get incremental
13 mark/sweep/compact GC. */
14 void new_heap(heap *heap, cell size)
15 {
16         heap->seg = alloc_segment(align_page(size));
17         if(!heap->seg)
18                 fatal_error("Out of memory in new_heap",size);
19
20         clear_free_list(heap);
21 }
22
23 static void add_to_free_list(heap *heap, free_heap_block *block)
24 {
25         if(block->size < free_list_count * block_size_increment)
26         {
27                 int index = block->size / block_size_increment;
28                 block->next_free = heap->free.small_blocks[index];
29                 heap->free.small_blocks[index] = block;
30         }
31         else
32         {
33                 block->next_free = heap->free.large_blocks;
34                 heap->free.large_blocks = block;
35         }
36 }
37
/* Called after reading the code heap from the image file, and after code GC.

In the former case, we must add a large free block from compiling.base + size to
compiling.limit. */
void build_free_list(heap *heap, cell size)
{
	/* tracks the most recently visited block, so the final block's size
	can be adjusted after the scan */
	heap_block *prev = NULL;

	clear_free_list(heap);

	/* round size up to the block granularity (a power of two) */
	size = (size + block_size_increment - 1) & ~(block_size_increment - 1);

	heap_block *scan = first_block(heap);
	free_heap_block *end = (free_heap_block *)(heap->seg->start + size);

	/* Add all free blocks within the first 'size' bytes to the free list */
	while(scan && scan < (heap_block *)end)
	{
		switch(scan->status)
		{
		case B_FREE:
			add_to_free_list(heap,(free_heap_block *)scan);
			break;
		case B_ALLOCATED:
			break;
		default:
			critical_error("Invalid scan->status",(cell)scan);
			break;
		}

		prev = scan;
		scan = next_block(heap,scan);
	}

	/* If there is room at the end of the heap, add a free block. This
	branch is only taken after loading a new image, not after code GC */
	if((cell)(end + 1) <= heap->seg->end)
	{
		end->status = B_FREE;
		end->size = heap->seg->end - (cell)end;

		/* add final free block */
		add_to_free_list(heap,end);
	}
	/* This branch is taken if the newly loaded image fits exactly, or
	after code GC */
	else
	{
		/* even if there's no room at the end of the heap for a new
		free block, we might have to stretch the last block by a few
		bytes so that prev + prev->size lands exactly on the end of
		the segment */
		if(prev) prev->size = heap->seg->end - (cell)prev;
	}

}
93
94 static void assert_free_block(free_heap_block *block)
95 {
96         if(block->status != B_FREE)
97                 critical_error("Invalid block in free list",(cell)block);
98 }
99                 
100 static free_heap_block *find_free_block(heap *heap, cell size)
101 {
102         cell attempt = size;
103
104         while(attempt < free_list_count * block_size_increment)
105         {
106                 int index = attempt / block_size_increment;
107                 free_heap_block *block = heap->free.small_blocks[index];
108                 if(block)
109                 {
110                         assert_free_block(block);
111                         heap->free.small_blocks[index] = block->next_free;
112                         return block;
113                 }
114
115                 attempt *= 2;
116         }
117
118         free_heap_block *prev = NULL;
119         free_heap_block *block = heap->free.large_blocks;
120
121         while(block)
122         {
123                 assert_free_block(block);
124                 if(block->size >= size)
125                 {
126                         if(prev)
127                                 prev->next_free = block->next_free;
128                         else
129                                 heap->free.large_blocks = block->next_free;
130                         return block;
131                 }
132
133                 prev = block;
134                 block = block->next_free;
135         }
136
137         return NULL;
138 }
139
140 static free_heap_block *split_free_block(heap *heap, free_heap_block *block, cell size)
141 {
142         if(block->size != size )
143         {
144                 /* split the block in two */
145                 free_heap_block *split = (free_heap_block *)((cell)block + size);
146                 split->status = B_FREE;
147                 split->size = block->size - size;
148                 split->next_free = block->next_free;
149                 block->size = size;
150                 add_to_free_list(heap,split);
151         }
152
153         return block;
154 }
155
156 /* Allocate a block of memory from the mark and sweep GC heap */
157 heap_block *heap_allot(heap *heap, cell size)
158 {
159         size = (size + block_size_increment - 1) & ~(block_size_increment - 1);
160
161         free_heap_block *block = find_free_block(heap,size);
162         if(block)
163         {
164                 block = split_free_block(heap,block,size);
165
166                 block->status = B_ALLOCATED;
167                 return block;
168         }
169         else
170                 return NULL;
171 }
172
173 /* Deallocates a block manually */
174 void heap_free(heap *heap, heap_block *block)
175 {
176         block->status = B_FREE;
177         add_to_free_list(heap,(free_heap_block *)block);
178 }
179
180 void mark_block(heap_block *block)
181 {
182         /* If already marked, do nothing */
183         switch(block->status)
184         {
185         case B_MARKED:
186                 return;
187         case B_ALLOCATED:
188                 block->status = B_MARKED;
189                 break;
190         default:
191                 critical_error("Marking the wrong block",(cell)block);
192                 break;
193         }
194 }
195
196 /* If in the middle of code GC, we have to grow the heap, data GC restarts from
197 scratch, so we have to unmark any marked blocks. */
198 void unmark_marked(heap *heap)
199 {
200         heap_block *scan = first_block(heap);
201
202         while(scan)
203         {
204                 if(scan->status == B_MARKED)
205                         scan->status = B_ALLOCATED;
206
207                 scan = next_block(heap,scan);
208         }
209 }
210
/* After code GC, all referenced code blocks have status set to B_MARKED, so any
which are allocated and not marked can be reclaimed. Sweeps the heap: reclaims
unmarked blocks, coalesces adjacent free blocks, rebuilds the free list, and
resets survivors to B_ALLOCATED. 'iter' is invoked on every surviving block
(e.g. to fix up references after the sweep). */
void free_unmarked(heap *heap, heap_iterator iter)
{
	clear_free_list(heap);

	/* prev trails scan; while a run of free blocks is being accumulated,
	prev points at the head of the run and its size grows as blocks merge */
	heap_block *prev = NULL;
	heap_block *scan = first_block(heap);

	while(scan)
	{
		switch(scan->status)
		{
		case B_ALLOCATED:
			/* allocated but unmarked: garbage, reclaim it */
			if(secure_gc)
				/* zero the payload so stale data is not leaked */
				memset(scan + 1,0,scan->size - sizeof(heap_block));

			if(prev && prev->status == B_FREE)
				/* merge into the preceding free run */
				prev->size += scan->size;
			else
			{
				scan->status = B_FREE;
				prev = scan;
			}
			break;
		case B_FREE:
			if(prev && prev->status == B_FREE)
				/* merge adjacent free blocks */
				prev->size += scan->size;
			else
				prev = scan;
			break;
		case B_MARKED:
			/* survivor: flush any pending free run onto the free
			list, then unmark */
			if(prev && prev->status == B_FREE)
				add_to_free_list(heap,(free_heap_block *)prev);
			scan->status = B_ALLOCATED;
			prev = scan;
			iter(scan);
			break;
		default:
			critical_error("Invalid scan->status",(cell)scan);
		}

		scan = next_block(heap,scan);
	}

	/* a free run may extend to the end of the heap; flush it too */
	if(prev && prev->status == B_FREE)
		add_to_free_list(heap,(free_heap_block *)prev);
}
259
260 /* Compute total sum of sizes of free blocks, and size of largest free block */
261 void heap_usage(heap *heap, cell *used, cell *total_free, cell *max_free)
262 {
263         *used = 0;
264         *total_free = 0;
265         *max_free = 0;
266
267         heap_block *scan = first_block(heap);
268
269         while(scan)
270         {
271                 switch(scan->status)
272                 {
273                 case B_ALLOCATED:
274                         *used += scan->size;
275                         break;
276                 case B_FREE:
277                         *total_free += scan->size;
278                         if(scan->size > *max_free)
279                                 *max_free = scan->size;
280                         break;
281                 default:
282                         critical_error("Invalid scan->status",(cell)scan);
283                 }
284
285                 scan = next_block(heap,scan);
286         }
287 }
288
289 /* The size of the heap, not including the last block if it's free */
290 cell heap_size(heap *heap)
291 {
292         heap_block *scan = first_block(heap);
293
294         while(next_block(heap,scan) != NULL)
295                 scan = next_block(heap,scan);
296
297         /* this is the last block in the heap, and it is free */
298         if(scan->status == B_FREE)
299                 return (cell)scan - heap->seg->start;
300         /* otherwise the last block is allocated */
301         else
302                 return heap->seg->size;
303 }
304
305 /* Compute where each block is going to go, after compaction */
306 cell compute_heap_forwarding(heap *heap, unordered_map<heap_block *,char *> &forwarding)
307 {
308         heap_block *scan = first_block(heap);
309         char *address = (char *)first_block(heap);
310
311         while(scan)
312         {
313                 if(scan->status == B_ALLOCATED)
314                 {
315                         forwarding[scan] = address;
316                         address += scan->size;
317                 }
318                 else if(scan->status == B_MARKED)
319                         critical_error("Why is the block marked?",0);
320
321                 scan = next_block(heap,scan);
322         }
323
324         return (cell)address - heap->seg->start;
325 }
326
327 void compact_heap(heap *heap, unordered_map<heap_block *,char *> &forwarding)
328 {
329         heap_block *scan = first_block(heap);
330
331         while(scan)
332         {
333                 heap_block *next = next_block(heap,scan);
334
335                 if(scan->status == B_ALLOCATED)
336                         memmove(forwarding[scan],scan,scan->size);
337                 scan = next;
338         }
339 }
340
341 }