/* vm/inline_cache.cpp — polymorphic inline cache (PIC) compilation for the Factor VM */
1 #include "master.hpp"
2
3 namespace factor
4 {
5
6 void factor_vm::init_inline_caching(int max_size)
7 {
8         max_pic_size = max_size;
9 }
10
11 void factor_vm::deallocate_inline_cache(cell return_address)
12 {
13         /* Find the call target. */
14         void *old_entry_point = get_call_target(return_address);
15         check_code_pointer((cell)old_entry_point);
16
17         code_block *old_block = (code_block *)old_entry_point - 1;
18
19         /* Free the old PIC since we know its unreachable */
20         if(old_block->pic_p())
21                 code->free(old_block);
22 }
23
24 /* Figure out what kind of type check the PIC needs based on the methods
25 it contains */
26 cell factor_vm::determine_inline_cache_type(array *cache_entries)
27 {
28         bool seen_tuple = false;
29
30         cell i;
31         for(i = 0; i < array_capacity(cache_entries); i += 2)
32         {
33                 /* Is it a tuple layout? */
34                 if(TAG(array_nth(cache_entries,i)) == ARRAY_TYPE)
35                 {
36                         seen_tuple = true;
37                         break;
38                 }
39         }
40
41         return seen_tuple ? PIC_TUPLE : PIC_TAG;
42 }
43
44 void factor_vm::update_pic_count(cell type)
45 {
46         if(type == PIC_TAG)
47                 dispatch_stats.pic_tag_count++;
48         else
49                 dispatch_stats.pic_tuple_count++;
50 }
51
52 struct inline_cache_jit : public jit {
53         fixnum index;
54
55         explicit inline_cache_jit(cell generic_word_,factor_vm *vm) : jit(code_block_pic,generic_word_,vm) {};
56
57         void emit_check(cell klass);
58         void compile_inline_cache(fixnum index,
59                 cell generic_word_,
60                 cell methods_,
61                 cell cache_entries_,
62                 bool tail_call_p);
63 };
64
65 void inline_cache_jit::emit_check(cell klass)
66 {
67         cell code_template;
68         if(TAG(klass) == FIXNUM_TYPE)
69                 code_template = parent->special_objects[PIC_CHECK_TAG];
70         else
71                 code_template = parent->special_objects[PIC_CHECK_TUPLE];
72
73         emit_with_literal(code_template,klass);
74 }
75
76 /* index: 0 = top of stack, 1 = item underneath, etc
77    cache_entries: array of class/method pairs */
78 void inline_cache_jit::compile_inline_cache(fixnum index,
79         cell generic_word_,
80         cell methods_,
81         cell cache_entries_,
82         bool tail_call_p)
83 {
84         data_root<word> generic_word(generic_word_,parent);
85         data_root<array> methods(methods_,parent);
86         data_root<array> cache_entries(cache_entries_,parent);
87
88         cell inline_cache_type = parent->determine_inline_cache_type(cache_entries.untagged());
89         parent->update_pic_count(inline_cache_type);
90
91         /* Generate machine code to determine the object's class. */
92         emit_with_literal(parent->special_objects[PIC_LOAD],tag_fixnum(-index * sizeof(cell)));
93         emit(parent->special_objects[inline_cache_type]);
94
95         /* Generate machine code to check, in turn, if the class is one of the cached entries. */
96         cell i;
97         for(i = 0; i < array_capacity(cache_entries.untagged()); i += 2)
98         {
99                 /* Class equal? */
100                 cell klass = array_nth(cache_entries.untagged(),i);
101                 emit_check(klass);
102
103                 /* Yes? Jump to method */
104                 cell method = array_nth(cache_entries.untagged(),i + 1);
105                 emit_with_literal(parent->special_objects[PIC_HIT],method);
106         }
107
108         /* If none of the above conditionals tested true, then execution "falls
109            through" to here. */
110
111         /* A stack frame is set up, since the inline-cache-miss sub-primitive
112         makes a subroutine call to the VM. */
113         emit(parent->special_objects[JIT_PROLOG]);
114
115         /* The inline-cache-miss sub-primitive call receives enough information to
116            reconstruct the PIC with the new entry. */
117         push(generic_word.value());
118         push(methods.value());
119         push(tag_fixnum(index));
120         push(cache_entries.value());
121
122         emit_subprimitive(
123                 parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
124                 true, /* tail_call_p */
125                 true); /* stack_frame_p */
126 }
127
128 code_block *factor_vm::compile_inline_cache(fixnum index,
129         cell generic_word_,
130         cell methods_,
131         cell cache_entries_,
132         bool tail_call_p)
133 {
134         data_root<word> generic_word(generic_word_,this);
135         data_root<array> methods(methods_,this);
136         data_root<array> cache_entries(cache_entries_,this);
137
138         inline_cache_jit jit(generic_word.value(),this);
139         jit.compile_inline_cache(index,
140                                  generic_word.value(),
141                                  methods.value(),
142                                  cache_entries.value(),
143                                  tail_call_p);
144         code_block *code = jit.to_code_block(JIT_FRAME_SIZE);
145         initialize_code_block(code);
146         return code;
147 }
148
149 /* A generic word's definition performs general method lookup. */
150 void *factor_vm::megamorphic_call_stub(cell generic_word)
151 {
152         return untag<word>(generic_word)->entry_point;
153 }
154
155 cell factor_vm::inline_cache_size(cell cache_entries)
156 {
157         return array_capacity(untag_check<array>(cache_entries)) / 2;
158 }
159
160 /* Allocates memory */
161 cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_, cell method_)
162 {
163         data_root<array> cache_entries(cache_entries_,this);
164         data_root<object> klass(klass_,this);
165         data_root<word> method(method_,this);
166
167         cell pic_size = array_capacity(cache_entries.untagged());
168         data_root<array> new_cache_entries(reallot_array(cache_entries.untagged(),pic_size + 2),this);
169         set_array_nth(new_cache_entries.untagged(),pic_size,klass.value());
170         set_array_nth(new_cache_entries.untagged(),pic_size + 1,method.value());
171         return new_cache_entries.value();
172 }
173
174 void factor_vm::update_pic_transitions(cell pic_size)
175 {
176         if(pic_size == max_pic_size)
177                 dispatch_stats.pic_to_mega_transitions++;
178         else if(pic_size == 0)
179                 dispatch_stats.cold_call_to_ic_transitions++;
180         else if(pic_size == 1)
181                 dispatch_stats.ic_to_pic_transitions++;
182 }
183
/* The cache_entries parameter is empty (on cold call site) or has entries
(on cache miss). Called from assembly with the actual return address.
Compilation of the inline cache may trigger a GC, which may trigger a compaction;
also, the block containing the return address may now be dead. Use a code_root
to take care of the details. */
void *factor_vm::inline_cache_miss(cell return_address_)
{
	/* code_root tracks the return address across code-heap compaction,
	and records whether its code block survives GC (see 'valid' below). */
	code_root return_address(return_address_,this);
	check_code_pointer(return_address.value);
	/* Captured before any allocation, since the answer depends only on
	the call site itself. */
	bool tail_call_site = tail_call_site_p(return_address.value);

#ifdef PIC_DEBUG
	std::cout << "Inline cache miss at "
		<< (tail_call_site ? "tail" : "non-tail")
		<< " call site 0x" << std::hex << return_address.value << std::dec
		<< std::endl;
	print_callstack();
#endif

	/* Pop the four values pushed by the PIC's miss fallthrough (see
	inline_cache_jit::compile_inline_cache) in reverse push order, rooting
	each against GC. */
	data_root<array> cache_entries(ctx->pop(),this);
	fixnum index = untag_fixnum(ctx->pop());
	data_root<array> methods(ctx->pop(),this);
	data_root<word> generic_word(ctx->pop(),this);
	/* The dispatched-on object, at 'index' cells below the top of the
	data stack (0 = top). */
	data_root<object> object(((cell *)ctx->datastack)[-index],this);

	cell pic_size = inline_cache_size(cache_entries.value());

	update_pic_transitions(pic_size);

	void *xt;

	if(pic_size >= max_pic_size)
		/* PIC is full: give up and fall back to full method lookup. */
		xt = megamorphic_call_stub(generic_word.value());
	else
	{
		cell klass = object_class(object.value());
		cell method = lookup_method(object.value(),methods.value());

		/* add_inline_cache_entry allocates; root the new array before
		compiling, which can also allocate. */
		data_root<array> new_cache_entries(add_inline_cache_entry(
			cache_entries.value(),
			klass,
			method),this);

		xt = compile_inline_cache(index,
			generic_word.value(),
			methods.value(),
			new_cache_entries.value(),
			tail_call_site)->entry_point();
	}

	/* Install the new stub. */
	/* 'valid' is false if the code block containing the call site was
	collected during compilation above; then there is nothing to patch. */
	if(return_address.valid)
	{
		/* Since each PIC is only referenced from a single call site,
		   if the old call target was a PIC, we can deallocate it immediately,
		   instead of leaving dead PICs around until the next GC. */
		deallocate_inline_cache(return_address.value);
		set_call_target(return_address.value,xt);

#ifdef PIC_DEBUG
		std::cout << "Updated "
			<< (tail_call_site ? "tail" : "non-tail")
			<< " call site 0x" << std::hex << return_address.value << std::dec
			<< " with 0x" << std::hex << (cell)xt << std::dec << std::endl;
		print_callstack();
#endif
	}

	/* The assembly caller jumps to this entry point. */
	return xt;
}
254
/* C-linkage entry point called from the PIC miss sub-primitive's
assembly; forwards to the member function on the given VM instance. */
VM_C_API void *inline_cache_miss(cell return_address, factor_vm *parent)
{
	return parent->inline_cache_miss(return_address);
}
259
260 }