/* vm/inline_cache.cpp — Factor VM polymorphic inline cache (PIC) implementation.
   (Commit note: use the term "entry point" instead of "XT" throughout the VM.) */
1 #include "master.hpp"
2
3 namespace factor
4 {
5
6 void factor_vm::init_inline_caching(int max_size)
7 {
8         max_pic_size = max_size;
9 }
10
11 void factor_vm::deallocate_inline_cache(cell return_address)
12 {
13         /* Find the call target. */
14         void *old_entry_point = get_call_target(return_address);
15         check_code_pointer((cell)old_entry_point);
16
17         code_block *old_block = (code_block *)old_entry_point - 1;
18
19         /* Free the old PIC since we know its unreachable */
20         if(old_block->pic_p())
21                 code->free(old_block);
22 }
23
24 /* Figure out what kind of type check the PIC needs based on the methods
25 it contains */
26 cell factor_vm::determine_inline_cache_type(array *cache_entries)
27 {
28         bool seen_tuple = false;
29
30         cell i;
31         for(i = 0; i < array_capacity(cache_entries); i += 2)
32         {
33                 /* Is it a tuple layout? */
34                 if(TAG(array_nth(cache_entries,i)) == ARRAY_TYPE)
35                 {
36                         seen_tuple = true;
37                         break;
38                 }
39         }
40
41         return seen_tuple ? PIC_TUPLE : PIC_TAG;
42 }
43
44 void factor_vm::update_pic_count(cell type)
45 {
46         if(type == PIC_TAG)
47                 dispatch_stats.pic_tag_count++;
48         else
49                 dispatch_stats.pic_tuple_count++;
50 }
51
/* One-shot JIT used to assemble the machine code for a single inline
   cache stub. Inherits the emit/push machinery from jit. */
struct inline_cache_jit : public jit {
	/* NOTE(review): not referenced in this file — presumably the stack
	   index of the dispatched-on object; confirm before relying on it. */
	fixnum index;

	explicit inline_cache_jit(cell generic_word_,factor_vm *vm) : jit(code_block_pic,generic_word_,vm) {};

	/* Emit a class check for one cached entry. */
	void emit_check(cell klass);
	/* Emit the full PIC body: class lookup, cached checks, miss path. */
	void compile_inline_cache(fixnum index,
				  cell generic_word_,
				  cell methods_,
				  cell cache_entries_,
				  bool tail_call_p);
};
64
65 void inline_cache_jit::emit_check(cell klass)
66 {
67         cell code_template;
68         if(TAG(klass) == FIXNUM_TYPE)
69                 code_template = parent->special_objects[PIC_CHECK_TAG];
70         else
71                 code_template = parent->special_objects[PIC_CHECK_TUPLE];
72
73         emit_with_literal(code_template,klass);
74 }
75
/* index: 0 = top of stack, 1 = item underneath, etc
   cache_entries: array of class/method pairs.
   Emits the complete PIC body: class lookup, one check-and-jump per
   cached entry, then the fall-through miss path that calls back into
   the VM to extend the cache. */
void inline_cache_jit::compile_inline_cache(fixnum index,
                                            cell generic_word_,
                                            cell methods_,
                                            cell cache_entries_,
                                            bool tail_call_p)
{
	/* data_roots pin these objects across any GC that occurs while
	   emitting code. Note that cache_entries.untagged() is deliberately
	   re-fetched after every emit below — presumably because emitting
	   can allocate and thus trigger a GC that moves the array; do not
	   hoist it out of the loop. */
	data_root<word> generic_word(generic_word_,parent);
	data_root<array> methods(methods_,parent);
	data_root<array> cache_entries(cache_entries_,parent);

	cell inline_cache_type = parent->determine_inline_cache_type(cache_entries.untagged());
	parent->update_pic_count(inline_cache_type);

	/* Generate machine code to determine the object's class. */
	emit_class_lookup(index,inline_cache_type);

	/* Generate machine code to check, in turn, if the class is one of the cached entries. */
	cell i;
	for(i = 0; i < array_capacity(cache_entries.untagged()); i += 2)
	{
		/* Class equal? */
		cell klass = array_nth(cache_entries.untagged(),i);
		emit_check(klass);

		/* Yes? Jump to method */
		cell method = array_nth(cache_entries.untagged(),i + 1);
		emit_with_literal(parent->special_objects[PIC_HIT],method);
	}

	/* If none of the above conditionals tested true, then execution "falls
	   through" to here. */

	/* A stack frame is set up, since the inline-cache-miss sub-primitive
	makes a subroutine call to the VM. */
	emit(parent->special_objects[JIT_PROLOG]);

	/* The inline-cache-miss sub-primitive call receives enough information to
	   reconstruct the PIC with the new entry. This push order is mirrored,
	   in reverse, by the pops in factor_vm::inline_cache_miss. */
	push(generic_word.value());
	push(methods.value());
	push(tag_fixnum(index));
	push(cache_entries.value());

	emit_subprimitive(
		parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
		true, /* tail_call_p */
		true); /* stack_frame_p */
}
126
127 code_block *factor_vm::compile_inline_cache(fixnum index,
128         cell generic_word_,
129         cell methods_,
130         cell cache_entries_,
131         bool tail_call_p)
132 {
133         data_root<word> generic_word(generic_word_,this);
134         data_root<array> methods(methods_,this);
135         data_root<array> cache_entries(cache_entries_,this);
136
137         inline_cache_jit jit(generic_word.value(),this);
138         jit.compile_inline_cache(index,
139                                  generic_word.value(),
140                                  methods.value(),
141                                  cache_entries.value(),
142                                  tail_call_p);
143         code_block *code = jit.to_code_block();
144         initialize_code_block(code);
145         return code;
146 }
147
148 /* A generic word's definition performs general method lookup. */
149 void *factor_vm::megamorphic_call_stub(cell generic_word)
150 {
151         return untag<word>(generic_word)->entry_point;
152 }
153
154 cell factor_vm::inline_cache_size(cell cache_entries)
155 {
156         return array_capacity(untag_check<array>(cache_entries)) / 2;
157 }
158
159 /* Allocates memory */
160 cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_, cell method_)
161 {
162         data_root<array> cache_entries(cache_entries_,this);
163         data_root<object> klass(klass_,this);
164         data_root<word> method(method_,this);
165
166         cell pic_size = array_capacity(cache_entries.untagged());
167         data_root<array> new_cache_entries(reallot_array(cache_entries.untagged(),pic_size + 2),this);
168         set_array_nth(new_cache_entries.untagged(),pic_size,klass.value());
169         set_array_nth(new_cache_entries.untagged(),pic_size + 1,method.value());
170         return new_cache_entries.value();
171 }
172
173 void factor_vm::update_pic_transitions(cell pic_size)
174 {
175         if(pic_size == max_pic_size)
176                 dispatch_stats.pic_to_mega_transitions++;
177         else if(pic_size == 0)
178                 dispatch_stats.cold_call_to_ic_transitions++;
179         else if(pic_size == 1)
180                 dispatch_stats.ic_to_pic_transitions++;
181 }
182
/* The cache_entries parameter is empty (on cold call site) or has entries
(on cache miss). Called from assembly with the actual return address.
Compilation of the inline cache may trigger a GC, which may trigger a compaction;
also, the block containing the return address may now be dead. Use a code_root
to take care of the details. */
void *factor_vm::inline_cache_miss(cell return_address_)
{
	code_root return_address(return_address_,this);
	check_code_pointer(return_address.value);
	bool tail_call_site = tail_call_site_p(return_address.value);

#ifdef PIC_DEBUG
	std::cout << "Inline cache miss at "
		<< (tail_call_site ? "tail" : "non-tail")
		<< " call site 0x" << std::hex << return_address.value << std::dec
		<< std::endl;
#endif

	/* Pop the values the miss stub pushed, in reverse of the push order
	   in inline_cache_jit::compile_inline_cache. */
	data_root<array> cache_entries(ctx->pop(),this);
	fixnum index = untag_fixnum(ctx->pop());
	data_root<array> methods(ctx->pop(),this);
	data_root<word> generic_word(ctx->pop(),this);
	/* The dispatched-on object is still on the data stack, `index` cells
	   below the top. */
	data_root<object> object(((cell *)ctx->datastack)[-index],this);

	cell pic_size = inline_cache_size(cache_entries.value());

	update_pic_transitions(pic_size);

	void *xt;

	/* Cache full? Retarget the site at the generic word's own entry
	   point (megamorphic lookup) instead of growing the PIC further. */
	if(pic_size >= max_pic_size)
		xt = megamorphic_call_stub(generic_word.value());
	else
	{
		cell klass = object_class(object.value());
		cell method = lookup_method(object.value(),methods.value());

		data_root<array> new_cache_entries(add_inline_cache_entry(
			cache_entries.value(),
			klass,
			method),this);

		/* Compilation may GC/compact; the code_root above tracks the
		   (possibly moved) return address. */
		xt = compile_inline_cache(index,
			generic_word.value(),
			methods.value(),
			new_cache_entries.value(),
			tail_call_site)->entry_point();
	}

	/* Install the new stub — but only if the caller's code block
	   survived any GC triggered above. */
	if(return_address.valid)
	{
		/* Since each PIC is only referenced from a single call site,
		   if the old call target was a PIC, we can deallocate it immediately,
		   instead of leaving dead PICs around until the next GC. */
		deallocate_inline_cache(return_address.value);
		set_call_target(return_address.value,xt);

#ifdef PIC_DEBUG
		std::cout << "Updated "
			<< (tail_call_site ? "tail" : "non-tail")
			<< " call site 0x" << std::hex << return_address.value << std::dec
			<< " with 0x" << std::hex << (cell)xt << std::dec << std::endl;
#endif
	}

	return xt;
}
251
252 VM_C_API void *inline_cache_miss(cell return_address, factor_vm *parent)
253 {
254         return parent->inline_cache_miss(return_address);
255 }
256
257 }