/* vm/inline_cache.cpp */
1 #include "master.hpp"
2
3 namespace factor
4 {
5
6 void factor_vm::init_inline_caching(int max_size)
7 {
8         max_pic_size = max_size;
9 }
10
11 void factor_vm::deallocate_inline_cache(cell return_address)
12 {
13         /* Find the call target. */
14         void *old_xt = get_call_target(return_address);
15         check_code_pointer((cell)old_xt);
16
17         code_block *old_block = (code_block *)old_xt - 1;
18
19         /* Free the old PIC since we know its unreachable */
20         if(old_block->pic_p())
21                 code->free(old_block);
22 }
23
24 /* Figure out what kind of type check the PIC needs based on the methods
25 it contains */
26 cell factor_vm::determine_inline_cache_type(array *cache_entries)
27 {
28         bool seen_tuple = false;
29
30         cell i;
31         for(i = 0; i < array_capacity(cache_entries); i += 2)
32         {
33                 /* Is it a tuple layout? */
34                 if(TAG(array_nth(cache_entries,i)) == ARRAY_TYPE)
35                 {
36                         seen_tuple = true;
37                         break;
38                 }
39         }
40
41         return seen_tuple ? PIC_TUPLE : PIC_TAG;
42 }
43
44 void factor_vm::update_pic_count(cell type)
45 {
46         if(type == PIC_TAG)
47                 dispatch_stats.pic_tag_count++;
48         else
49                 dispatch_stats.pic_tuple_count++;
50 }
51
52 struct inline_cache_jit : public jit {
53         fixnum index;
54
55         explicit inline_cache_jit(cell generic_word_,factor_vm *vm) : jit(code_block_pic,generic_word_,vm) {};
56
57         void emit_check(cell klass);
58         void compile_inline_cache(fixnum index,
59                                   cell generic_word_,
60                                   cell methods_,
61                                   cell cache_entries_,
62                                   bool tail_call_p);
63 };
64
65 void inline_cache_jit::emit_check(cell klass)
66 {
67         cell code_template;
68         if(TAG(klass) == FIXNUM_TYPE)
69                 code_template = parent->special_objects[PIC_CHECK_TAG];
70         else
71                 code_template = parent->special_objects[PIC_CHECK_TUPLE];
72
73         emit_with_literal(code_template,klass);
74 }
75
/* index: 0 = top of stack, 1 = item underneath, etc
   cache_entries: array of class/method pairs */
void inline_cache_jit::compile_inline_cache(fixnum index,
                                            cell generic_word_,
                                            cell methods_,
                                            cell cache_entries_,
                                            bool tail_call_p)
{
	/* Root the tagged arguments: code emission below can allocate. */
	data_root<word> generic_word(generic_word_,parent);
	data_root<array> methods(methods_,parent);
	data_root<array> cache_entries(cache_entries_,parent);

	/* PIC_TAG if every cached class is a tag, PIC_TUPLE otherwise. */
	cell inline_cache_type = parent->determine_inline_cache_type(cache_entries.untagged());
	parent->update_pic_count(inline_cache_type);

	/* Generate machine code to determine the object's class. */
	emit_class_lookup(index,inline_cache_type);

	/* Generate machine code to check, in turn, if the class is one of the cached entries. */
	cell i;
	for(i = 0; i < array_capacity(cache_entries.untagged()); i += 2)
	{
		/* Class equal? (even slots hold classes, odd slots methods) */
		cell klass = array_nth(cache_entries.untagged(),i);
		emit_check(klass);

		/* Yes? Jump to method */
		cell method = array_nth(cache_entries.untagged(),i + 1);
		emit_with_literal(parent->special_objects[PIC_HIT],method);
	}

	/* Generate machine code to handle a cache miss, which ultimately results in
	   this function being called again.

	   The inline-cache-miss primitive call receives enough information to
	   reconstruct the PIC: these pushes are popped in reverse order by
	   factor_vm::inline_cache_miss. */
	push(generic_word.value());
	push(methods.value());
	push(tag_fixnum(index));
	push(cache_entries.value());
	word_special(parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD]);
}
118
119 code_block *factor_vm::compile_inline_cache(fixnum index,
120         cell generic_word_,
121         cell methods_,
122         cell cache_entries_,
123         bool tail_call_p)
124 {
125         data_root<word> generic_word(generic_word_,this);
126         data_root<array> methods(methods_,this);
127         data_root<array> cache_entries(cache_entries_,this);
128
129         inline_cache_jit jit(generic_word.value(),this);
130         jit.compile_inline_cache(index,
131                                  generic_word.value(),
132                                  methods.value(),
133                                  cache_entries.value(),
134                                  tail_call_p);
135         code_block *code = jit.to_code_block();
136         initialize_code_block(code);
137         return code;
138 }
139
140 /* A generic word's definition performs general method lookup. */
141 void *factor_vm::megamorphic_call_stub(cell generic_word)
142 {
143         return untag<word>(generic_word)->xt;
144 }
145
146 cell factor_vm::inline_cache_size(cell cache_entries)
147 {
148         return array_capacity(untag_check<array>(cache_entries)) / 2;
149 }
150
151 /* Allocates memory */
152 cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_, cell method_)
153 {
154         data_root<array> cache_entries(cache_entries_,this);
155         data_root<object> klass(klass_,this);
156         data_root<word> method(method_,this);
157
158         cell pic_size = array_capacity(cache_entries.untagged());
159         data_root<array> new_cache_entries(reallot_array(cache_entries.untagged(),pic_size + 2),this);
160         set_array_nth(new_cache_entries.untagged(),pic_size,klass.value());
161         set_array_nth(new_cache_entries.untagged(),pic_size + 1,method.value());
162         return new_cache_entries.value();
163 }
164
165 void factor_vm::update_pic_transitions(cell pic_size)
166 {
167         if(pic_size == max_pic_size)
168                 dispatch_stats.pic_to_mega_transitions++;
169         else if(pic_size == 0)
170                 dispatch_stats.cold_call_to_ic_transitions++;
171         else if(pic_size == 1)
172                 dispatch_stats.ic_to_pic_transitions++;
173 }
174
/* The cache_entries parameter is empty (on cold call site) or has entries
(on cache miss). Called from assembly with the actual return address.
Compilation of the inline cache may trigger a GC, which may trigger a compaction;
also, the block containing the return address may now be dead. Use a code_root
to take care of the details. */
void *factor_vm::inline_cache_miss(cell return_address_)
{
	code_root return_address(return_address_,this);

	check_code_pointer(return_address.value);

	/* Pop the reconstruction parameters in the reverse of the order
	   the miss stub pushed them (see inline_cache_jit::compile_inline_cache). */
	data_root<array> cache_entries(dpop(),this);
	fixnum index = untag_fixnum(dpop());
	data_root<array> methods(dpop(),this);
	data_root<word> generic_word(dpop(),this);
	/* The dispatched object sits `index' items down the data stack. */
	data_root<object> object(((cell *)ds)[-index],this);

	cell pic_size = inline_cache_size(cache_entries.value());

	update_pic_transitions(pic_size);

	void *xt;

	if(pic_size >= max_pic_size)
		/* Cache is full: give up on caching and fall back to the
		   generic word's full method lookup. */
		xt = megamorphic_call_stub(generic_word.value());
	else
	{
		/* Extend the cache with this object's class and recompile
		   the PIC stub. Both steps may allocate. */
		cell klass = object_class(object.value());
		cell method = lookup_method(object.value(),methods.value());

		data_root<array> new_cache_entries(add_inline_cache_entry(
							   cache_entries.value(),
							   klass,
							   method),this);
		xt = compile_inline_cache(index,
					  generic_word.value(),
					  methods.value(),
					  new_cache_entries.value(),
					  tail_call_site_p(return_address.value))->xt();
	}

	/* Install the new stub. */
	if(return_address.valid)
	{
		/* Since each PIC is only referenced from a single call site,
		   if the old call target was a PIC, we can deallocate it immediately,
		   instead of leaving dead PICs around until the next GC. */
		deallocate_inline_cache(return_address.value);
		set_call_target(return_address.value,xt);

#ifdef PIC_DEBUG
		std::cout << "Updated "
			<< (tail_call_site_p(return_address.value) ? "tail" : "non-tail")
			<< " call site 0x" << std::hex << return_address.value << std::dec
			<< " with " << std::hex << (cell)xt << std::dec << "\n";
#endif
	}

	return xt;
}
235
236 VM_C_API void *inline_cache_miss(cell return_address, factor_vm *parent)
237 {
238         return parent->inline_cache_miss(return_address);
239 }
240
241 }