void factor_vm::init_inline_caching(int max_size) { max_pic_size = max_size; }

void factor_vm::deallocate_inline_cache(cell return_address) {
  /* Find the call target. */
  void* old_entry_point = get_call_target(return_address);
  check_code_pointer((cell)old_entry_point);

  code_block* old_block = (code_block*)old_entry_point - 1;

  /* Free the old PIC since we know it is unreachable. */
  if (old_block->pic_p())
    code->free(old_block);
}

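/* Illustrative sketch, not VM code: the pointer arithmetic above relies on a
   code_block header sitting immediately before the instructions it describes,
   so stepping one header back from the entry point reaches the metadata.
   `header_t` and `header_of` are hypothetical stand-ins. */
namespace layout_sketch {
struct header_t { unsigned long kind; };
/* Mirror of `(code_block*)old_entry_point - 1` above. */
inline header_t* header_of(void* entry_point) {
  return (header_t*)entry_point - 1;
}
}
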
/* Figure out what kind of type check the PIC needs based on the methods
   it contains. */
cell factor_vm::determine_inline_cache_type(array* cache_entries) {
  bool seen_tuple = false;

  for (cell i = 0; i < array_capacity(cache_entries); i += 2) {
    /* Is it a tuple layout? */
    if (TAG(array_nth(cache_entries, i)) == ARRAY_TYPE) {
      seen_tuple = true;
      break;
    }
  }

  return seen_tuple ? PIC_TUPLE : PIC_TAG;
}

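/* Sketch only: the same scan in miniature over a flat C array of tagged
   cells. `tag_of`, the mask and the constants are hypothetical; the point is
   that a single tuple-layout entry forces the more general tuple check. */
namespace type_scan_sketch {
enum { TAG_ARRAY_LIKE = 2 };             /* stands in for ARRAY_TYPE */
enum { CHECK_TAG = 0, CHECK_TUPLE = 1 };
inline int tag_of(unsigned long tagged) { return (int)(tagged & 3); }
inline int pick_check(const unsigned long* pairs, int capacity) {
  for (int i = 0; i < capacity; i += 2)
    if (tag_of(pairs[i]) == TAG_ARRAY_LIKE)
      return CHECK_TUPLE; /* a tuple layout appeared: check layouts */
  return CHECK_TAG;       /* every cached class is a built-in tag */
}
}
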
void factor_vm::update_pic_count(cell type) {
  if (type == PIC_TAG)
    dispatch_stats.pic_tag_count++;
  else
    dispatch_stats.pic_tuple_count++;
}

struct inline_cache_jit : public jit {
  inline_cache_jit(cell generic_word, factor_vm* vm)
      : jit(code_block_pic, generic_word, vm) {}

  void emit_check(cell klass);
  void compile_inline_cache(fixnum index, cell generic_word_, cell methods_,
                            cell cache_entries_, bool tail_call_p);
};

void inline_cache_jit::emit_check(cell klass) {
  cell code_template;
  if (TAG(klass) == FIXNUM_TYPE)
    code_template = parent->special_objects[PIC_CHECK_TAG];
  else
    code_template = parent->special_objects[PIC_CHECK_TUPLE];

  emit_with_literal(code_template, klass);
}

/* index: 0 = top of stack, 1 = item underneath, etc.
   cache_entries: array of class/method pairs */
void inline_cache_jit::compile_inline_cache(fixnum index, cell generic_word_,
                                            cell methods_, cell cache_entries_,
                                            bool tail_call_p) {
  data_root<word> generic_word(generic_word_, parent);
  data_root<array> methods(methods_, parent);
  data_root<array> cache_entries(cache_entries_, parent);

  cell inline_cache_type =
      parent->determine_inline_cache_type(cache_entries.untagged());
  parent->update_pic_count(inline_cache_type);

  /* Generate machine code to determine the object's class. */
  emit_with_literal(parent->special_objects[PIC_LOAD],
                    tag_fixnum(-index * sizeof(cell)));
  emit(parent->special_objects[inline_cache_type]);

  /* Generate machine code to check, in turn, if the class is one of the
     cached entries. */
  for (cell i = 0; i < array_capacity(cache_entries.untagged()); i += 2) {
    /* Is the class equal to this cached one? */
    cell klass = array_nth(cache_entries.untagged(), i);
    emit_check(klass);

    /* Yes? Jump to method */
    cell method = array_nth(cache_entries.untagged(), i + 1);
    emit_with_literal(parent->special_objects[PIC_HIT], method);
  }

  /* If none of the above conditionals tested true, then execution "falls
     through" to here. */

  /* A stack frame is set up, since the inline-cache-miss sub-primitive
     makes a subroutine call to the VM. */
  emit(parent->special_objects[JIT_PROLOG]);

  /* The inline-cache-miss sub-primitive call receives enough information to
     reconstruct the PIC with the new entry. */
  push(generic_word.value());
  push(methods.value());
  push(tag_fixnum(index));
  push(cache_entries.value());

  emit_subprimitive(
      parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
      true,  /* tail_call_p */
      true); /* stack_frame_p */
}

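/* Sketch of the control flow the templates above splice together, written as
   ordinary C++ instead of generated machine code. All names are hypothetical:
   a real PIC compares the object's class against each cached class in turn
   and falls through to the miss stub when nothing matches. */
namespace pic_flow_sketch {
struct entry { unsigned long klass; void* target; };
inline void* dispatch(unsigned long klass, const entry* entries, int size,
                      void* miss_stub) {
  for (int i = 0; i < size; i++)
    if (entries[i].klass == klass)
      return entries[i].target; /* PIC_HIT: jump straight to the method */
  return miss_stub;             /* fall through to the inline-cache-miss call */
}
}
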
code_block* factor_vm::compile_inline_cache(fixnum index, cell generic_word_,
                                            cell methods_, cell cache_entries_,
                                            bool tail_call_p) {
  data_root<word> generic_word(generic_word_, this);
  data_root<array> methods(methods_, this);
  data_root<array> cache_entries(cache_entries_, this);

  inline_cache_jit jit(generic_word.value(), this);
  jit.compile_inline_cache(index, generic_word.value(), methods.value(),
                           cache_entries.value(), tail_call_p);
  code_block* code = jit.to_code_block(JIT_FRAME_SIZE);
  initialize_code_block(code);
  return code;
}

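/* Sketch of the rooting discipline used throughout this file: raw `cell`
   parameters (trailing underscore) are wrapped in data_root<T> before anything
   can allocate, so a moving GC can update them in place. A loose RAII analogue
   with hypothetical bookkeeping: */
namespace root_sketch {
typedef unsigned long cell_t;
struct root_stack { cell_t* slots[16]; int count; }; /* hypothetical */
struct scoped_root {
  root_stack* roots;
  cell_t value;
  scoped_root(cell_t v, root_stack* r) : roots(r), value(v) {
    roots->slots[roots->count++] = &value; /* GC may rewrite this slot */
  }
  ~scoped_root() { roots->count--; } /* strict LIFO, like data_root */
};
}
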
/* A generic word's definition performs general method lookup. */
void* factor_vm::megamorphic_call_stub(cell generic_word) {
  return untag<word>(generic_word)->entry_point;
}

cell factor_vm::inline_cache_size(cell cache_entries) {
  return array_capacity(untag_check<array>(cache_entries)) / 2;
}

/* Allocates memory */
cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_,
                                       cell method_) {
  data_root<array> cache_entries(cache_entries_, this);
  data_root<object> klass(klass_, this);
  data_root<word> method(method_, this);

  cell pic_size = array_capacity(cache_entries.untagged());
  data_root<array> new_cache_entries(
      reallot_array(cache_entries.untagged(), pic_size + 2), this);
  set_array_nth(new_cache_entries.untagged(), pic_size, klass.value());
  set_array_nth(new_cache_entries.untagged(), pic_size + 1, method.value());
  return new_cache_entries.value();
}

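/* Sketch only: growing a flat klass/method pair array by one pair, as the
   function above does with reallot_array. Plain new[] stands in for the GC
   heap (the caller owns the result); names are hypothetical. */
namespace grow_sketch {
typedef unsigned long cell_t;
inline cell_t* with_new_pair(const cell_t* old_pairs, int old_capacity,
                             cell_t klass, cell_t method) {
  cell_t* grown = new cell_t[old_capacity + 2];
  for (int i = 0; i < old_capacity; i++)
    grown[i] = old_pairs[i];        /* copy the existing pairs */
  grown[old_capacity] = klass;      /* append the new class... */
  grown[old_capacity + 1] = method; /* ...and its method */
  return grown;
}
}
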
void factor_vm::update_pic_transitions(cell pic_size) {
  if (pic_size == max_pic_size)
    dispatch_stats.pic_to_mega_transitions++;
  else if (pic_size == 0)
    dispatch_stats.cold_call_to_ic_transitions++;
  else if (pic_size == 1)
    dispatch_stats.ic_to_pic_transitions++;
}

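/* Sketch: the three counters above trace a call site's life cycle. Size 0
   means a cold site gaining its first entry, size 1 means a monomorphic IC
   going polymorphic, and size == max_pic_size means the PIC is abandoned for
   the megamorphic stub. Hypothetical names: */
namespace transition_sketch {
enum site_state { MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC };
inline site_state next_state(unsigned long pic_size, unsigned long max_size) {
  if (pic_size == 0) return MONOMORPHIC;        /* cold -> first entry */
  if (pic_size >= max_size) return MEGAMORPHIC; /* give up on caching */
  return POLYMORPHIC;                           /* keep growing the PIC */
}
}
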
/* The cache_entries parameter is empty (on a cold call site) or has entries
   (on a cache miss). Called from assembly with the actual return address.
   Compilation of the inline cache may trigger a GC, which may trigger a
   compaction; also, the block containing the return address may now be dead.
   Use a code_root to take care of the details. */
void* factor_vm::inline_cache_miss(cell return_address_) {
  code_root return_address(return_address_, this);
  check_code_pointer(return_address.value);
  bool tail_call_site = tail_call_site_p(return_address.value);

176 std::cout << "Inline cache miss at " << (tail_call_site ? "tail" : "non-tail")
177 << " call site 0x" << std::hex << return_address.value << std::dec
  data_root<array> cache_entries(ctx->pop(), this);
  fixnum index = untag_fixnum(ctx->pop());
  data_root<array> methods(ctx->pop(), this);
  data_root<word> generic_word(ctx->pop(), this);
  data_root<object> object(((cell*)ctx->datastack)[-index], this);

  cell pic_size = inline_cache_size(cache_entries.value());

  update_pic_transitions(pic_size);

  void* xt;

  if (pic_size >= max_pic_size)
    xt = megamorphic_call_stub(generic_word.value());
  else {
    cell klass = object_class(object.value());
    cell method = lookup_method(object.value(), methods.value());

    data_root<array> new_cache_entries(
        add_inline_cache_entry(cache_entries.value(), klass, method), this);

    xt = compile_inline_cache(index, generic_word.value(), methods.value(),
                              new_cache_entries.value(), tail_call_site)
        ->entry_point();
  }

  /* Install the new stub. */
  if (return_address.valid) {
    /* Since each PIC is only referenced from a single call site,
       if the old call target was a PIC, we can deallocate it immediately,
       instead of leaving dead PICs around until the next GC. */
    deallocate_inline_cache(return_address.value);
    set_call_target(return_address.value, xt);

217 std::cout << "Updated " << (tail_call_site ? "tail" : "non-tail")
218 << " call site 0x" << std::hex << return_address.value << std::dec
219 << " with 0x" << std::hex << (cell)xt << std::dec << std::endl;
VM_C_API void* inline_cache_miss(cell return_address, factor_vm* parent) {
  return parent->inline_cache_miss(return_address);
}
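
/* Sketch only: what the generated miss stub effectively does around this entry
   point, modeled as C++. All names are hypothetical; the real stub is machine
   code emitted by compile_inline_cache above, and it jumps to (rather than
   calls) the returned target. */
namespace miss_protocol_sketch {
typedef unsigned long cell_t;
typedef void* (*miss_fn)(cell_t return_address, void* vm);
typedef void (*code_fn)();
/* Invoke the VM's miss handler, then transfer control to whatever it returns:
   a freshly compiled, larger PIC or the megamorphic stub. */
inline void dispatch_after_miss(miss_fn miss, cell_t return_address, void* vm) {
  code_fn target = (code_fn)miss(return_address, vm);
  target(); /* stands in for `jmp xt` in the generated code */
}
}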