void factor_vm::init_inline_caching(int max_size) { max_pic_size = max_size; }

void factor_vm::deallocate_inline_cache(cell return_address) {
  /* Find the call target. */
  void* old_entry_point = get_call_target(return_address);
  check_code_pointer((cell)old_entry_point);

  code_block* old_block = (code_block*)old_entry_point - 1;

  /* Free the old PIC since we know it's unreachable. */
  if (old_block->pic_p())
    code->free(old_block);
}

/* Figure out what kind of type check the PIC needs based on the methods
   it contains. */
cell factor_vm::determine_inline_cache_type(array* cache_entries) {
  bool seen_tuple = false;

  cell i;
  for (i = 0; i < array_capacity(cache_entries); i += 2) {
    /* Is it a tuple layout? */
    if (TAG(array_nth(cache_entries, i)) == ARRAY_TYPE) {
      seen_tuple = true;
      break;
    }
  }

  return seen_tuple ? PIC_TUPLE : PIC_TAG;
}
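
/* Count, for dispatch statistics, which kind of PIC is being compiled. */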
void factor_vm::update_pic_count(cell type) {
  if (type == PIC_TAG)
    dispatch_stats.pic_tag_count++;
  else
    dispatch_stats.pic_tuple_count++;
}
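
/* A jit specialized for assembling inline cache stubs from the machine code
   templates stored in special_objects. */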
struct inline_cache_jit : public jit {
  inline_cache_jit(cell generic_word, factor_vm* vm)
      : jit(code_block_pic, generic_word, vm) {}

  void emit_check(cell klass);
  void compile_inline_cache(fixnum index, cell generic_word_, cell methods_,
                            cell cache_entries_, bool tail_call_p);
};
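
/* Emit the class check for one cache entry: a tag check when the cached
   class is a built-in type tag, otherwise a tuple layout check. */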
/* Allocates memory */
void inline_cache_jit::emit_check(cell klass) {
  cell code_template;
  if (TAG(klass) == FIXNUM_TYPE)
    code_template = parent->special_objects[PIC_CHECK_TAG];
  else
    code_template = parent->special_objects[PIC_CHECK_TUPLE];

  emit_with_literal(code_template, klass);
}

/* index: 0 = top of stack, 1 = item underneath, etc.
   cache_entries: array of class/method pairs */
/* Allocates memory */
void inline_cache_jit::compile_inline_cache(fixnum index, cell generic_word_,
                                            cell methods_, cell cache_entries_,
                                            bool tail_call_p) {
  data_root<word> generic_word(generic_word_, parent);
  data_root<array> methods(methods_, parent);
  data_root<array> cache_entries(cache_entries_, parent);

  cell inline_cache_type =
      parent->determine_inline_cache_type(cache_entries.untagged());
  parent->update_pic_count(inline_cache_type);

  /* Generate machine code to determine the object's class. */
  emit_with_literal(parent->special_objects[PIC_LOAD],
                    tag_fixnum(-index * sizeof(cell)));
  emit(parent->special_objects[inline_cache_type]);

  /* Generate machine code to check, in turn, if the class is one of the cached
     entries. */
  cell i;
  for (i = 0; i < array_capacity(cache_entries.untagged()); i += 2) {
    cell klass = array_nth(cache_entries.untagged(), i);
    emit_check(klass);

    /* Yes? Jump to method */
    cell method = array_nth(cache_entries.untagged(), i + 1);
    emit_with_literal(parent->special_objects[PIC_HIT], method);
  }

  /* If none of the above conditionals tested true, then execution "falls
     through" to here, and the cache miss is handled below. */

  /* A stack frame is set up, since the inline-cache-miss sub-primitive
     makes a subroutine call to the VM. */
  emit(parent->special_objects[JIT_PROLOG]);

  /* The inline-cache-miss sub-primitive call receives enough information to
     reconstruct the PIC with the new entry. */
  push(generic_word.value());
  push(methods.value());
  push(tag_fixnum(index));
  push(cache_entries.value());

  emit_subprimitive(
      parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
      true,  /* tail_call_p */
      true); /* stack_frame_p */
}
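
/* Build a new PIC code block for a call site from the given class/method
   pairs. */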
/* Allocates memory */
code_block* factor_vm::compile_inline_cache(fixnum index, cell generic_word_,
                                            cell methods_, cell cache_entries_,
                                            bool tail_call_p) {
  data_root<word> generic_word(generic_word_, this);
  data_root<array> methods(methods_, this);
  data_root<array> cache_entries(cache_entries_, this);

  inline_cache_jit jit(generic_word.value(), this);
  jit.compile_inline_cache(index, generic_word.value(), methods.value(),
                           cache_entries.value(), tail_call_p);
  code_block* code = jit.to_code_block(JIT_FRAME_SIZE);
  initialize_code_block(code);
  return code;
}

/* A generic word's definition performs general method lookup. */
void* factor_vm::megamorphic_call_stub(cell generic_word) {
  return untag<word>(generic_word)->entry_point;
}
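
/* Each cache entry is a class/method pair occupying two slots, so the number
   of entries is half the array's capacity. */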
cell factor_vm::inline_cache_size(cell cache_entries) {
  return array_capacity(untag_check<array>(cache_entries)) / 2;
}
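
/* Grow the cache entries array by one class/method pair, storing the new
   entry at the end. */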
/* Allocates memory */
cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_,
                                       cell method_) {
  data_root<array> cache_entries(cache_entries_, this);
  data_root<object> klass(klass_, this);
  data_root<word> method(method_, this);

  cell pic_size = array_capacity(cache_entries.untagged());
  data_root<array> new_cache_entries(
      reallot_array(cache_entries.untagged(), pic_size + 2), this);
  set_array_nth(new_cache_entries.untagged(), pic_size, klass.value());
  set_array_nth(new_cache_entries.untagged(), pic_size + 1, method.value());
  return new_cache_entries.value();
}
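
/* Record, for dispatch statistics, which call-site transition this miss
   represents: cold call to IC, IC to PIC, or PIC to megamorphic. */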
void factor_vm::update_pic_transitions(cell pic_size) {
  if (pic_size == max_pic_size)
    dispatch_stats.pic_to_mega_transitions++;
  else if (pic_size == 0)
    dispatch_stats.cold_call_to_ic_transitions++;
  else if (pic_size == 1)
    dispatch_stats.ic_to_pic_transitions++;
}

/* The cache_entries parameter is empty (on a cold call site) or has entries
   (on a cache miss). Called from assembly with the actual return address.
   Compilation of the inline cache may trigger a GC, which may trigger a
   compaction; also, the block containing the return address may now be dead.
   Use a code_root to take care of the details. */
/* Allocates memory */
void* factor_vm::inline_cache_miss(cell return_address_) {
  code_root return_address(return_address_, this);
  check_code_pointer(return_address.value);
  bool tail_call_site = tail_call_site_p(return_address.value);

#ifdef PIC_DEBUG
  std::cout << "Inline cache miss at " << (tail_call_site ? "tail" : "non-tail")
            << " call site 0x" << std::hex << return_address.value << std::dec
            << std::endl;
  print_callstack();
#endif
  data_root<array> cache_entries(ctx->pop(), this);
  fixnum index = untag_fixnum(ctx->pop());
  data_root<array> methods(ctx->pop(), this);
  data_root<word> generic_word(ctx->pop(), this);
  data_root<object> object(((cell*)ctx->datastack)[-index], this);

  cell pic_size = inline_cache_size(cache_entries.value());

  update_pic_transitions(pic_size);

  void* xt;
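
  /* If the cache is already full, fall back to the generic word's megamorphic
     stub; otherwise compile a larger PIC that also handles the missing
     object's class. */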
  if (pic_size >= max_pic_size)
    xt = megamorphic_call_stub(generic_word.value());
  else {
    cell klass = object_class(object.value());
    cell method = lookup_method(object.value(), methods.value());

    data_root<array> new_cache_entries(
        add_inline_cache_entry(cache_entries.value(), klass, method), this);

    xt = compile_inline_cache(index, generic_word.value(), methods.value(),
                              new_cache_entries.value(), tail_call_site)
             ->entry_point();
  }

  /* Install the new stub. */
  if (return_address.valid) {
    /* Since each PIC is only referenced from a single call site,
       if the old call target was a PIC, we can deallocate it immediately,
       instead of leaving dead PICs around until the next GC. */
    deallocate_inline_cache(return_address.value);
    set_call_target(return_address.value, xt);

#ifdef PIC_DEBUG
    std::cout << "Updated " << (tail_call_site ? "tail" : "non-tail")
              << " call site 0x" << std::hex << return_address.value << std::dec
              << " with 0x" << std::hex << (cell)xt << std::dec << std::endl;
#endif
  }

  return xt;
}
/* Allocates memory */
VM_C_API void* inline_cache_miss(cell return_address, factor_vm* parent) {
  return parent->inline_cache_miss(return_address);
}