5 void factor_vm::deallocate_inline_cache(cell return_address) {
6 // Find the call target.
7 void* old_entry_point = get_call_target(return_address);
8 code_block* old_block = (code_block*)old_entry_point - 1;
10 // Free the old PIC since we know its unreachable
11 if (old_block->pic_p())
12 code->free(old_block);
15 // Figure out what kind of type check the PIC needs based on the methods
17 static cell determine_inline_cache_type(array* cache_entries) {
18 for (cell i = 0; i < array_capacity(cache_entries); i += 2) {
19 // Is it a tuple layout?
20 if (TAG(array_nth(cache_entries, i)) == ARRAY_TYPE) {
27 void factor_vm::update_pic_count(cell type) {
29 dispatch_stats.pic_tag_count++;
31 dispatch_stats.pic_tuple_count++;
34 struct inline_cache_jit : public jit {
35 inline_cache_jit(cell generic_word, factor_vm* vm)
36 : jit(code_block_pic, generic_word, vm) {}
38 void emit_check_and_jump(cell ic_type, cell i, cell klass, cell method);
39 void emit_inline_cache(fixnum index, cell generic_word_, cell methods_,
40 cell cache_entries_, bool tail_call_p);
43 void inline_cache_jit::emit_check_and_jump(cell ic_type, cell i,
44 cell klass, cell method) {
46 cell check_type = PIC_CHECK_TAG;
47 if (TAG(klass) != FIXNUM_TYPE)
48 check_type = PIC_CHECK_TUPLE;
50 // The tag check can be skipped if it is the first one and we are
51 // checking for the fixnum type which is 0. That is because the
52 // AND instruction in the PIC_TAG template already sets the zero
54 if (!(i == 0 && ic_type == PIC_TAG && klass == 0)) {
55 emit_with_literal(parent->special_objects[check_type], klass);
58 // Yes? Jump to method
59 emit_with_literal(parent->special_objects[PIC_HIT], method);
62 // index: 0 = top of stack, 1 = item underneath, etc
63 // cache_entries: array of class/method pairs
65 void inline_cache_jit::emit_inline_cache(fixnum index, cell generic_word_,
66 cell methods_, cell cache_entries_,
68 data_root<word> generic_word(generic_word_, parent);
69 data_root<array> methods(methods_, parent);
70 data_root<array> cache_entries(cache_entries_, parent);
72 cell ic_type = determine_inline_cache_type(cache_entries.untagged());
73 parent->update_pic_count(ic_type);
75 // Generate machine code to determine the object's class.
76 emit_with_literal(parent->special_objects[PIC_LOAD],
77 tag_fixnum(-index * sizeof(cell)));
79 // Put the tag of the object, or class of the tuple in a register.
80 emit(parent->special_objects[ic_type]);
82 // Generate machine code to check, in turn, if the class is one of the cached
84 for (cell i = 0; i < array_capacity(cache_entries.untagged()); i += 2) {
85 cell klass = array_nth(cache_entries.untagged(), i);
86 cell method = array_nth(cache_entries.untagged(), i + 1);
88 emit_check_and_jump(ic_type, i, klass, method);
91 // If none of the above conditionals tested true, then execution "falls
94 // A stack frame is set up, since the inline-cache-miss sub-primitive
95 // makes a subroutine call to the VM.
96 emit(parent->special_objects[JIT_PROLOG]);
98 // The inline-cache-miss sub-primitive call receives enough information to
99 // reconstruct the PIC with the new entry.
100 push(generic_word.value());
101 push(methods.value());
102 push(tag_fixnum(index));
103 push(cache_entries.value());
106 parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
108 true); // stack_frame_p
112 code_block* factor_vm::compile_inline_cache(fixnum index, cell generic_word_,
113 cell methods_, cell cache_entries_,
115 data_root<word> generic_word(generic_word_, this);
116 data_root<array> methods(methods_, this);
117 data_root<array> cache_entries(cache_entries_, this);
119 inline_cache_jit jit(generic_word.value(), this);
120 jit.emit_inline_cache(index, generic_word.value(), methods.value(),
121 cache_entries.value(), tail_call_p);
122 code_block* code = jit.to_code_block(JIT_FRAME_SIZE);
123 initialize_code_block(code);
128 cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_,
130 data_root<array> cache_entries(cache_entries_, this);
131 data_root<object> klass(klass_, this);
132 data_root<word> method(method_, this);
134 cell pic_size = array_capacity(cache_entries.untagged());
135 data_root<array> new_cache_entries(
136 reallot_array(cache_entries.untagged(), pic_size + 2), this);
137 set_array_nth(new_cache_entries.untagged(), pic_size, klass.value());
138 set_array_nth(new_cache_entries.untagged(), pic_size + 1, method.value());
139 return new_cache_entries.value();
142 void factor_vm::update_pic_transitions(cell pic_size) {
143 if (pic_size == max_pic_size)
144 dispatch_stats.pic_to_mega_transitions++;
145 else if (pic_size == 0)
146 dispatch_stats.cold_call_to_ic_transitions++;
147 else if (pic_size == 1)
148 dispatch_stats.ic_to_pic_transitions++;
// The cache_entries parameter is empty (on cold call site) or has entries
// (on cache miss). Called from assembly with the actual return address.
// Compilation of the inline cache may trigger a GC, which may trigger a
// compaction; also, the block containing the return address may now be
// dead. Use a code_root to take care of the details.
158 cell factor_vm::inline_cache_miss(cell return_address_) {
159 code_root return_address(return_address_, this);
160 bool tail_call_site = tail_call_site_p(return_address.value);
163 FACTOR_PRINT("Inline cache miss at "
164 << (tail_call_site ? "tail" : "non-tail")
165 << " call site 0x" << std::hex << return_address.value
170 data_root<array> cache_entries(ctx->pop(), this);
171 fixnum index = untag_fixnum(ctx->pop());
172 data_root<array> methods(ctx->pop(), this);
173 data_root<word> generic_word(ctx->pop(), this);
174 data_root<object> object(((cell*)ctx->datastack)[-index], this);
176 cell pic_size = array_capacity(cache_entries.untagged()) / 2;
178 update_pic_transitions(pic_size);
182 if (pic_size >= max_pic_size)
183 xt = generic_word->entry_point;
185 cell klass = object_class(object.value());
186 cell method = lookup_method(object.value(), methods.value());
188 data_root<array> new_cache_entries(
189 add_inline_cache_entry(cache_entries.value(), klass, method), this);
191 xt = compile_inline_cache(index, generic_word.value(), methods.value(),
192 new_cache_entries.value(), tail_call_site)
196 // Install the new stub.
197 if (return_address.valid) {
198 // Since each PIC is only referenced from a single call site,
199 // if the old call target was a PIC, we can deallocate it immediately,
200 // instead of leaving dead PICs around until the next GC.
201 deallocate_inline_cache(return_address.value);
202 set_call_target(return_address.value, xt);
205 FACTOR_PRINT("Updated " << (tail_call_site ? "tail" : "non-tail")
206 << " call site 0x" << std::hex << return_address.value << std::dec
207 << " with 0x" << std::hex << (cell)xt << std::dec);
216 VM_C_API cell inline_cache_miss(cell return_address, factor_vm* parent) {
217 return parent->inline_cache_miss(return_address);