5 void factor_vm::init_inline_caching(int max_size) { max_pic_size = max_size; }
7 void factor_vm::deallocate_inline_cache(cell return_address) {
8 /* Find the call target. */
9 void* old_entry_point = get_call_target(return_address);
10 code_block* old_block = (code_block*)old_entry_point - 1;
12 /* Free the old PIC since we know its unreachable */
13 if (old_block->pic_p())
14 code->free(old_block);
17 /* Figure out what kind of type check the PIC needs based on the methods
19 cell factor_vm::determine_inline_cache_type(array* cache_entries) {
20 bool seen_tuple = false;
23 for (i = 0; i < array_capacity(cache_entries); i += 2) {
24 /* Is it a tuple layout? */
25 if (TAG(array_nth(cache_entries, i)) == ARRAY_TYPE) {
31 return seen_tuple ? PIC_TUPLE : PIC_TAG;
34 void factor_vm::update_pic_count(cell type) {
36 dispatch_stats.pic_tag_count++;
38 dispatch_stats.pic_tuple_count++;
/* JIT specialized for emitting polymorphic inline cache (PIC) stubs;
   inherits the code-emission machinery (emit, emit_with_literal, push,
   to_code_block, ...) from `jit`. */
41 struct inline_cache_jit : public jit {
  /* Construct a jit of the PIC code-block kind, tied to `vm`. */
44   inline_cache_jit(cell generic_word, factor_vm* vm)
45       : jit(code_block_pic, generic_word, vm) {}

  /* Emit a class check for `klass`: a tag check for fixnum-tagged
     classes, a tuple-layout check otherwise (see definition below). */
48   void emit_check(cell klass);
  /* Emit the whole PIC stub body; parameters documented at the
     definition below. */
49   void compile_inline_cache(fixnum index, cell generic_word_, cell methods_,
50                             cell cache_entries_, bool tail_call_p);
53 /* Allocates memory */
54 void inline_cache_jit::emit_check(cell klass) {
56 if (TAG(klass) == FIXNUM_TYPE)
57 code_template = parent->special_objects[PIC_CHECK_TAG];
59 code_template = parent->special_objects[PIC_CHECK_TUPLE];
61 emit_with_literal(code_template, klass);
64 /* index: 0 = top of stack, 1 = item underneath, etc
65    cache_entries: array of class/method pairs */
66 /* Allocates memory */
67 void inline_cache_jit::compile_inline_cache(fixnum index, cell generic_word_,
68                                             cell methods_, cell cache_entries_,
  /* data_roots pin the tagged arguments so they stay valid across any
     GC triggered while emitting code below. */
70   data_root<word> generic_word(generic_word_, parent);
71   data_root<array> methods(methods_, parent);
72   data_root<array> cache_entries(cache_entries_, parent);
  /* PIC_TAG for a tag-only check, PIC_TUPLE if any cached class is a
     tuple layout; also bumps the matching dispatch-stats counter. */
74   cell inline_cache_type =
75       parent->determine_inline_cache_type(cache_entries.untagged());
76   parent->update_pic_count(inline_cache_type);
78   /* Generate machine code to determine the object's class. */
  /* The receiver lives `index` cells below the top of the datastack
     (hence the negated, scaled offset). */
79   emit_with_literal(parent->special_objects[PIC_LOAD],
80                     tag_fixnum(-index * sizeof(cell)));
81   emit(parent->special_objects[inline_cache_type]);
83   /* Generate machine code to check, in turn, if the class is one of the cached
  /* Even slots hold classes, odd slots the corresponding methods. */
86   for (i = 0; i < array_capacity(cache_entries.untagged()); i += 2) {
88     cell klass = array_nth(cache_entries.untagged(), i);
91     /* Yes? Jump to method */
92     cell method = array_nth(cache_entries.untagged(), i + 1);
93     emit_with_literal(parent->special_objects[PIC_HIT], method);
96   /* If none of the above conditionals tested true, then execution "falls
99   /* A stack frame is set up, since the inline-cache-miss sub-primitive
100    makes a subroutine call to the VM. */
101   emit(parent->special_objects[JIT_PROLOG]);
103   /* The inline-cache-miss sub-primitive call receives enough information to
104      reconstruct the PIC with the new entry. */
  /* NOTE: factor_vm::inline_cache_miss() pops these in reverse order. */
105   push(generic_word.value());
106   push(methods.value());
107   push(tag_fixnum(index));
108   push(cache_entries.value());
  /* Tail-call sites use the tail-call variant of the miss word. */
111       parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
112       true, /* tail_call_p */
113       true); /* stack_frame_p */
116 /* Allocates memory */
117 code_block* factor_vm::compile_inline_cache(fixnum index, cell generic_word_,
118 cell methods_, cell cache_entries_,
120 data_root<word> generic_word(generic_word_, this);
121 data_root<array> methods(methods_, this);
122 data_root<array> cache_entries(cache_entries_, this);
124 inline_cache_jit jit(generic_word.value(), this);
125 jit.compile_inline_cache(index, generic_word.value(), methods.value(),
126 cache_entries.value(), tail_call_p);
127 code_block* code = jit.to_code_block(JIT_FRAME_SIZE);
128 initialize_code_block(code);
132 cell factor_vm::inline_cache_size(cell cache_entries) {
133 return array_capacity(untag_check<array>(cache_entries)) / 2;
136 /* Allocates memory */
137 cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_,
139 data_root<array> cache_entries(cache_entries_, this);
140 data_root<object> klass(klass_, this);
141 data_root<word> method(method_, this);
143 cell pic_size = array_capacity(cache_entries.untagged());
144 data_root<array> new_cache_entries(
145 reallot_array(cache_entries.untagged(), pic_size + 2), this);
146 set_array_nth(new_cache_entries.untagged(), pic_size, klass.value());
147 set_array_nth(new_cache_entries.untagged(), pic_size + 1, method.value());
148 return new_cache_entries.value();
151 void factor_vm::update_pic_transitions(cell pic_size) {
152 if (pic_size == max_pic_size)
153 dispatch_stats.pic_to_mega_transitions++;
154 else if (pic_size == 0)
155 dispatch_stats.cold_call_to_ic_transitions++;
156 else if (pic_size == 1)
157 dispatch_stats.ic_to_pic_transitions++;
160 /* The cache_entries parameter is empty (on cold call site) or has entries
161    (on cache miss). Called from assembly with the actual return address.
162    Compilation of the inline cache may trigger a GC, which may trigger a
164    also, the block containing the return address may now be dead. Use a
165    code_root to take care of the details. */
166 /* Allocates memory */
167 cell factor_vm::inline_cache_miss(cell return_address_) {
168   code_root return_address(return_address_, this);
169   bool tail_call_site = tail_call_site_p(return_address.value);
  /* Debug trace of the miss site (tail vs. non-tail, address). */
172   std::cout << "Inline cache miss at " << (tail_call_site ? "tail" : "non-tail")
173             << " call site 0x" << std::hex << return_address.value << std::dec
  /* Pop the arguments the PIC stub pushed (reverse of the push order in
     inline_cache_jit::compile_inline_cache); data_roots protect them
     from GC during compilation below. */
178   data_root<array> cache_entries(ctx->pop(), this);
179   fixnum index = untag_fixnum(ctx->pop());
180   data_root<array> methods(ctx->pop(), this);
181   data_root<word> generic_word(ctx->pop(), this);
  /* The receiver sits `index` cells down the datastack. */
182   data_root<object> object(((cell*)ctx->datastack)[-index], this);
  /* Number of class/method pairs already cached at this site. */
184   cell pic_size = inline_cache_size(cache_entries.value());
186   update_pic_transitions(pic_size);
  /* PIC is full: stop caching and jump straight to the generic word. */
190   if (pic_size >= max_pic_size)
191     xt = generic_word->entry_point;
  /* Otherwise, look up the method for the receiver's class and compile
     a new PIC extended with that class/method pair. */
193     cell klass = object_class(object.value());
194     cell method = lookup_method(object.value(), methods.value());
196     data_root<array> new_cache_entries(
197         add_inline_cache_entry(cache_entries.value(), klass, method), this);
199     xt = compile_inline_cache(index, generic_word.value(), methods.value(),
200                               new_cache_entries.value(), tail_call_site)
204   /* Install the new stub. */
  /* Only patch the call site if its code block survived any GC
     triggered above (tracked by the code_root). */
205   if (return_address.valid) {
206     /* Since each PIC is only referenced from a single call site,
207        if the old call target was a PIC, we can deallocate it immediately,
208        instead of leaving dead PICs around until the next GC. */
209     deallocate_inline_cache(return_address.value);
210     set_call_target(return_address.value, xt);
  /* Debug trace of the newly installed call target. */
213     std::cout << "Updated " << (tail_call_site ? "tail" : "non-tail")
214               << " call site 0x" << std::hex << return_address.value << std::dec
215               << " with 0x" << std::hex << (cell)xt << std::dec << std::endl;
223 /* Allocates memory */
224 VM_C_API cell inline_cache_miss(cell return_address, factor_vm* parent) {
225 return parent->inline_cache_miss(return_address);