/*
 * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_arm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Machine-dependent part of VtableStubs: create a VtableStub of the correct
// size and initialize its code.

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Not implemented on ARM32. Other ports increment the megamorphic call
    // counter (SharedRuntime::nof_megamorphic_calls_addr()) here.
  }
#endif

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  const Register tmp = Rtemp; // Rtemp OK, should be free at call sites

  address npe_addr = __ pc();
  __ load_klass(tmp, R0);

#ifndef PRODUCT
  if (DebugVtables) {
    // Not implemented on ARM32. Other ports verify here that vtable_index is
    // in range for the receiver's vtable and call bad_compiled_vtable_index()
    // (declared above) if it is not.
  }
#endif

  start_pc = __ pc();
  { // lookup virtual method
    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
    int method_offset = in_bytes(vtableEntry::method_offset()) + entry_offset;

    assert((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
    // An ARM ldr with immediate offset can encode only a 12-bit (0..0xfff)
    // displacement, so fold any higher offset bits into tmp first.
    int offset_mask = 0xfff;
    if (method_offset & ~offset_mask) {
      __ add(tmp, tmp, method_offset & ~offset_mask);
    }
    __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
  }
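  // Illustrative example (the number is hypothetical, not taken from a real
  // vtable layout): if method_offset were 0x1008, the add above would fold in
  // 0x1000 and the ldr would use the remaining 12-bit displacement 0x008.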
  slop_delta  = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
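  // The 8-byte budget above covers the worst case of two 4-byte instructions
  // (add + ldr); when the add is not emitted, the unused 4 bytes are counted
  // as slop, consistent with the sizing rationale noted earlier.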

#ifndef PRODUCT
  if (DebugVtables) {
    // Not implemented on ARM32 (see the note above).
  }
#endif

  address ame_addr = __ pc();
  // Tail-jump to the method's from_compiled entry point; on ARM, loading
  // into PC performs the branch.
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 0;
  int     slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Not implemented on ARM32 (see the note in create_vtable_stub).
  }
#endif

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  // R0-R3 / R0-R7 hold the outgoing arguments and must not be clobbered;
  // use callee-saved temporaries instead.
  const Register Rclass = R4;
  const Register Rintf  = R5;
  const Register Rscan  = R6;

  Label L_no_such_interface;

  assert_different_registers(Ricklass, Rclass, Rintf, Rscan, Rtemp);

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(Rclass, R0);

  // Receiver subtype check against REFC: verify that the receiver's class
  // implements the interface referenced at the call site.
  __ ldr(Rintf, Address(Ricklass, CompiledICData::itable_refc_klass_offset()));
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             Rclass, Rintf, noreg,
                             // outputs: temp reg1, temp reg2
                             noreg, Rscan, Rtemp,
                             L_no_such_interface);

  const ptrdiff_t typecheckSize = __ pc() - start_pc;
  start_pc = __ pc();

  // Get Method* and entry point for compiler: search the itable again, this
  // time against DEFC (the interface that declares the resolved method) and
  // with the itable index, to load Rmethod.
  __ ldr(Rintf, Address(Ricklass, CompiledICData::itable_defc_klass_offset()));
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             Rclass, Rintf, itable_index,
                             // outputs: temp reg1, temp reg2, temp reg3
                             Rmethod, Rscan, Rtemp,
                             L_no_such_interface);
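  // A rough sketch of what lookup_interface_method does (see MacroAssembler
  // for the authoritative code): it scans the itable entries of the receiver's
  // klass, comparing each entry's interface against Rintf; when an itable
  // index is supplied, it loads the Method* for that index from the matching
  // entry's method table, and it branches to L_no_such_interface if no entry
  // matches.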

  const ptrdiff_t lookupSize = __ pc() - start_pc;

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 140;
  const ptrdiff_t codesize = typecheckSize + lookupSize;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifndef PRODUCT
  if (DebugVtables) {
    // Not implemented on ARM32 (see the note in create_vtable_stub).
  }
#endif

  address ame_addr = __ pc();

  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // To get a more detailed error message, we force re-resolution of the call
  // site by jumping to the "handle wrong method" stub, and so let the
  // interpreter runtime do all the dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
  __ jump(SharedRuntime::get_handle_wrong_method_stub(), relocInfo::runtime_call_type, Rtemp);

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // ARM32 cache line size is not an architected constant. We just align on word size.
  const unsigned int icache_line_size = wordSize;
  return icache_line_size;
}