/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
#endif

// Used by compiler only; may use only caller saved, non-argument registers.
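//
// The stub dispatches through the receiver's vtable, roughly as follows
// (an illustrative sketch only; the exact sequence depends on flags such
// as UseCompressedClassPointers and ImplicitNullChecks):
//
//   <load receiver klass from R3_ARG1>        // npe_addr
//   ld    R19_method, v_off(rcvr_klass)       // Method* from vtable slot
//   ld    R12, from_compiled(R19_method)      // entry point (ame_addr)
//   mtctr R12
//   bctr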
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 8; // just a two-instruction safety net
  int slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    start_pc = __ pc();
    int load_const_maxLen = 5 * BytesPerInstWord; // load_const generates 5 instructions; assume this as the max size of load_const_optimized.
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
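    // The counter is statistics-only, so a plain (non-atomic, possibly
    // racy) load/increment/store is good enough here.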
    __ ld(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ std(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  const Register rcvr_klass = R11_scratch1;
  address npe_addr = __ pc(); // npe = null pointer exception
  // Get receiver klass.
  __ load_klass_check_null(rcvr_klass, R3_ARG1);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // Check offset vs vtable length.
    const Register vtable_len = R12_scratch2;
    __ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
    __ cmpwi(CR0, vtable_len, vtable_index * vtableEntry::size());
    __ bge(CR0, L);
    __ li(R12_scratch2, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
    __ bind(L);
  }
#endif

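  // Compute the byte offset of the selected vtable slot: the vtable is
  // embedded in the Klass, starting at vtable_start_offset(), and each
  // vtableEntry holds a single Method* at method_offset().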
  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                     vtable_index * vtableEntry::size_in_bytes();
  int v_off = entry_offset + in_bytes(vtableEntry::method_offset());

  __ ld(R19_method, (RegisterOrConstant)v_off, rcvr_klass);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cmpdi(CR0, R19_method, 0);
    __ bne(CR0, L);
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc(); // ame = abstract method error
  // If the vtable entry is null, the method is abstract.
  // NOTE: for vtable dispatches, the vtable entry will never be null.

  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/nullptr);
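  // Load the target's from-compiled entry point and tail-jump to it via CTR.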
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

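// An illustrative sketch of the itable dispatch generated below (the exact
// code depends on flags; each lookup_interface_method call expands to an
// itable scan loop):
//
//   <load receiver klass from R3_ARG1>          // npe_addr
//   <itable scan: subtype check against REFC>   // may exit via L_no_such_interface
//   <itable scan: load Method* for DEFC entry>  // result in R19_method
//   ld    R12, from_compiled(R19_method)        // ame_addr
//   mtctr R12
//   bctr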
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 8; // just a two-instruction safety net
  int slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  int load_const_maxLen = 5 * BytesPerInstWord; // load_const generates 5 instructions; assume this as the max size of load_const_optimized.

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    start_pc = __ pc();
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    __ ld(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ std(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Entry arguments:
  //  R19_method: CompiledICData* (holds the REFC and DEFC klasses)
  //  R3_ARG1:    Receiver

  Label L_no_such_interface;
  const Register rcvr_klass = R11_scratch1,
                 interface  = R12_scratch2,
                 tmp1       = R21_tmp1,
                 tmp2       = R22_tmp2;

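  // Interface dispatch needs two passes over the receiver's itable: first a
  // subtype check against the reference class (REFC) named at the call site,
  // then the Method* lookup in the itable section of the defining class (DEFC).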
  address npe_addr = __ pc(); // npe = null pointer exception
  __ load_klass_check_null(rcvr_klass, R3_ARG1);

  // Receiver subtype check against REFC.
  __ ld(interface, CompiledICData::itable_refc_klass_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, noreg,
                             R0, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ false);

  // Get the Method* and its entry point for the compiler.
  __ ld(interface, CompiledICData::itable_defc_klass_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, itable_index,
                             R19_method, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ true);

#ifndef PRODUCT
  if (DebugVtables) {
    Label ok;
    __ cmpdi(CR0, R19_method, 0);
    __ bne(CR0, ok);
    __ stop("method is null");
    __ bind(ok);
  }
#endif

  // If the itable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), &L_no_such_interface);
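  // Dispatch to the resolved method's from-compiled entry point via CTR.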
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  // Handle IncompatibleClassChangeError in itable stubs.
  // To get a more detailed error message, we force re-resolving of the call
  // site by jumping to the "handle wrong method" stub, and so let the
  // interpreter runtime do all the dirty work.
  __ bind(L_no_such_interface);
  start_pc = __ pc();
  __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
  slop_delta  = load_const_maxLen - (__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
  __ mtctr(R11_scratch1);
  __ bctr();

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // The Power cache line size is 128 bytes; aligning to a full cache line
  // would waste too much space, so align stubs to 32 bytes only.
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}