/*
 * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_s390.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

// Used by compiler only; may use only caller-saved, non-argument registers.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
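  // For example, load_const emits a fixed-size instruction sequence, while
  // load_const_optimized may pick a shorter encoding depending on the actual
  // constant. The per-site difference (slop_delta) is accumulated in
  // slop_bytes and passed to bookkeeping() below.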
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Worst-case size minus actual size.
    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    // Use generic emitter for direct memory increment.
    // Abuse Z_method as scratch register for generic emitter.
    // It is loaded further down anyway before it is first used.
    // No dynamic code size variance here, increment is 1, always.
    __ add2mem_64(Address(Z_R1_scratch), 1, Z_method);
  }
#endif

  assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");

  const Register rcvr_klass = Z_R1_scratch;
  address npe_addr = __ pc(); // npe is short for null pointer exception
  // Get receiver klass.
  __ load_klass(rcvr_klass, Z_ARG1);
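  // If the receiver is null, the load above traps (SIGSEGV); the signal
  // handler uses the recorded npe_addr to turn the fault into an implicit
  // NullPointerException, so no explicit null check is needed here.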

#ifndef PRODUCT
  if (DebugVtables) {
    NearLabel L;
    // Check offset vs vtable length.
    const Register vtable_idx = Z_R0_scratch;

    // Worst-case size minus actual size.
    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size(), true);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

    assert(Displacement::is_shortDisp(in_bytes(Klass::vtable_length_offset())), "disp too large");
    __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
    __ z_brl(L);
    __ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
    // Count unused bytes (assume worst case here).
    slop_bytes += 12;
    __ bind(L);
  }
#endif

  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                     vtable_index * vtableEntry::size_in_bytes();
  int v_off        = entry_offset + in_bytes(vtableEntry::method_offset());
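  // The vtable is embedded in the Klass, starting at vtable_start_offset().
  // Each vtableEntry holds a single Method*, so v_off is the byte offset of
  // this index's Method* slot within the Klass. For very large vtables,
  // v_off may not fit in the signed 20-bit displacement field, hence the
  // explicit load_const path below.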

  // Set method (needed in case the target method is interpreted), and destination address.
  // Duplicate safety code from enc_class Java_Dynamic_Call_dynTOC.
  if (Displacement::is_validDisp(v_off)) {
    __ z_lg(Z_method/*method*/, v_off, rcvr_klass/*class*/);
    // Account for the load_const in the else path.
    slop_delta = __ load_const_size();
  } else {
    // Worst case: offset does not fit in the displacement field.
    // Worst-case size minus actual size.
    slop_delta = __ load_const_size() - __ load_const_optimized_rtn_len(Z_method, v_off, true);
    __ z_lg(Z_method/*method*/, 0, Z_method/*method offset*/, rcvr_klass/*class*/);
  }
  slop_bytes += slop_delta;

#ifndef PRODUCT
  if (DebugVtables) {
    NearLabel L;
    __ z_ltgr(Z_method, Z_method);
    __ z_brne(L);
    __ stop("Vtable entry is ZERO", 102);
    __ bind(L);
  }
#endif

  // Must do an explicit null check if the offset is too large or implicit null checks are disabled.
  address ame_addr = __ pc();
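  // If Z_method is null here (e.g., an abstract method without an
  // implementation), the check/access below faults, and the runtime maps
  // the fault pc (ame_addr) to an AbstractMethodError.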
  __ null_check(Z_method, Z_R1_scratch, in_bytes(Method::from_compiled_offset()));
  __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
  __ z_br(Z_R1_scratch);

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Worst-case size minus actual size.
    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    // Use generic emitter for direct memory increment.
    // Abuse Z_method as scratch register for generic emitter.
    // It is loaded further down anyway before it is first used.
    // No dynamic code size variance here, increment is 1, always.
    __ add2mem_64(Address(Z_R1_scratch), 1, Z_method);
  }
#endif

  assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");

  // Entry arguments:
  //  Z_method: CompiledICData (provides the REFC and DEFC interface klasses)
  //  Z_ARG1:   Receiver
  NearLabel no_such_interface;
  const Register rcvr_klass = Z_tmp_1,
                 interface  = Z_tmp_2;

  // Get receiver klass.
  // Must do an explicit null check if the offset is too large or implicit null checks are disabled.
  address npe_addr = __ pc(); // npe is short for null pointer exception
  __ load_klass(rcvr_klass, Z_ARG1);

  // Receiver subtype check against REFC.
  __ z_lg(interface, Address(Z_method, CompiledICData::itable_refc_klass_offset()));
  __ lookup_interface_method(rcvr_klass, interface, noreg,
                             noreg, Z_R1, no_such_interface, /*return_method=*/ false);

  // Get Method* and entry point for compiler.
  __ z_lg(interface, Address(Z_method, CompiledICData::itable_defc_klass_offset()));
  __ lookup_interface_method(rcvr_klass, interface, itable_index,
                             Z_method, Z_R1, no_such_interface, /*return_method=*/ true);
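  // REFC is the reference class, i.e. the interface named at the call site;
  // the first lookup above only performs the receiver subtype check and
  // branches to no_such_interface if the receiver does not implement it.
  // DEFC is the defining class, i.e. the interface that declares the
  // resolved method; the second lookup fetches the corresponding Method*
  // from the receiver's itable.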

#ifndef PRODUCT
  if (DebugVtables) {
    NearLabel ok1;
    __ z_ltgr(Z_method, Z_method);
    __ z_brne(ok1);
    __ stop("method is null", 103);
    __ bind(ok1);
  }
#endif

  address ame_addr = __ pc();
  // Must do an explicit null check if implicit null checks are disabled.
  if (!ImplicitNullChecks) {
    __ compare64_and_branch(Z_method, (intptr_t) 0, Assembler::bcondEqual, no_such_interface);
  }
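  // With implicit null checks enabled, a null Z_method instead faults at the
  // load below, and the runtime maps the fault pc (ame_addr) to the
  // appropriate exception.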
  __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
  __ z_br(Z_R1_scratch);

  // Handle IncompatibleClassChangeError in itable stubs.
  __ bind(no_such_interface);
  // To raise a more detailed IncompatibleClassChangeError, we force
  // re-resolution of the call site by jumping to the "handle wrong method"
  // stub, thus letting the interpreter runtime do all the dirty work.
  // Worst-case size minus actual size.
  slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::get_handle_wrong_method_stub(), true);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
  __ z_br(Z_R1_scratch);

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // The System z cache line size is 256 bytes, but octoword (32-byte) alignment is sufficient.
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}