/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_riscv.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Machine-dependent part of VtableStubs: create a VtableStub of the correct
// size and initialize its code.

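// In outline, a vtable stub loads the receiver's klass, loads the Method*
// from the vtable slot at the compile-time-constant vtable_index, and jumps
// to that method's from_compiled entry point. An itable stub must, in
// addition, search the receiver's itable for the resolved interface klass
// before it can locate the selected method.
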
#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc = nullptr;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  assert_cond(masm != nullptr);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ la(t2, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(t2));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(t2, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    start_pc = __ pc();

    // check offset vs vtable length
    __ lwu(t0, Address(t2, Klass::vtable_length_offset()));
    __ mv(t1, vtable_index * vtableEntry::size());
    __ bgt(t0, t1, L);
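    // Branch taken when the vtable length exceeds the scaled index, i.e. the
    // index is in range; otherwise fall through into the error-reporting path.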
    __ enter();
    __ mv(x12, vtable_index);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, x12);
    const ptrdiff_t estimate = 256;
    const ptrdiff_t codesize = __ pc() - start_pc;
    slop_delta = estimate - codesize;  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);

    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

  start_pc = __ pc();
  __ lookup_virtual_method(t2, vtable_index, xmethod);
  // lookup_virtual_method generates:
  //   4 instructions in the worst case encountered on the normal path:
  //     li (lui + addiw) + add + ld
  //   1 instruction in the best case: a single ld
  slop_delta = 16 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
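  // The 16-byte bound corresponds to the 4-instruction worst case above at
  // 4 bytes per instruction; any compressed encodings only increase the slop.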

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ beqz(xmethod, L);
    __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
    __ bnez(t0, L);
    __ stop("Vtable entry is null");
    __ bind(L);
  }
#endif // PRODUCT

  // t2: receiver klass
  // xmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ld(t1, Address(xmethod, Method::from_compiled_offset()));
  __ jr(t1);
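  // npe_addr and ame_addr record the instructions whose implicit memory
  // faults the runtime maps back to exceptions: a fault at npe_addr becomes
  // a NullPointerException (null receiver), a fault at ame_addr becomes an
  // AbstractMethodError (null Method* in the vtable slot).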

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc = nullptr;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  assert_cond(masm != nullptr);

  // Real entry arguments:
  //  t0: CompiledICData
  //  j_rarg0: Receiver
  // Make sure the move of CompiledICData from t0 to t1 is the first thing that
  // happens; otherwise we risk clobbering t0, which is used as a scratch register.
  __ mv(t1, t0);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ la(x18, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(x18));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Arguments from this point:
  //  t1 (moved from t0): CompiledICData
  //  j_rarg0: Receiver

  // This stub is called from compiled code which has no callee-saved registers,
  // so all registers except arguments are free at this point.
  const Register recv_klass_reg     = x18;
  const Register holder_klass_reg   = x19; // declaring interface klass (DEFC)
  const Register resolved_klass_reg = x30; // resolved interface klass (REFC)
  const Register temp_reg           = x28;
  const Register temp_reg2          = x29;
  const Register icdata_reg         = t1;
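  // None of these scratch registers overlap the Java argument registers,
  // which still hold the outgoing call's arguments at this point.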

  Label L_no_such_interface;

  __ ld(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
  __ ld(holder_klass_reg,   Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(recv_klass_reg, j_rarg0);

  // Receiver subtype check against REFC.
  // Get selected method from declaring class and itable index.
  __ lookup_interface_method_stub(recv_klass_reg, holder_klass_reg, resolved_klass_reg, xmethod,
                                  temp_reg, temp_reg2, itable_index, L_no_such_interface);
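  // The lookup walks the receiver's itable at runtime (a loop over its
  // entries), branching to L_no_such_interface if the receiver's class does
  // not implement the resolved interface.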

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 256;
  const ptrdiff_t codesize = __ pc() - start_pc;
  slop_delta = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ beqz(xmethod, L2);
    __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
    __ bnez(t0, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // xmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ld(t1, Address(xmethod, Method::from_compiled_offset()));
  __ jr(t1);

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs: for a more detailed
  // error message, we force resolving of the call site by jumping to the
  // "handle wrong method" stub, and so let the interpreter runtime do all
  // the dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
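  // far_jump is used because the target stub may lie anywhere in the code
  // cache, possibly beyond the reach of a direct jump.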

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // RISCV cache line size is not an architected constant. We just align on word size.
  const unsigned int icache_line_size = wordSize;
  return icache_line_size;
}