/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "assembler_riscv.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_riscv.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code
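//
// Each stub is the shared target of megamorphic call sites for one vtable
// (or itable) index: it loads the receiver's klass, fetches the Method* from
// the vtable/itable, and tail-jumps to that method's from_compiled entry.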

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc = NULL;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  assert_cond(masm != NULL);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ la(t2, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ add_memory_int64(Address(t2), 1);
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass
  address npe_addr = __ pc();
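  // The klass load below is the first access through the receiver, so a null
  // receiver faults right here; npe_addr is recorded via bookkeeping() so the
  // fault can be converted into an implicit NullPointerException.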
  __ load_klass(t2, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    start_pc = __ pc();

    // check offset vs vtable length
    __ lwu(t0, Address(t2, Klass::vtable_length_offset()));
    __ mvw(t1, vtable_index * vtableEntry::size());
    __ bgt(t0, t1, L);
    __ enter();
    __ mv(x12, vtable_index);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, x12);
    const ptrdiff_t estimate = 256;
    const ptrdiff_t codesize = __ pc() - start_pc;
    slop_delta = estimate - codesize;  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);

    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

  start_pc = __ pc();
  __ lookup_virtual_method(t2, vtable_index, xmethod);
  // lookup_virtual_method generates at most
  //   4 instructions (worst normal case): li (lui + addiw) + add + ld, i.e. 16 bytes,
  // and at least
  //   1 instruction (best case): a single ld.
  slop_delta = 16 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ beqz(xmethod, L);
    __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
    __ bnez(t0, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
#endif // PRODUCT

  // t2: receiver klass
  // xmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
  __ jr(t0);
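  // ame_addr marks the entry-point load above: if the vtable lookup produced a
  // NULL Method*, that load faults and the runtime raises AbstractMethodError.
  // The jr is a tail call, so the callee returns directly to the compiled caller.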

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc = NULL;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  assert_cond(masm != NULL);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ la(x18, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ add_memory_int64(Address(x18), 1);
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Entry arguments:
  //  t1: CompiledICHolder
  //  j_rarg0: Receiver

  // This stub is called from compiled code which has no callee-saved registers,
  // so all registers except arguments are free at this point.
  const Register recv_klass_reg     = x18;
  const Register holder_klass_reg   = x19; // declaring interface klass (DEFC)
  const Register resolved_klass_reg = xmethod; // resolved interface klass (REFC)
  const Register temp_reg           = x28;
  const Register temp_reg2          = x29;
  const Register icholder_reg       = t1;

  Label L_no_such_interface;

  __ ld(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
  __ ld(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
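  // The CompiledICHolder supplies both classes needed for interface dispatch:
  // the resolved interface (REFC) used for the receiver subtype check below,
  // and the declaring interface (DEFC) whose itable slot holds the selected method.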

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(recv_klass_reg, j_rarg0);

  // Receiver subtype check against REFC.
  __ lookup_interface_method(// inputs: rec. class, interface
                             recv_klass_reg, resolved_klass_reg, noreg,
                             // outputs:  scan temp. reg1, scan temp. reg2
                             temp_reg2, temp_reg,
                             L_no_such_interface,
                             /*return_method=*/false);

  const ptrdiff_t typecheckSize = __ pc() - start_pc;
  start_pc = __ pc();

  // Get selected method from declaring class and itable index
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             recv_klass_reg, holder_klass_reg, itable_index,
                             // outputs: method, scan temp. reg
                             xmethod, temp_reg,
                             L_no_such_interface);

  const ptrdiff_t lookupSize = __ pc() - start_pc;

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 256;
  const ptrdiff_t codesize = typecheckSize + lookupSize;
  slop_delta = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ beqz(xmethod, L2);
    __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
    __ bnez(t0, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // xmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
  __ jr(t0);

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // The riscv64 cache line size is typically 64 bytes, but aligning every stub
  // that coarsely would waste code cache space; word alignment is sufficient
  // and limits the alignment loss.
  const unsigned int icache_line_size = wordSize;
  return icache_line_size;
}