/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code
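//
// Roughly, a generated vtable stub loads the receiver's klass, fetches the Method*
// stored at this stub's fixed vtable_index, and tail-jumps to that method's compiled
// entry point. The itable stub below additionally checks that the receiver actually
// implements the call site's interface before locating the method via the itable.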

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will unveil any deviation from this observation.
  const int index_dependent_slop     = 0;
  ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r16));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(r16, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // TODO: find upper bound for this debug code.
    start_pc = __ pc();

    // check offset vs vtable length
    __ ldrw(rscratch1, Address(r16, Klass::vtable_length_offset()));
    __ cmpw(rscratch1, vtable_index * vtableEntry::size());
    __ br(Assembler::GT, L);
    __ enter();
    __ mov(r2, vtable_index);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
    const ptrdiff_t estimate = 256;
    const ptrdiff_t codesize = __ pc() - start_pc;
    slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);

    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

  start_pc = __ pc();
  __ lookup_virtual_method(r16, vtable_index, rmethod);
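  // rmethod now holds the Method* found in the receiver klass' vtable slot 'vtable_index'.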
  slop_delta  = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cbz(rmethod, L);
    __ ldr(rscratch1, Address(rmethod, entry_offset));
    __ cbnz(rscratch1, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
#endif // PRODUCT

  // r16: receiver klass
  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, entry_offset));
  __ br(rscratch1);
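  // Tail-jump into the target method's compiled code. ame_addr marks the entry load
  // above so the runtime can report a fault there as an AbstractMethodError.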

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);

  return s;
}


VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  const int index_dependent_slop = (itable_index == 0) ? 4 :     // index == 0 generates even shorter code.
                                   (itable_index < 16) ? 3 : 0;  // code size change with transition from 8-bit to 32-bit constant (@index == 16).
  ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r10));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Entry arguments:
  //  rscratch2: CompiledICHolder
  //  j_rarg0: Receiver

  // This stub is called from compiled code which has no callee-saved registers,
  // so all registers except arguments are free at this point.
  const Register recv_klass_reg     = r10;
  const Register holder_klass_reg   = r16; // declaring interface klass (DECC)
  const Register resolved_klass_reg = rmethod; // resolved interface klass (REFC)
  const Register temp_reg           = r11;
  const Register temp_reg2          = r15;
  const Register icholder_reg       = rscratch2;

  Label L_no_such_interface;

  __ ldr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
  __ ldr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
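  // resolved_klass_reg now holds REFC, the interface referenced at the call site (used
  // only for the subtype check below); holder_klass_reg holds DECC, the interface that
  // declares the selected method and whose itable entry is searched for it.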

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(recv_klass_reg, j_rarg0);

  // Receiver subtype check against REFC.
  __ lookup_interface_method(// inputs: rec. class, interface
                             recv_klass_reg, resolved_klass_reg, noreg,
                             // outputs:  scan temp. reg1, scan temp. reg2
                             temp_reg2, temp_reg,
                             L_no_such_interface,
                             /*return_method=*/false);
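  // If the receiver klass does not implement REFC, the itable scan above branches to
  // L_no_such_interface, which forces re-resolution of the call (see below).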

  const ptrdiff_t  typecheckSize = __ pc() - start_pc;
  start_pc = __ pc();

  // Get selected method from declaring class and itable index
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             recv_klass_reg, holder_klass_reg, itable_index,
                             // outputs: method, scan temp. reg
                             rmethod, temp_reg,
                             L_no_such_interface);

  const ptrdiff_t lookupSize = __ pc() - start_pc;

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 124;
  const ptrdiff_t codesize = typecheckSize + lookupSize;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cbz(rmethod, L2);
    __ ldr(rscratch1, Address(rmethod, entry_offset));
    __ cbnz(rscratch1, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, entry_offset));
  __ br(rscratch1);

  __ bind(L_no_such_interface);
  // Throw IncompatibleClassChangeError with a more detailed error message: we force
  // re-resolution of the call site by jumping to the "handle wrong method" stub,
  // and so let the interpreter runtime do all the dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);

  return s;
}

int VtableStub::pd_code_alignment() {
  // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
  const unsigned int icache_line_size = 4;
  return icache_line_size;
}