/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

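// Convenience shorthand used throughout HotSpot assembler sources:
// "__" forwards each pseudo-instruction to the MacroAssembler named "masm".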
#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

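  // When CountCompiledCalls is enabled, bump the shared megamorphic call counter.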
#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r16));
  }
#endif

  // get receiver (passed in j_rarg0)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(r16, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // TODO: find upper bound for this debug code.
    start_pc = __ pc();

    // check offset vs vtable length
    __ ldrw(rscratch1, Address(r16, Klass::vtable_length_offset()));
    __ cmpw(rscratch1, vtable_index * vtableEntry::size());
    __ br(Assembler::GT, L);
    __ enter();
    __ mov(r2, vtable_index);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
    const ptrdiff_t estimate = 256;
    const ptrdiff_t codesize = __ pc() - start_pc;
    slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);

    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

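  // Load the Method* for this vtable_index from the receiver's vtable into rmethod.
  // The emitted sequence is expected to fit in 8 bytes; any shortfall is credited
  // to slop_bytes (see the assert below).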
  start_pc = __ pc();
  __ lookup_virtual_method(r16, vtable_index, rmethod);
  slop_delta  = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cbz(rmethod, L);
    __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ cbnz(rscratch1, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
#endif // PRODUCT

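  // Dispatch: load the target's compiled entry point and jump to it. If the
  // Method* is null, the load faults at ame_addr and the runtime converts the
  // implicit exception into an AbstractMethodError.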
  // r16: receiver klass
  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
  __ br(rscratch1);

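  // Finalize the stub: flush the generated code and let the shared bookkeeping
  // record the exception points (npe_addr, ame_addr) and check the size estimate.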
  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}


VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r10));
  }
#endif

  // get receiver (passed in j_rarg0)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Entry arguments:
  //  rscratch2: CompiledICHolder
  //  j_rarg0: Receiver

  // This stub is called from compiled code which has no callee-saved registers,
  // so all registers except arguments are free at this point.
  const Register recv_klass_reg     = r10;
  const Register holder_klass_reg   = r16; // declaring interface klass (DECC)
  const Register resolved_klass_reg = rmethod; // resolved interface klass (REFC)
  const Register temp_reg           = r11;
  const Register temp_reg2          = r15;
  const Register icholder_reg       = rscratch2;

  Label L_no_such_interface;

  __ ldr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
  __ ldr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(recv_klass_reg, j_rarg0);

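  // An interface call must both check that the receiver implements the
  // resolved interface (REFC) and fetch the method from the declaring
  // interface (DECC), hence the two itable scans below.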
  // Receiver subtype check against REFC.
  __ lookup_interface_method(// inputs: rec. class, interface
                             recv_klass_reg, resolved_klass_reg, noreg,
                             // outputs:  scan temp. reg1, scan temp. reg2
                             temp_reg2, temp_reg,
                             L_no_such_interface,
                             /*return_method=*/false);

  const ptrdiff_t  typecheckSize = __ pc() - start_pc;
  start_pc = __ pc();

  // Get selected method from declaring class and itable index
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             recv_klass_reg, holder_klass_reg, itable_index,
                             // outputs: method, scan temp. reg
                             rmethod, temp_reg,
                             L_no_such_interface);

  const ptrdiff_t lookupSize = __ pc() - start_pc;

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 128;
  const ptrdiff_t codesize = typecheckSize + lookupSize;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cbz(rmethod, L2);
    __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ cbnz(rscratch1, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

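  // Dispatch through the selected Method*'s compiled entry point. As in the
  // vtable stub, a null entry faults at ame_addr and is reported as an
  // AbstractMethodError.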
  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
  __ br(rscratch1);

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // We force resolution of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work; it also produces a more detailed error message.
  assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
  const unsigned int icache_line_size = 4;
  return icache_line_size;
}