/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#define __ ce->masm()->

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
  __ adr(rscratch1, safepoint_pc);
  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != LIR_Opr::nullOpr(), "sanity");
    __ mov(rscratch2, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
  __ blr(lr);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
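
// PredicateFailedStub: slow path taken when a compiler-inserted loop predicate
// fails at runtime. The predicate_failed_trap runtime entry deoptimizes the
// frame and resumes execution in the interpreter, so control never returns to
// this stub (hence the should_not_reach_here guard in the debug build).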
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}
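

// Implementation of MonitorAccessStubs

// MonitorEnterStub: slow path of an inlined lock. The object and its
// BasicLock are handed to the Runtime1 monitorenter entry via stub
// parameters; the _nofpu variant is chosen for methods without FPU code so
// the runtime blob does not need to save and restore FPU registers.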
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  assert(UseCompactObjectHeaders, "Only use with compact object headers");
  __ bind(_entry);
  Register d = _result->as_register();
  __ ldr(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ b(_continuation);
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}
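

// ImplicitNullCheckStub: target of an implicit exception table entry. A
// faulting access at pc-offset _offset is redirected here by the signal
// handler; depending on the CodeEmitInfo we either deoptimize or throw
// NullPointerException, and neither runtime entry returns.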
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments that the calling convention assigns to the stack are stored
  // now; the rest must already be in the expected registers
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == nullptr) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ incrementw(Address(rscratch2));
  }
#endif

  __ b(_continuation);
}

#undef __