/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#define __ ce->masm()->

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // pc() - offset() is the start of the code buffer, so adding
  // safepoint_offset() yields the address of the safepoint poll itself;
  // record it as the thread's saved exception pc for the return handler.
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
  __ adr(rscratch1, safepoint_pc);
  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the index in rscratch1 and, for the range check, the array in rscratch2
  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  C1StubId stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = C1StubId::throw_index_exception_id;
  } else {
    assert(_array != LIR_Opr::nullOpr(), "sanity");
    __ mov(rscratch2, _array->as_pointer_register());
    stub_id = C1StubId::throw_range_check_failed_id;
  }
  __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
  __ blr(lr);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
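
// Note on the convention used by most stubs in this file: arguments are
// handed to the Runtime1 entries through stack slots in the compiled frame,
// not through registers. A minimal sketch of the hand-off, assuming the
// usual C1 stub frame layout (slot offsets are owned by FrameMap and the
// Runtime1 stub, not by this file):
//
//   ce->store_parameter(val, 1);  // spill argument 1 to its stack slot
//   ce->store_parameter(val, 0);  // spill argument 0 to its stack slot
//   __ far_call(RuntimeAddress(Runtime1::entry_for(id)));
//                                 // the stub reads the slots back,
//                                 // e.g. via StubFrame::load_argument()
//
// The allocation stubs below are the exception: they rely on a fixed
// register assignment instead (see the asserts in NewTypeArrayStub and
// NewObjectArrayStub).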

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}

// Implementation of LoadFlattenedArrayStub

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _result = result;
  _scratch_reg = FrameMap::r0_oop_opr;
  _info = new CodeEmitInfo(info);
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::load_flat_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  if (_result->as_register() != r0) {
    __ mov(_result->as_register(), r0);
  }
  __ b(_continuation);
}


// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _value = value;
  _scratch_reg = FrameMap::r0_oop_opr;
  _info = new CodeEmitInfo(info);
}


void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 2);
  ce->store_parameter(_index->as_register(), 1);
  ce->store_parameter(_value->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::store_flat_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

// Implementation of SubstitutabilityCheckStub
SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  _left = left;
  _right = right;
  _scratch_reg = FrameMap::r0_oop_opr;
  _info = new CodeEmitInfo(info);
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_left->as_register(), 1);
  ce->store_parameter(_right->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::substitutability_check_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}
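
// The three Valhalla slow paths above (load_flat_array, store_flat_array,
// substitutability_check) share one shape: spill the operands with
// store_parameter, far_call the matching Runtime1 entry, then branch to
// _continuation. When there is a result it comes back in r0, which is why
// LoadFlattenedArrayStub copies r0 into _result if the register allocator
// picked a different register for it.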


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == C1StubId::new_instance_id ||
         stub_id == C1StubId::fast_new_instance_id ||
         stub_id == C1StubId::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_null_free) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_null_free = is_null_free;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");

  if (_is_null_free) {
    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_null_free_array_id)));
  } else {
    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
  }

  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}
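
// The allocation stubs above run under a fixed register convention that is
// asserted rather than arranged here: klass in r3, array length in r19,
// result in r0. The Runtime1 entries on the far side of the far_call expect
// exactly this assignment, so the LIR generator pins these operands instead
// of leaving the choice to the register allocator.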

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_ie_stub != nullptr) {
    // When we come here, _obj_reg has already been checked to be non-null.
    // Test the mark word against markWord::inline_type_pattern: if all
    // pattern bits are set, the object is an inline type, which must not
    // be locked, so take the _throw_ie_stub path instead.
    __ ldr(rscratch1, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ mov(rscratch2, markWord::inline_type_pattern);
    __ andr(rscratch1, rscratch1, rscratch2);

    __ cmp(rscratch1, rscratch2);
    __ br(Assembler::EQ, *_throw_ie_stub->entry());
  }

  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  C1StubId enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = C1StubId::monitorenter_id;
  } else {
    enter_id = C1StubId::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  C1StubId exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = C1StubId::monitorexit_id;
  } else {
    exit_id = C1StubId::monitorexit_nofpu_id;
  }
  // Tail call: point lr at _continuation so the runtime stub's return
  // lands there directly.
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}
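
// ImplicitNullCheckStub (below), like DivByZeroStub, records a pair in the
// implicit exception table: _offset locates the potentially faulting
// instruction in the method body, __ offset() locates the slow path being
// emitted here. When the hardware fault fires, the signal handler looks up
// the faulting pc in this table and resumes execution at the stub.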

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments that the calling convention assigns to stack slots are
  // stored now; register arguments are already in place (asserted below)
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str (r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == nullptr) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ incrementw(Address(rscratch2));
  }
#endif

  __ b(_continuation);
}

#undef __