/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"

#define __ ce->masm()->

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  ShouldNotReachHere();
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index on stack because all registers must be preserved
  ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // pass the array in the second reserved stack slot
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
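  // STMIA stores its register set in ascending register-number order regardless
  // of how the set is written, so the combined store is only correct when
  // obj_reg is numerically below lock_reg; otherwise use two separate stores.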
  if (obj_reg < lock_reg) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}

// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we need to implement this
  ShouldNotReachHere();
#endif
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = 0;

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);


  if (is_load && !VM_Version::supports_movw()) {
    address start = __ pc();

    // The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop()
    // without creating relocation info entry.

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
    __ ldr(_obj, Address(PC));
    // Extra nop to handle case of large offset of oop placeholder (see NativeMovConstReg::set_data).
    __ nop();

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
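  // The patch record is a single word: 0xff in the low byte marks it as a
  // patch record, and the three high bytes pack the offset back to
  // being_initialized_entry, the number of bytes to skip, and the number of
  // bytes to copy (the latter asserted above to fit in one byte).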
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for call to return just after patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is
    // probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

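  // Move each argument to the location assigned by the Java calling convention;
  // arguments the convention assigns to registers are expected to already be in
  // the right register, which the assert below checks.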
  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

#undef __