/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()        { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes() { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes();
  // Total stack size in bytes for saving SVE predicate registers.
  int total_sve_predicate_in_bytes();

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
    fpu_state_off = 0,
    fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
    // The frame sender code expects that rfp will be in
    // the "natural" place and will override any oopMap
    // setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    r0_off = fpu_state_off + FPUStateSizeInWords,
    rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
    return_off = rfp_off + Register::max_slots_per_register, // slot for return address
    reg_save_size = return_off + Register::max_slots_per_register};

};

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers() so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegister::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegister::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}

int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate registers,
  // if any are present, in the stack frame pushed by save_live_registers().
  // So the offset depends on the total size of the predicate registers saved
  // in the stack frame.
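  // Since VMRegImpl::stack_slot_size and BytesPerInt are both 4, this works
  // out to total_sve_predicate_in_bytes() itself. For instance (illustrative
  // only, assuming a 256-bit SVE vector length and 16 predicate registers),
  // that is (32 >> LogBitsPerByte) * 16 = 64 bytes; without saved SVE state
  // it is 0 and v0 sits at the bottom of the save area.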
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
}

int RegisterSaver::total_sve_predicate_in_bytes() {
#ifdef COMPILER2
  if (_save_vectors && Matcher::supports_scalable_vector()) {
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
           PRegister::number_of_registers;
  }
#endif
  return 0;
}

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;
  int sve_predicate_size_in_slots = 0;
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
    }
    int extra_vector_bytes = extra_save_slots_per_register *
                             VMRegImpl::stack_slot_size *
                             FloatRegister::number_of_registers;
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register slots are 8 bytes wide, 32 floating-point registers.
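      // For instance (illustrative only, assuming 2 slots per integer
      // register, 2 save slots per FP register and 32 FP registers, with no
      // vectors saved): r1 (i == 1) lands at slot 2 * 1 + 2 * 32 = 66, i.e.
      // 264 bytes above the bottom of the FP save area.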
      int sp_offset = Register::max_slots_per_register * i +
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
                            (FloatRegister::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegister::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The minimum vector size supported by SVE is 8 bytes, and we need to save
// predicate registers when the vector size is 8 bytes as well.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit. The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
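  // As an illustrative example (a sketch, not normative): for a signature
  // (int, long, Object, float), sig_bt arrives as
  // { T_INT, T_LONG, T_VOID, T_OBJECT, T_FLOAT } and the loop below assigns
  // j_rarg0, j_rarg1 (the T_VOID half gets a bad slot), j_rarg2 and j_farg0;
  // stack slots are used only once the eight integer or eight float argument
  // registers are exhausted.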
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;

int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {

  // Create the mapping between argument positions and registers.
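  // Note: unlike the argument convention above, the integer registers here are
  // listed in reverse order, so the first (and common single) scalar return
  // value is assigned r0, the usual return register.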
  static const Register INT_ArgReg[java_return_convention_max_int] = {
    r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
  };

  static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
    j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      // Should T_METADATA be added to java_calling_convention as well?
    case T_METADATA:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_FLOAT:
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return int_args + fp_args;
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ authenticate_return_address(c_rarg1);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

// For each inline type argument, sig includes the list of fields of
// the inline type. This utility function computes the number of
// arguments for the call if inline types are passed by reference (the
// calling convention the interpreter expects).
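// For example (illustrative only): for a method taking (MyValue, int) where
// MyValue has an int field and a long field, sig_extended is
// T_METADATA T_INT T_LONG T_VOID (second slot of the long) T_VOID (end of
// MyValue) T_INT, and this function returns 2: one reference for the buffered
// MyValue plus the plain int.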
static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
  int total_args_passed = 0;
  if (InlineTypePassFieldsAsArgs) {
    for (int i = 0; i < sig_extended->length(); i++) {
      BasicType bt = sig_extended->at(i)._bt;
      if (bt == T_METADATA) {
        // In sig_extended, an inline type argument starts with:
        // T_METADATA, followed by the types of the fields of the
        // inline type and T_VOID to mark the end of the value
        // type. Inline types are flattened so, for instance, in the
        // case of an inline type with an int field and an inline type
        // field that itself has 2 fields, an int and a long:
        // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
        // slot for the T_LONG) T_VOID (inner inline type) T_VOID
        // (outer inline type)
        total_args_passed++;
        int vt = 1;
        do {
          i++;
          BasicType bt = sig_extended->at(i)._bt;
          BasicType prev_bt = sig_extended->at(i-1)._bt;
          if (bt == T_METADATA) {
            vt++;
          } else if (bt == T_VOID &&
                     prev_bt != T_LONG &&
                     prev_bt != T_DOUBLE) {
            vt--;
          }
        } while (vt != 0);
      } else {
        total_args_passed++;
      }
    }
  } else {
    total_args_passed = sig_extended->length();
  }

  return total_args_passed;
}

static void gen_c2i_adapter_helper(MacroAssembler* masm,
                                   BasicType bt,
                                   BasicType prev_bt,
                                   size_t size_in_bytes,
                                   const VMRegPair& reg_pair,
                                   const Address& to,
                                   Register tmp1,
                                   Register tmp2,
                                   Register tmp3,
                                   int extraspace,
                                   bool is_oop) {
  if (bt == T_VOID) {
    assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
    return;
  }

  // Say 4 args:
  //  i   st_off
  //  0   32   T_LONG
  //  1   24   T_VOID
  //  2   16   T_OBJECT
  //  3    8   T_BOOL
  //  -    0   return address
  //
  // However, to make things extra confusing: because we can fit a Java long/double in
  // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
  // leaves one slot empty and only stores to a single slot. In this case the
  // slot that is occupied is the T_VOID slot. See, I said it was confusing.
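  // So, for the T_LONG at i == 0 in the table above, the value actually lives
  // in the T_VOID slot at st_off 24 (st_off minus one stackElementSize); the
  // caller passes the adjusted address, computed as next_off, for those types.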

  bool wide = (size_in_bytes == wordSize);
  VMReg r_1 = reg_pair.first();
  VMReg r_2 = reg_pair.second();
  assert(r_2->is_valid() == wide, "invalid size");
  if (!r_1->is_valid()) {
    assert(!r_2->is_valid(), "");
    return;
  }

  if (!r_1->is_FloatRegister()) {
    Register val = r25;
    if (r_1->is_stack()) {
      // memory to memory: use r25 (scratch registers are used by store_heap_oop)
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
    } else {
      val = r_1->as_Register();
    }
    assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
    if (is_oop) {
      __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
    } else {
      __ store_sized_value(to, val, size_in_bytes);
    }
  } else {
    if (wide) {
      __ strd(r_1->as_FloatRegister(), to);
    } else {
      // only a float: use just part of the slot
      __ strs(r_1->as_FloatRegister(), to);
    }
  }
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            const GrowableArray<SigEntry>* sig_extended,
                            const VMRegPair *regs,
                            bool requires_clinit_barrier,
                            address& c2i_no_clinit_check_entry,
                            Label& skip_fixup,
                            address start,
                            OopMapSet* oop_maps,
                            int& frame_complete,
                            int& frame_size_in_words,
                            bool alloc_inline_receiver) {
  if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Name some registers to be used in the following code. We can use
  // anything except r0-r7 which are arguments in the Java calling
  // convention, rmethod (r12), and r13 which holds the outgoing sender
  // SP for the interpreter.
  Register buf_array = r10;   // Array of buffered inline types
  Register buf_oop = r11;     // Buffered inline type oop
  Register tmp1 = r15;
  Register tmp2 = r16;
  Register tmp3 = r17;

  if (InlineTypePassFieldsAsArgs) {
    // Is there an inline type argument?
    bool has_inline_argument = false;
    for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
      has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
    }
    if (has_inline_argument) {
      // There is at least one inline type argument: we're coming from
      // compiled code so we have no buffers to back the inline types.
      // Allocate the buffers here with a runtime call.
      RegisterSaver reg_save(false /* save_vectors */);
      OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

      frame_complete = __ offset();
      address the_pc = __ pc();

      Label retaddr;
      __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

      __ mov(c_rarg0, rthread);
      __ mov(c_rarg1, rmethod);
      __ mov(c_rarg2, (int64_t)alloc_inline_receiver);

      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
      __ blr(rscratch1);
      __ bind(retaddr);

      oop_maps->add_gc_map(__ pc() - start, map);
      __ reset_last_Java_frame(false);

      reg_save.restore_live_registers(masm);

      Label no_exception;
      __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
      __ cbz(rscratch1, no_exception);

      __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
      __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
      __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));

      __ bind(no_exception);

      // We get an array of objects from the runtime call
      __ get_vm_result(buf_array, rthread);
      __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
    }
  }

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int total_args_passed = compute_total_args_passed_int(sig_extended);
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, StackAlignmentInBytes);

  // set senderSP value
  __ mov(r19_sender_sp, sp);

  __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space

  // next_arg_comp is the next argument from the compiler point of
  // view (inline type fields are passed in registers/on the stack). In
  // sig_extended, an inline type argument starts with: T_METADATA,
  // followed by the types of the fields of the inline type and T_VOID
  // to mark the end of the inline type. ignored counts the number of
  // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
  // used to get the buffer for that argument from the pool of buffers
  // we allocated above and want to pass to the
  // interpreter. next_arg_int is the next argument from the
  // interpreter point of view (inline types are passed by reference).
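  // To make the bookkeeping concrete (illustrative only): regs[] has no
  // entries for the T_METADATA markers or for the T_VOIDs that delimit inline
  // types, so regs[next_arg_comp - ignored] re-aligns the compiled-convention
  // index once those entries have been skipped.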
  for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
       next_arg_comp < sig_extended->length(); next_arg_comp++) {
    assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
    assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
    BasicType bt = sig_extended->at(next_arg_comp)._bt;
    int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
    if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
      int next_off = st_off - Interpreter::stackElementSize;
      const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
      const VMRegPair reg_pair = regs[next_arg_comp-ignored];
      size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
      gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                             size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
      next_arg_int++;
#ifdef ASSERT
      if (bt == T_LONG || bt == T_DOUBLE) {
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
        __ str(rscratch1, Address(sp, st_off));
      }
#endif /* ASSERT */
    } else {
      ignored++;
      // get the buffer from the just allocated pool of buffers
      int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
      __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
      next_vt_arg++; next_arg_int++;
      int vt = 1;
      // write fields we get from compiled code in registers/stack
      // slots to the buffer: we know we are done with that inline type
      // argument when we hit the T_VOID that acts as an end of inline
      // type delimiter for this inline type. Inline types are flattened
      // so we might encounter embedded inline types. Each entry in
      // sig_extended contains a field offset in the buffer.
      Label L_null;
      do {
        next_arg_comp++;
        BasicType bt = sig_extended->at(next_arg_comp)._bt;
        BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
        if (bt == T_METADATA) {
          vt++;
          ignored++;
        } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
          vt--;
          ignored++;
        } else {
          int off = sig_extended->at(next_arg_comp)._offset;
          if (off == -1) {
            // Nullable inline type argument, emit null check
            VMReg reg = regs[next_arg_comp-ignored].first();
            Label L_notNull;
            if (reg->is_stack()) {
              int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
              __ ldrb(tmp1, Address(sp, ld_off));
              __ cbnz(tmp1, L_notNull);
            } else {
              __ cbnz(reg->as_Register(), L_notNull);
            }
            __ str(zr, Address(sp, st_off));
            __ b(L_null);
            __ bind(L_notNull);
            continue;
          }
          assert(off > 0, "offset in object should be positive");
          size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
          bool is_oop = is_reference_type(bt);
          gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ?
                                 sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                                 size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
        }
      } while (vt != 0);
      // pass the buffer to the interpreter
      __ str(buf_oop, Address(sp, st_off));
      __ bind(L_null);
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {

  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer. It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != nullptr) {
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::initial_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::initial_stubs_code()->code_begin(),
                  StubRoutines::initial_stubs_code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::final_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::final_stubs_code()->code_begin(),
                  StubRoutines::final_stubs_code()->code_end(),
                  L_ok);
    }
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  int total_args_passed = sig->length();

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    BasicType bt = sig->at(i)._bt;
    if (bt == T_VOID) {
      assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");

    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ mov(rscratch2, rscratch1);
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
  __ mov(rscratch1, rscratch2);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
  __ br(rscratch1);
}

static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
  Register data = rscratch2;
  __ ic_check(1 /* end_alignment */);
  __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));

  // Method might have been compiled since the call site was patched to
  // interpreted; if that is the case treat it as a miss so we can get
  // the call site corrected.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, skip_fixup);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
                                                            int comp_args_on_stack,
                                                            const GrowableArray<SigEntry>* sig,
                                                            const VMRegPair* regs,
                                                            const GrowableArray<SigEntry>* sig_cc,
                                                            const VMRegPair* regs_cc,
                                                            const GrowableArray<SigEntry>* sig_cc_ro,
                                                            const VMRegPair* regs_cc_ro,
                                                            AdapterFingerPrint* fingerprint,
                                                            AdapterBlob*& new_adapter,
                                                            bool allocate_code_blob) {

  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know rmethod holds the Method* during calls
  // to the interpreter. The args start out packed in the compiled layout. They
  // need to be unpacked into the interpreter layout. This will almost always
  // require some stack space. We grow the current (compiled) stack, then repack
  // the args. We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).
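  // A rough map of the entry points generated below (a sketch of the code that
  // follows, not an exhaustive spec): an unverified c2i entry performing the
  // inline cache check, an optional c2i_inline_ro_entry that does not pack the
  // receiver (only when regs_cc != regs_cc_ro), the main scalarized c2i entry,
  // and, when the non-scalarized convention differs (regs != regs_cc), an
  // extra unverified/verified entry pair for it.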

  address c2i_unverified_entry        = __ pc();
  address c2i_unverified_inline_entry = __ pc();
  Label skip_fixup;

  gen_inline_cache_check(masm, skip_fixup);

  OopMapSet* oop_maps = new OopMapSet();
  int frame_complete = CodeOffsets::frame_never_safe;
  int frame_size_in_words = 0;

  // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
  address c2i_no_clinit_check_entry = nullptr;
  address c2i_inline_ro_entry = __ pc();
  if (regs_cc != regs_cc_ro) {
    // No class init barrier needed because method is guaranteed to be non-static
    gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
                    skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
    skip_fixup.reset();
  }

  // Scalarized c2i adapter
  address c2i_entry        = __ pc();
  address c2i_inline_entry = __ pc();
  gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
                  skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);

  // Non-scalarized c2i adapter
  if (regs != regs_cc) {
    c2i_unverified_inline_entry = __ pc();
    Label inline_entry_skip_fixup;
    gen_inline_cache_check(masm, inline_entry_skip_fixup);

    c2i_inline_entry = __ pc();
    gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
                    inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
  }

  // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
  // the GC knows about the location of oop argument locations passed to the c2i adapter.
  if (allocate_code_blob) {
    bool caller_must_gc_arguments = (regs != regs_cc);
    new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
  }

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                     VMRegPair *regs,
                                     int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than-word types are stored one after another on the stack
        // (the Apple AArch64 ABI packs them). The code cannot handle this,
        // so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than-word types are stored one after another on the stack
        // (the Apple AArch64 ABI packs them). The code cannot handle this,
        // so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  // More than 8 vector argument inputs are not supported for now.
  assert(total_args_passed <= Argument::n_float_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  static const FloatRegister VEC_ArgReg[Argument::n_float_register_parameters_c] = {
    v0, v1, v2, v3, v4, v5, v6, v7
  };

  // On SVE, we use the same vector registers as the 128-bit NEON registers.
  int next_reg_val = num_bits == 64 ? 1 : 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID: break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID: break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size()/wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
  __ mov(rscratch1, sp); // we can't use sp as the source in str
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ movw(rscratch1, ContinuationEntry::cookie_value());
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::EQ, OK);
  __ stop("incorrect sp1");
  __ bind(OK);
#endif
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
    __ cbz(rscratch1, L_skip_vthread_code);

    // Save return value potentially containing the exception oop in callee-saved R19.
    __ mov(r19, r0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value.
    __ mov(r0, r19);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination.
    // The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#endif

  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));

  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
  __ add(rfp, sp, (int)ContinuationEntry::size());
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
  //verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used only in interp_only_mode
  interpreted_entry_offset = __ pc() - start;
  {
#ifdef ASSERT
    Label is_interp_only;
    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    __ cbnzw(rscratch1, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
    __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
    __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
    __ push_cont_fastpath(rthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame
    // would appear unsafe; that's okay because at the very worst we'll miss an async sample,
    // and we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ cbnz(c_rarg2, call_thaw);

    const address tr_call = __ trampoline_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ b(exit);

    address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ cbnz(c_rarg2, call_thaw);

  const address tr_call = __ trampoline_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ b(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  ContinuationEntry::_cleanup_offset = __ pc() - start;
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret(lr);

  // exception handling

  exception_offset = __ pc() - start;
  {
    __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19

    continuation_enter_cleanup(masm);

    __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
    __ authenticate_return_address(c_rarg1);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);

    // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc

    __ mov(r1, r0); // the exception handler
    __ mov(r0, r19); // restore return value containing the exception oop
    __ verify_oop(r0);

    __ leave();
    __ mov(r3, lr);
    __ br(r1); // the exception handler
  }

  address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
  enum layout {
    rfp_off1,
    rfp_off2,
    lr_off,
    lr_off2,
    framesize // inclusive of return address
  };
  // assert(is_even(framesize/2), "sp not 16-byte aligned");
  stack_slots = framesize / VMRegImpl::slots_per_word;
  assert(stack_slots == 2, "recheck layout");

  address start = __ pc();

  compiled_entry_offset = __ pc() - start;
  __ enter();

  __ mov(c_rarg1, sp);

  frame_complete = __ pc() - start;
  address the_pc = __ pc();

  __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup

  __ mov(c_rarg0, rthread);
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
  __ call_VM_leaf(Continuation::freeze_entry(), 2);
  __ reset_last_Java_frame(true);

  Label pinned;

  __ cbnz(r0, pinned);

  // We've succeeded, set sp to the ContinuationEntry
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ mov(sp, rscratch1);
  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ cbz(rscratch1, ok);
  __ leave();
  __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ br(rscratch1);
  __ bind(ok);

  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = r19;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee. Critical native functions leave the state
// _in_Java, since they block out GC.
// Some other parts of JNI setup are skipped, like the tear-down of the JNI
// handle block and the check for pending exceptions, since it is impossible
// for them to be thrown.
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_continuation_native_intrinsic()) {
    int exception_offset = -1;
    OopMapSet* oop_maps = new OopMapSet();
    int frame_complete = -1;
    int stack_slots = -1;
    int interpreted_entry_offset = -1;
    int vep_offset = -1;
    if (method->is_continuation_enter_intrinsic()) {
      gen_continuation_enter(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             exception_offset,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             interpreted_entry_offset,
                             vep_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      gen_continuation_yield(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             vep_offset);
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }

#ifdef ASSERT
    if (method->is_continuation_enter_intrinsic()) {
      assert(interpreted_entry_offset != -1, "Must be set");
      assert(exception_offset != -1,         "Must be set");
    } else {
      assert(interpreted_entry_offset == -1, "Must be unset");
      assert(exception_offset == -1,         "Must be unset");
    }
    assert(frame_complete != -1,    "Must be set");
    assert(stack_slots != -1,       "Must be set");
    assert(vep_offset != -1,        "Must be set");
#endif

    __ flush();
    nmethod* nm = nmethod::new_native_nmethod(method,
                                              compile_id,
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots,
                                              in_ByteSize(-1),
                                              in_ByteSize(-1),
                                              oop_maps,
                                              exception_offset);
    if (nm == nullptr) return nm;
    if (method->is_continuation_enter_intrinsic()) {
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      _cont_doYield_stub = nm;
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }
    return nm;
  }

  if (method->is_method_handle_intrinsic()) {
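    // Method handle intrinsics are not real native methods: gen_special_dispatch()
    // below just shuffles the receiver/member argument into place and jumps to
    // the target, so no frame of our own or oop map is needed.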
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    __ nop();
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       nullptr);
  }
  address native_func = method->native_function();
  assert(native_func != nullptr, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args + (method->is_static() ? 2 : 1);

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);

  int argc = 0;
  out_sig_bt[argc++] = T_ADDRESS;
  if (method->is_static()) {
    out_sig_bt[argc++] = T_OBJECT;
  }

  for (int i = 0; i < total_in_args ; i++) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);

  if (out_arg_slots < 0) {
    return nullptr;
  }

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // incoming registers

  // Calculate the total number of stack slots we will need.
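  // As an example of the rewrite above: for a static native int m(Object o),
  // the outgoing C signature is (JNIEnv*, jclass, jobject), i.e. T_ADDRESS,
  // T_OBJECT, and then the original java argument types.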

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 4 for return address (which we own) and saved rfp
  stack_slots += 6;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (8 java arg registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = align_up(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rfp. rfp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.

  const Register receiver = j_rarg0;

  Label exception_pending;

  assert_different_registers(receiver, rscratch1);
  __ verify_oop(receiver);
  __ ic_check(8 /* end_alignment */);

  // Verified entry point must be aligned
  int vep_offset = ((intptr_t)__ pc()) - start;

  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump.  For this action to be legal we
  // must ensure that this first instruction is a B, BL, NOP, BKPT,
  // SVC, HVC, or SMC.  Make it a NOP.
  __ nop();

  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
    Label L_skip_barrier;
    __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
  }

  // Generate stack overflow check
  __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rfp
  __ sub(sp, sp, stack_size - 2*wordSize);

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  // We use r20 as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = r20;

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle

  // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument the c calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore if we move the args from java -> c backwards then we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
  //

  // Record esp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots */);

  // Mark location of rfp (someday)
  // map->set_callee_saved(VMRegImpl::stack2reg(stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));


  int float_args = 0;
  int int_args = 0;

#ifdef ASSERT
  bool reg_destroyed[Register::number_of_registers];
  bool freg_destroyed[FloatRegister::number_of_registers];
  for (int r = 0 ; r < Register::number_of_registers ; r++) {
    reg_destroyed[r] = false;
  }
  for (int f = 0 ; f < FloatRegister::number_of_registers ; f++) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  // For JNI natives the incoming and outgoing registers are offset upwards.
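  // arg_order pairs each incoming java arg index with its outgoing C arg index,
  // highest indices first, so the moves below never clobber an argument that
  // has not yet been moved (see The Grand Shuffle comment above).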
  GrowableArray<int> arg_order(2 * total_in_args);

  for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
    arg_order.push(i);
    arg_order.push(c_arg);
  }

  for (int ai = 0; ai < arg_order.length(); ai += 2) {
    int i = arg_order.at(ai);
    int c_arg = arg_order.at(ai + 1);
    __ block_comment(err_msg("move %d -> %d", i, c_arg));
    assert(c_arg != -1 && i != -1, "wrong order");
#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
    }
#endif /* ASSERT */
    switch (in_sig_bt[i]) {
      case T_ARRAY:
      case T_OBJECT:
        __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                       ((i == 0) && (!is_static)),
                       &receiver_offset);
        int_args++;
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        __ float_move(in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        __ double_move(in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_LONG :
        __ long_move(in_regs[i], out_regs[c_arg]);
        int_args++;
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        __ move32_64(in_regs[i], out_regs[c_arg]);
        int_args++;
    }
  }

  // point c_arg at the first arg that is already loaded in case we
  // need to spill before we call out
  int c_arg = total_c_args - total_in_args;

  // Pre-load a static method's oop into c_rarg1.
  if (method->is_static()) {

    // load oop into a register
    __ movoop(c_rarg1,
              JNIHandles::make_local(method->method_holder()->java_mirror()));

    // Now handlize the static class mirror; it's known to be not-null.
    __ str(c_rarg1, Address(sp, klass_offset));
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ lea(c_rarg1, Address(sp, klass_offset));
    // and protect the arg if we must spill
    c_arg--;
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  // We use the same pc/oopMap repeatedly when we call out.

  Label native_return;
  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
    // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
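    // native_return is bound further down, after the transition back to
    // _thread_in_Java, so a preempted Object.wait() resumes as if the native
    // call had just returned.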
    __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
  } else {
    intptr_t the_pc = (intptr_t) __ pc();
    oop_maps->add_gc_map(the_pc - start, map);

    __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
  }

  Label dtrace_method_entry, dtrace_method_entry_done;
  if (DTraceMethodProbes) {
    __ b(dtrace_method_entry);
    __ bind(dtrace_method_entry_done);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
  }

  // Lock a synchronized method

  // Register definitions used by locking and unlocking

  const Register swap_reg = r0;
  const Register obj_reg  = r19;  // Will contain the oop
  const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
  const Register old_hdr  = r13;  // value of old header at unlock time
  const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
  const Register tmp = lr;

  Label slow_path_lock;
  Label lock_done;

  if (method->is_synchronized()) {
    Label count;
    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ mov(oop_handle_reg, c_rarg1);

    // Get address of the box

    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));

    // Load the oop from the handle
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    if (LockingMode == LM_MONITOR) {
      __ b(slow_path_lock);
    } else if (LockingMode == LM_LEGACY) {
      // Load (object->mark() | 1) into swap_reg %r0
      __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ orr(swap_reg, rscratch1, 1);
      if (EnableValhalla) {
        // Mask inline_type bit such that we go to the slow path if object is an inline type
        __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
      }

      // Save (object->mark() | 1) into BasicLock's displaced header
      __ str(swap_reg, Address(lock_reg, mark_word_offset));

      // src -> dest iff dest == r0 else r0 <- dest
      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

      // Hmm should this move to the slow path code area???

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) sp <= mark < mark + os::pagesize()
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - sp) & (3 - os::vm_page_size())),
      // assuming both stack pointer and pagesize have their
      // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg

      __ sub(swap_reg, sp, swap_reg);
      __ neg(swap_reg, swap_reg);
      __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());

      // Save the test result, for recursive case, the result is zero
      __ str(swap_reg, Address(lock_reg, mark_word_offset));
      __ br(Assembler::NE, slow_path_lock);

      __ bind(count);
      __ inc_held_monitor_count(rscratch1);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
      __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
    }

    // Slow path will re-enter here
    __ bind(lock_done);
  }


  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));

  // Now set thread in native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  __ rt_call(native_func);

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ c2bool(r0);          break;
  case T_CHAR   : __ ubfx(r0, r0, 0, 16); break;
  case T_BYTE   : __ sbfx(r0, r0, 0, 8);  break;
  case T_SHORT  : __ sbfx(r0, r0, 0, 16); break;
  case T_INT    : __ sbfx(r0, r0, 0, 32); break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in v0 we'll save as needed
    break;
  case T_ARRAY:  // Really a handle
  case T_OBJECT: // Really a handle
    break; // can't de-handlize until after safepoint check
  case T_VOID: break;
  case T_LONG: break;
  default       : ShouldNotReachHere();
  }

  Label safepoint_in_progress, safepoint_in_progress_done;

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
  __ mov(rscratch1, _thread_in_native_trans);

  __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ dmb(Assembler::ISH);
  }

  __ verify_sve_vector_length();

  // Check for safepoint operation in progress and/or pending suspend requests.
  {
    // No need for acquire as Java threads always disarm themselves.
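    // safepoint_poll() tests the thread-local polling word and branches to
    // safepoint_in_progress if it is armed.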
    __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbnzw(rscratch1, safepoint_in_progress);
    __ bind(safepoint_in_progress_done);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
    // Check preemption for Object.wait()
    __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
    __ cbz(rscratch1, native_return);
    __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
    __ br(rscratch1);
    __ bind(native_return);

    intptr_t the_pc = (intptr_t) __ pc();
    oop_maps->add_gc_map(the_pc - start, map);
  }

  Label reguard;
  Label reguard_done;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);

  // native result if any is live

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    Label done, not_recursive;

    if (LockingMode == LM_LEGACY) {
      // Simple recursive lock?
      __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      __ cbnz(rscratch1, not_recursive);
      __ dec_held_monitor_count(rscratch1);
      __ b(done);
    }

    __ bind(not_recursive);

    // Must save r0 if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (LockingMode == LM_MONITOR) {
      __ b(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // get address of the stack lock
      __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      // get old displaced header
      __ ldr(old_hdr, Address(r0, 0));

      // Atomic swap old header if oop still contains the stack lock
      Label count;
      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
      __ bind(count);
      __ dec_held_monitor_count(rscratch1);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "");
      __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
    }

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  Label dtrace_method_exit, dtrace_method_exit_done;
  if (DTraceMethodProbes) {
    __ b(dtrace_method_exit);
    __ bind(dtrace_method_exit_done);
  }

  __ reset_last_Java_frame(false);

  // Unbox oop result, e.g. JNIHandles::resolve result.
  if (is_reference_type(ret_type)) {
    __ resolve_jobject(r0, r1, r2);
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(r2, JNIHandleBlock::top_offset()));

  __ leave();

  // Any exception pending?
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  __ cbnz(rscratch1, exception_pending);

  // We're done
  __ ret(lr);

  // Unexpected paths are out of line and go here

  // forward the exception
  __ bind(exception_pending);

  // and forward the exception
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    __ block_comment("Slow path lock {");
    __ bind(slow_path_lock);

    // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
    // args are (oop obj, BasicLock* lock, JavaThread* thread)

    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);

    __ mov(c_rarg0, obj_reg);
    __ mov(c_rarg1, lock_reg);
    __ mov(c_rarg2, rthread);

    // Not a leaf but we have last_Java_frame setup as we want.
    // We don't want to unmount in case of contention since that would complicate preserving
    // the arguments that had already been marshalled into the native convention. So we force
    // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
    // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
    __ push_cont_fastpath();
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
    __ pop_cont_fastpath();
    restore_args(masm, total_c_args, c_arg, out_regs);

#ifdef ASSERT
    { Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ b(lock_done);

    __ block_comment("} Slow path lock");

    __ block_comment("Slow path unlock {");
    __ bind(slow_path_unlock);

    // If we haven't already saved the native result we must save it now, as the
    // floating-point result register (v0) is still exposed.

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
      save_native_result(masm, ret_type, stack_slots);
    }

    __ mov(c_rarg2, rthread);
    __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ mov(c_rarg0, obj_reg);

    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
    // NOTE that obj_reg == r19 currently
    __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));

#ifdef ASSERT
    {
      Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
      __ bind(L);
    }
#endif /* ASSERT */

    __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
      restore_native_result(masm, ret_type, stack_slots);
    }
    __ b(unlock_done);

    __ block_comment("} Slow path unlock");

  } // synchronized

  // SLOW PATH Reguard the stack if needed

  __ bind(reguard);
  save_native_result(masm, ret_type, stack_slots);
  __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  restore_native_result(masm, ret_type, stack_slots);
  // and continue
  __ b(reguard_done);

  // SLOW PATH safepoint
  {
    __ block_comment("safepoint {");
    __ bind(safepoint_in_progress);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    //
    save_native_result(masm, ret_type, stack_slots);
    __ mov(c_rarg0, rthread);
#ifndef PRODUCT
    assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ blr(rscratch1);

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    __ b(safepoint_in_progress_done);
    __ block_comment("} safepoint");
  }

  // SLOW PATH dtrace support
  if (DTraceMethodProbes) {
    {
      __ block_comment("dtrace entry {");
      __ bind(dtrace_method_entry);

      // We have all of the arguments set up at this point. We must not clobber
      // any of the register arguments before save_args() has protected them
      // (what if we had to save/restore them with no oop map covering them?).

      save_args(masm, total_c_args, c_arg, out_regs);
      __ mov_metadata(c_rarg1, method());
      __ call_VM_leaf(
        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
        rthread, c_rarg1);
      restore_args(masm, total_c_args, c_arg, out_regs);
      __ b(dtrace_method_entry_done);
      __ block_comment("} dtrace entry");
    }

    {
      __ block_comment("dtrace exit {");
      __ bind(dtrace_method_exit);
      save_native_result(masm, ret_type, stack_slots);
      __ mov_metadata(c_rarg1, method());
      __ call_VM_leaf(
        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
        rthread, c_rarg1);
      restore_native_result(masm, ret_type, stack_slots);
      __ b(dtrace_method_exit_done);
      __ block_comment("} dtrace exit");
    }
  }

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);

  return nm;
}

// This function returns the adjusted size (in number of words) of a c2i adapter
// activation, for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                   // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  // diff is counted in stack words
  return align_up(diff, 2);
}


//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  int pad = 0;
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    pad += 512; // Increase the buffer size when compiling for JVMCI
  }
#endif
  const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
  CodeBuffer buffer(name, 2048+pad, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_in_words;
  OopMap* map = nullptr;
  OopMapSet *oop_maps = new OopMapSet();
  RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);

  // -------------
  // This code enters when returning to a de-optimized nmethod.  A return
  // address has been pushed on the stack, and return values are in
  // registers.
  // If we are doing a normal deopt then we were called from the patched
  // nmethod from the point we returned to the nmethod. So the return
  // address on the stack is wrong by NativeCall::instruction_size.
  // We will adjust the value so it looks like we have the original return
  // address on the stack (like when we eagerly deoptimized).
  // In the case of an exception pending when deoptimizing, we enter
  // with a return address on the stack that points after the call we patched
  // into the exception handler. We have the following register state from,
  // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
  //    r0: exception oop
  //    r19: exception handler
  //    r3: throwing pc
  // So in this case we simply jam r3 into the useless return address and
  // the stack looks just like we want.
  //
  // At this point we need to de-opt.  We save the argument return
  // registers.  We call the first C routine, fetch_unroll_info().  This
  // routine captures the return values and returns a structure which
  // describes the current frame size and the sizes of all replacement frames.
  // The current frame is compiled code and may contain many inlined
  // functions, each with their own JVM state.  We pop the current frame, then
  // push all the new frames.  Then we call the C routine unpack_frames() to
  // populate these frames.  Finally unpack_frames() returns us the new target
  // address.  Notice that callee-save registers are BLOWN here; they have
  // already been captured in the vframeArray at the time the return PC was
  // patched.
  address start = __ pc();
  Label cont;

  // Prolog for non exception case!

  // Save everything in sight.
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  // Normal deoptimization.  Save exec mode for unpack_frames.
  __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
  __ b(cont);

  int reexecute_offset = __ pc() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif

  // Reexecute case
  // The return address is the pc that describes what bci to re-execute at.

  // No need to update map as each call to save_live_registers will produce identical oopmap
  (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
  __ b(cont);

#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    implicit_exception_uncommon_trap_offset = __ pc() - start;

    __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));

    uncommon_trap_offset = __ pc() - start;

    // Save everything in sight.
    reg_save.save_live_registers(masm, 0, &frame_size_in_words);
    // fetch_unroll_info needs to call last_java_frame()
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
    __ movw(rscratch1, -1);
    __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));

    __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
    __ mov(c_rarg0, rthread);
    __ movw(c_rarg2, rcpool); // exec mode
    __ lea(rscratch1,
           RuntimeAddress(CAST_FROM_FN_PTR(address,
                                           Deoptimization::uncommon_trap)));
    __ blr(rscratch1);
    __ bind(retaddr);
    oop_maps->add_gc_map( __ pc()-start, map->deep_copy());

    __ reset_last_Java_frame(false);

    __ b(after_fetch_unroll_info_call);
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ pc() - start;

  // Prolog for exception case

  // all registers are dead at this entry point, except for r0, and
  // r3 which contain the exception oop and exception pc
  // respectively.  Set them in TLS and fall thru to the
  // unpack_with_exception_in_tls entry point.
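  // The exception oop and pc are simply stashed in the JavaThread here; the
  // code following the unpack_with_exception_in_tls entry reloads them from
  // there.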

  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));

  int exception_in_tls_offset = __ pc() - start;

  // new implementation because exception oop is now passed in JavaThread

  // Prolog for exception case
  // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
  // tos: stack at point of call to method that threw the exception (i.e. only
  // args are on the stack, no return address)

  // The return address pushed by save_live_registers will be patched
  // later with the throwing pc. The correct value is not available
  // now because loading it from memory would destroy registers.

  // NB: The SP at this point must be the SP of the method that is
  // being deoptimized.  Deoptimization assumes that the frame created
  // here by save_live_registers is immediately below the method's SP.
  // This is a somewhat fragile mechanism.

  // Save everything in sight.
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  // Now it is safe to overwrite any register

  // Deopt during an exception.  Save exec mode for unpack_frames.
  __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved

  // load throwing pc from JavaThread and patch it as the return address
  // of the current frame. Then clear the field in JavaThread
  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ protect_return_address(r3);
  __ str(r3, Address(rfp, wordSize));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  // verify that there is really an exception oop in JavaThread
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ verify_oop(r0);

  // verify that there is no pending exception
  Label no_pending_exception;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, no_pending_exception);
  __ stop("must not have pending exception here");
  __ bind(no_pending_exception);
#endif

  __ bind(cont);

  // Call C code.  Need thread and this frame, but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.
  //
  // UnrollBlock* fetch_unroll_info(JavaThread* thread)

  // fetch_unroll_info needs to call last_java_frame().

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
#ifdef ASSERT
  { Label L;
    __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
    __ cbz(rscratch1, L);
    __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
    __ bind(L);
  }
#endif // ASSERT
  __ mov(c_rarg0, rthread);
  __ mov(c_rarg1, rcpool);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Need to have an oopmap that tells fetch_unroll_info where to
  // find any register it might need.
  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false);

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif

  // Load UnrollBlock* into r5
  __ mov(r5, r0);

  __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
  Label noException;
  __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
  __ br(Assembler::NE, noException);
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // QQQ this is useless; it was null above
  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ verify_oop(r0);

  // Overwrite the result registers with the exception results.
  __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
  // I think this is useless
  // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));

  __ bind(noException);

  // Only register save data is on the stack.
  // Now restore the result registers.  Everything else is either dead
  // or captured in the vframeArray.

  // Restore fp result register
  __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
  // Restore integer result register
  __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));

  // Pop all of the register save area off the stack
  __ add(sp, sp, frame_size_in_words * wordSize);

  // All of the register save area has been popped off the stack. Only the
  // return address remains.

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).
  //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done the return to frame 3 will still be on the stack.

  // Pop deoptimized frame
  __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
  __ sub(r2, r2, 2 * wordSize);
  __ add(sp, sp, r2);
  __ ldp(rfp, zr, __ post(sp, 2 * wordSize));

#ifdef ASSERT
  // Compilers generate code that bang the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non product builds.
  __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
  __ bang_stack_size(r19, r2);
#endif
  // Load address of array of frame pcs into r2
  __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));

  // Trash the old pc
  // __ addptr(sp, wordSize);  FIXME ????

  // Load address of array of frame sizes into r4
  __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));

  // Load counter into r3
  __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));

  // Now adjust the caller's stack to make up for the extra locals
  // but record the original sp so that we can save it in the skeletal interpreter
  // frame and the stack walking of interpreter_sender will get the unextended sp
  // value and not the "real" sp value.
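  // The skeletal interpreter frames pushed in the loop below are only sized and
  // linked here; their interpreter state is filled in later by unpack_frames().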

  const Register sender_sp = r6;

  __ mov(sender_sp, sp);
  __ ldrw(r19, Address(r5,
                       Deoptimization::UnrollBlock::
                       caller_adjustment_offset()));
  __ sub(sp, sp, r19);

  // Push interpreter frames in a loop
  __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
  __ mov(rscratch2, rscratch1);
  Label loop;
  __ bind(loop);
  __ ldr(r19, Address(__ post(r4, wordSize)));    // Load frame size
  __ sub(r19, r19, 2*wordSize);                   // We'll push pc and fp by hand
  __ ldr(lr, Address(__ post(r2, wordSize)));     // Load pc
  __ enter();                                     // Save old & set new fp
  __ sub(sp, sp, r19);                            // Prolog
  // This value is corrected by layout_activation_impl
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
  __ mov(sender_sp, sp);                          // Pass sender_sp to next frame
  __ sub(r3, r3, 1);                              // Decrement counter
  __ cbnz(r3, loop);

  // Re-push self-frame
  __ ldr(lr, Address(r2));
  __ enter();

  // Allocate a full sized register save area.  We subtract 2 because
  // enter() just pushed 2 words
  __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);

  // Restore frame locals after moving the frame
  __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
  __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  //
  // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)

  // Use rfp because the frames look interpreted now
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, rcpool); // second arg: exec_mode
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blr(rscratch1);

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start,
                       new OopMap(frame_size_in_words, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true);

  // Collect return values
  __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
  __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
  // I think this is useless (throwing pc?)
  // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));

  // Pop self-frame.
  __ leave();                           // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

// Number of stack slots between incoming argument block and the start of
// a new frame.  The PROLOG must add this many slots to the stack.
// The EPILOG must remove this many slots. aarch64 needs two slots for
// return address and fp.
// TODO think this is correct but check
uint SharedRuntime::in_preserve_stack_slots() {
  return 4;
}

uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}


VMReg SharedRuntime::thread_register() {
  return rthread->as_VMReg();
}

//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// and sets up the oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
  assert(is_polling_page_id(id), "expected a polling page stub id");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code.  Setup code generation tools.
  const char* name = SharedRuntime::stub_name(id);
  CodeBuffer buffer(name, 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start   = __ pc();
  address call_pc = nullptr;
  int frame_size_in_words;
  bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
  RegisterSaver reg_save(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */);

  // When the signal occurred, the LR was either signed and stored on the stack (in which
  // case it will be restored from the stack before being used) or unsigned and not stored
  // on the stack. Stripping ensures we get the right value.
  __ strip_return_address();

  // Save Integer and Float registers.
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  // The following is basically a call_VM.  However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  if (!cause_return) {
    // overwrite the return address pushed by save_live_registers
    // Additionally, r20 is a callee-saved register so we can look at
    // it later to determine if someone changed the return address for
    // us!
    __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
    __ protect_return_address(r20);
    __ str(r20, Address(rfp, wordSize));
  }

  // Do the call
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(call_ptr));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
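  // The map itself comes from save_live_registers() above.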

  oop_maps->add_gc_map( __ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false);

  __ membar(Assembler::LoadLoad | Assembler::LoadStore);

  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, noException);

  // Exception pending

  reg_save.restore_live_registers(masm);

  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  Label no_adjust, bail;
  if (!cause_return) {
    // If our stashed return pc was modified by the runtime we avoid touching it
    __ ldr(rscratch1, Address(rfp, wordSize));
    __ cmp(r20, rscratch1);
    __ br(Assembler::NE, no_adjust);
    __ authenticate_return_address(r20);

#ifdef ASSERT
    // Verify the correct encoding of the poll we're about to skip.
    // See NativeInstruction::is_ldrw_to_zr()
    __ ldrw(rscratch1, Address(r20));
    __ ubfx(rscratch2, rscratch1, 22, 10);
    __ cmpw(rscratch2, 0b1011100101);
    __ br(Assembler::NE, bail);
    __ ubfx(rscratch2, rscratch1, 0, 5);
    __ cmpw(rscratch2, 0b11111);
    __ br(Assembler::NE, bail);
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    __ add(r20, r20, NativeInstruction::instruction_size);
    __ protect_return_address(r20);
    __ str(r20, Address(rfp, wordSize));
  }

  __ bind(no_adjust);
  // Normal exit, restore registers and exit.
  reg_save.restore_live_registers(masm);

  __ ret(lr);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // Make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
  assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
  assert(is_resolve_id(id), "expected a resolve stub id");

  // allocate space for the code
  ResourceMark rm;

  const char* name = SharedRuntime::stub_name(id);
  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;
  RegisterSaver reg_save(false /* save_vectors */);

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = nullptr;

  int start = __ offset();

  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  {
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ mov(c_rarg0, rthread);
    __ lea(rscratch1, RuntimeAddress(destination));

    __ blr(rscratch1);
    __ bind(retaddr);
  }

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  // r0 contains the address we are going to jump to, assuming no exception was installed

  // Clear last_Java_sp
  __ reset_last_Java_frame(false);
  // Check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, pending);

  // Get the returned Method*
  __ get_vm_result_2(rmethod, rthread);
  __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));

  // r0 is where we want to jump; overwrite rscratch1's slot, which is saved and scratch
  __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
  reg_save.restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ br(rscratch1);

  // Pending exception after the safepoint

  __ bind(pending);

  reg_save.restore_live_registers(masm);

  // Exception pending => remove activation and forward to the exception handler

  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));

  __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // Make sure all code is generated
  masm->flush();

  // Return the blob; frame size is in words here.
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}

BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
  BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
  CodeBuffer buffer(buf);
  short buffer_locs[20];
  buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                         sizeof(buffer_locs)/sizeof(relocInfo));

  MacroAssembler _masm(&buffer);
  MacroAssembler* masm = &_masm;

  const Array<SigEntry>* sig_vk = vk->extended_sig();
  const Array<VMRegPair>* regs = vk->return_regs();

  int pack_fields_jobject_off = __ offset();
  // Resolve the pre-allocated buffer from the JNI handle.
  // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
  Register Rresult = r14; // See StubGenerator::generate_call_stub().
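  // Rresult points at the caller's result slot, which holds a jobject
  // handle to the buffer: load the handle, resolve it to a raw oop, and
  // store the oop back so the field stores below can write through it.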
  __ ldr(r0, Address(Rresult));
  __ resolve_jobject(r0 /* value */,
                     rthread /* thread */,
                     r12 /* tmp */);
  __ str(r0, Address(Rresult));

  int pack_fields_off = __ offset();

  int j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    VMRegPair pair = regs->at(j);
    VMReg r_1 = pair.first();
    VMReg r_2 = pair.second();
    Address to(r0, off);
    if (bt == T_FLOAT) {
      __ strs(r_1->as_FloatRegister(), to);
    } else if (bt == T_DOUBLE) {
      __ strd(r_1->as_FloatRegister(), to);
    } else {
      Register val = r_1->as_Register();
      assert_different_registers(to.base(), val, r15, r16, r17);
      if (is_reference_type(bt)) {
        __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      } else {
        __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
      }
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  if (vk->has_nullable_atomic_layout()) {
    // Zero the null marker (setting it to 1 would be better but would require an additional register)
    __ strb(zr, Address(r0, vk->null_marker_offset()));
  }
  __ ret(lr);

  int unpack_fields_off = __ offset();

  Label skip;
  __ cbz(r0, skip);

  j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    VMReg r_1 = pair.first();
    VMReg r_2 = pair.second();
    Address from(r0, off);
    if (bt == T_FLOAT) {
      __ ldrs(r_1->as_FloatRegister(), from);
    } else if (bt == T_DOUBLE) {
      __ ldrd(r_1->as_FloatRegister(), from);
    } else if (bt == T_OBJECT || bt == T_ARRAY) {
      assert_different_registers(r0, r_1->as_Register());
      __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
    } else {
      assert(is_java_primitive(bt), "unexpected basic type");
      assert_different_registers(r0, r_1->as_Register());

      size_t size_in_bytes = type2aelembytes(bt);
      __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");

  __ bind(skip);

  __ ret(lr);

  __ flush();

  return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
}

// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs.
// If the compiler needs all registers to
// be preserved between the fault point and the exception handler,
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller-saved registers were assumed volatile in the compiler.

RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
  assert(is_throw_id(id), "expected a throw stub id");

  const char* name = SharedRuntime::stub_name(id);

  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
  enum layout {
    rfp_off = 0,
    rfp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 512;
  int locs_size = 64;

  ResourceMark rm;
  const char* timer_msg = "SharedRuntime generate_throw_exception";
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));

  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();

  // This is an inlined and slightly modified version of call_VM
  // which has the ability to fetch the return PC out of
  // thread-local storage and also sets up last_Java_sp slightly
  // differently than the real call_VM.

  __ enter(); // Save FP and LR before the call

  assert(is_even(framesize/2), "sp not 16-byte aligned");

  // lr and fp are already in place
  __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog

  int frame_complete = __ pc() - start;

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  __ mov(c_rarg0, rthread);
  BLOCK_COMMENT("call runtime_entry");
  __ mov(rscratch1, runtime_entry);
  __ blr(rscratch1);

  // Generate the oop map
  OopMap* map = new OopMap(framesize, 0);

  oop_maps->add_gc_map(the_pc - start, map);

  __ reset_last_Java_frame(true);

  // Reinitialize the ptrue predicate register, in case the external runtime
  // call clobbers the ptrue register, as we may return to SVE compiled code.
  __ reinitialize_ptrue();

  __ leave();

  // Check for pending exceptions
#ifdef ASSERT
  Label L;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, L);
  __ should_not_reach_here();
  __ bind(L);
#endif // ASSERT
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name,
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

#if INCLUDE_JFR

static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
  __ mov(c_rarg0, thread);
}

// The handle is dereferenced through a load barrier.
static void jfr_epilogue(MacroAssembler* masm) {
  __ reset_last_Java_frame(true);
}

// For C2: c_rarg0 is junk; call into the runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;
  const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
  jfr_epilogue(masm);
  __ resolve_global_jobject(r0, rscratch1, rscratch2);
  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(the_pc - start, map);

  RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

// For C2: call into the runtime to return a leased buffer.
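// Unlike generate_jfr_write_checkpoint() above, there is no jobject
// result to resolve after the leaf call, so no load barrier is applied.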
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;

  const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
  jfr_epilogue(masm);

  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(the_pc - start, map);

  RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

#endif // INCLUDE_JFR