/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rfp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // we don't expect any arg reg save area so aarch64 asserts that
    // frame::arg_reg_save_area_bytes == 0
    rfp_off = 0,
    rfp_off2,
    return_off, return_off2,
    framesize
  };
};

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()        { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes() { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes();

  // Total stack size in bytes for saving sve predicate registers.
  int total_sve_predicate_in_bytes();

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
    fpu_state_off = 0,
    fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
    // The frame sender code expects that rfp will be in
    // the "natural" place and will override any oopMap
    // setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    r0_off = fpu_state_off + FPUStateSizeInWords,
    rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
    return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
    reg_save_size = return_off + Register::max_slots_per_register};

};

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers() so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegister::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegister::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}

int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate registers if
  // they are present in the stack frame pushed by save_live_registers(). So the
  // offset depends on the saved total predicate vectors in the stack frame.
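  // total_sve_predicate_in_bytes() is already a multiple of the 4-byte
  // stack slot size, so the conversion below (bytes -> slots -> bytes)
  // just places v0 immediately above the saved predicate registers
  // (offset 0 when none are saved).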
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
}

int RegisterSaver::total_sve_predicate_in_bytes() {
#ifdef COMPILER2
  if (_save_vectors && Matcher::supports_scalable_vector()) {
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
           PRegister::number_of_registers;
  }
#endif
  return 0;
}

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;
  int sve_predicate_size_in_slots = 0;
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
    }
    int extra_vector_bytes = extra_save_slots_per_register *
                             VMRegImpl::stack_slot_size *
                             FloatRegister::number_of_registers;
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register slots are 8 bytes wide, 32 floating-point registers.
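      // For example, with the default (non-vector) layout this puts r0 at
      // slot 2 * 0 + 2 * 32 == 64, i.e. 256 bytes above sp, just past the
      // 32 saved 8-byte floating-point registers.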
      int sp_offset = Register::max_slots_per_register * i +
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
                            (FloatRegister::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegister::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The minimum vector size supported by SVE is 8 bytes, and we need to
// save predicate registers when the vector size is 8 bytes as well.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
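  // For example, for a signature with an int followed by a long, the int
  // goes in j_rarg0 and the long in j_rarg1 (its trailing T_VOID half gets
  // a bad VMReg); only once all eight j_rargs (or j_fargs) are taken do
  // arguments spill to 8-byte stack slots, advancing stk_args by 2
  // four-byte slots each time.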
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}


const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;

int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {

  // Create the mapping between argument positions and registers.
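  // Note the integer registers below are listed starting with r0
  // (j_rarg7) rather than j_rarg0 so that the first -- and usually
  // only -- returned value lands in r0, the standard return register.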

  static const Register INT_ArgReg[java_return_convention_max_int] = {
    r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
  };

  static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
    j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      // Should T_METADATA be added to java_calling_convention as well?
    case T_METADATA:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_FLOAT:
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return int_args + fp_args;
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ authenticate_return_address(c_rarg1);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

// For each inline type argument, sig includes the list of fields of
// the inline type. This utility function computes the number of
// arguments for the call if inline types are passed by reference (the
// calling convention the interpreter expects).
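// Illustrative example: for a method taking (int, MyValue) where MyValue
// has an int field and a long field, sig_extended is
//   T_INT, T_METADATA, T_INT, T_LONG, T_VOID /* long's second slot */,
//   T_VOID /* end of MyValue */
// and this function returns 2.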
static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
  int total_args_passed = 0;
  if (InlineTypePassFieldsAsArgs) {
    for (int i = 0; i < sig_extended->length(); i++) {
      BasicType bt = sig_extended->at(i)._bt;
      if (bt == T_METADATA) {
        // In sig_extended, an inline type argument starts with:
        // T_METADATA, followed by the types of the fields of the
        // inline type and T_VOID to mark the end of the value
        // type. Inline types are flattened so, for instance, in the
        // case of an inline type with an int field and an inline type
        // field that itself has 2 fields, an int and a long:
        // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
        // slot for the T_LONG) T_VOID (inner inline type) T_VOID
        // (outer inline type)
        total_args_passed++;
        int vt = 1;
        do {
          i++;
          BasicType bt = sig_extended->at(i)._bt;
          BasicType prev_bt = sig_extended->at(i-1)._bt;
          if (bt == T_METADATA) {
            vt++;
          } else if (bt == T_VOID &&
                     prev_bt != T_LONG &&
                     prev_bt != T_DOUBLE) {
            vt--;
          }
        } while (vt != 0);
      } else {
        total_args_passed++;
      }
    }
  } else {
    total_args_passed = sig_extended->length();
  }

  return total_args_passed;
}


static void gen_c2i_adapter_helper(MacroAssembler* masm,
                                   BasicType bt,
                                   BasicType prev_bt,
                                   size_t size_in_bytes,
                                   const VMRegPair& reg_pair,
                                   const Address& to,
                                   Register tmp1,
                                   Register tmp2,
                                   Register tmp3,
                                   int extraspace,
                                   bool is_oop) {
  if (bt == T_VOID) {
    assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
    return;
  }

  // Say 4 args:
  //  i   st_off
  //  0   32  T_LONG
  //  1   24  T_VOID
  //  2   16  T_OBJECT
  //  3    8  T_BOOL
  //  -    0  return address
  //
  // However, to make things extra confusing: because we can fit a Java long/double
  // in a single slot on a 64-bit VM and it would be silly to break them up, the
  // interpreter leaves one slot empty and only stores to a single slot. In this
  // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.
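  // In the example above the long's value is therefore written at
  // st_off 24 (the T_VOID slot), not at st_off 32.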

  bool wide = (size_in_bytes == wordSize);
  VMReg r_1 = reg_pair.first();
  VMReg r_2 = reg_pair.second();
  assert(r_2->is_valid() == wide, "invalid size");
  if (!r_1->is_valid()) {
    assert(!r_2->is_valid(), "");
    return;
  }

  if (!r_1->is_FloatRegister()) {
    Register val = r25;
    if (r_1->is_stack()) {
      // memory to memory: use r25 (the other scratch registers are used by store_heap_oop)
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
    } else {
      val = r_1->as_Register();
    }
    assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
    if (is_oop) {
      __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
    } else {
      __ store_sized_value(to, val, size_in_bytes);
    }
  } else {
    if (wide) {
      __ strd(r_1->as_FloatRegister(), to);
    } else {
      // only a float; use just part of the slot
      __ strs(r_1->as_FloatRegister(), to);
    }
  }
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            const GrowableArray<SigEntry>* sig_extended,
                            const VMRegPair *regs,
                            bool requires_clinit_barrier,
                            address& c2i_no_clinit_check_entry,
                            Label& skip_fixup,
                            address start,
                            OopMapSet* oop_maps,
                            int& frame_complete,
                            int& frame_size_in_words,
                            bool alloc_inline_receiver) {
  if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Name some registers to be used in the following code.  We can use
  // anything except r0-r7 which are arguments in the Java calling
  // convention, rmethod (r12), and r13 which holds the outgoing sender
  // SP for the interpreter.
  Register buf_array = r10;   // Array of buffered inline types
  Register buf_oop = r11;     // Buffered inline type oop
  Register tmp1 = r15;
  Register tmp2 = r16;
  Register tmp3 = r17;

  if (InlineTypePassFieldsAsArgs) {
    // Is there an inline type argument?
    bool has_inline_argument = false;
    for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
      has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
    }
    if (has_inline_argument) {
      // There is at least one inline type argument: we're coming from
      // compiled code so we have no buffers to back the inline types.
      // Allocate the buffers here with a runtime call.
      RegisterSaver reg_save(false /* save_vectors */);
      OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

      frame_complete = __ offset();
      address the_pc = __ pc();

      Label retaddr;
      __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

      __ mov(c_rarg0, rthread);
      __ mov(c_rarg1, rmethod);
      __ mov(c_rarg2, (int64_t)alloc_inline_receiver);

      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
      __ blr(rscratch1);
      __ bind(retaddr);

      oop_maps->add_gc_map(__ pc() - start, map);
      __ reset_last_Java_frame(false);

      reg_save.restore_live_registers(masm);

      Label no_exception;
      __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
      __ cbz(rscratch1, no_exception);

      __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
      __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
      __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));

      __ bind(no_exception);

      // We get an array of objects from the runtime call
      __ get_vm_result(buf_array, rthread);
      __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
    }
  }

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int total_args_passed = compute_total_args_passed_int(sig_extended);
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, StackAlignmentInBytes);

  // set senderSP value
  __ mov(r19_sender_sp, sp);

  __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space

  // next_arg_comp is the next argument from the compiler point of
  // view (inline type fields are passed in registers/on the stack). In
  // sig_extended, an inline type argument starts with: T_METADATA,
  // followed by the types of the fields of the inline type and T_VOID
  // to mark the end of the inline type. ignored counts the number of
  // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
  // used to get the buffer for that argument from the pool of buffers
  // we allocated above and want to pass to the
  // interpreter. next_arg_int is the next argument from the
  // interpreter point of view (inline types are passed by reference).
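  // E.g. with sig_extended = T_METADATA T_INT T_VOID (a single inline
  // type argument with one int field): next_arg_comp visits all three
  // entries, ignored ends up at 2, and the interpreter receives one
  // buffered reference (next_arg_int == 1).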
  for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
       next_arg_comp < sig_extended->length(); next_arg_comp++) {
    assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
    assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
    BasicType bt = sig_extended->at(next_arg_comp)._bt;
    int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
    if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
      int next_off = st_off - Interpreter::stackElementSize;
      const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
      const VMRegPair reg_pair = regs[next_arg_comp-ignored];
      size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
      gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                             size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
      next_arg_int++;
#ifdef ASSERT
      if (bt == T_LONG || bt == T_DOUBLE) {
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
        __ str(rscratch1, Address(sp, st_off));
      }
#endif /* ASSERT */
    } else {
      ignored++;
      // get the buffer from the just allocated pool of buffers
      int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
      __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
      next_vt_arg++; next_arg_int++;
      int vt = 1;
      // write fields we get from compiled code in registers/stack
      // slots to the buffer: we know we are done with that inline type
      // argument when we hit the T_VOID that acts as an end of inline
      // type delimiter for this inline type. Inline types are flattened
      // so we might encounter embedded inline types. Each entry in
      // sig_extended contains a field offset in the buffer.
      Label L_null;
      do {
        next_arg_comp++;
        BasicType bt = sig_extended->at(next_arg_comp)._bt;
        BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
        if (bt == T_METADATA) {
          vt++;
          ignored++;
        } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
          vt--;
          ignored++;
        } else {
          int off = sig_extended->at(next_arg_comp)._offset;
          if (off == -1) {
            // Nullable inline type argument, emit null check
            VMReg reg = regs[next_arg_comp-ignored].first();
            Label L_notNull;
            if (reg->is_stack()) {
              int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
              __ ldrb(tmp1, Address(sp, ld_off));
              __ cbnz(tmp1, L_notNull);
            } else {
              __ cbnz(reg->as_Register(), L_notNull);
            }
            __ str(zr, Address(sp, st_off));
            __ b(L_null);
            __ bind(L_notNull);
            continue;
          }
          assert(off > 0, "offset in object should be positive");
          size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
          bool is_oop = is_reference_type(bt);
          gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                                 size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
        }
      } while (vt != 0);
      // pass the buffer to the interpreter
      __ str(buf_oop, Address(sp, st_off));
      __ bind(L_null);
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {


  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != nullptr) {
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::initial_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::initial_stubs_code()->code_begin(),
                  StubRoutines::initial_stubs_code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::final_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::final_stubs_code()->code_begin(),
                  StubRoutines::final_stubs_code()->code_end(),
                  L_ok);
    }
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  int total_args_passed = sig->length();

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    BasicType bt = sig->at(i)._bt;
    if (bt == T_VOID) {
      assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");

    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;
    //
    //
    //
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }


  __ mov(rscratch2, rscratch1);
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
  __ mov(rscratch1, rscratch2);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
  __ br(rscratch1);
}

static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {

  Label ok;

  Register holder = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }
}


// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
                                                            int comp_args_on_stack,
                                                            const GrowableArray<SigEntry>* sig,
                                                            const VMRegPair* regs,
                                                            const GrowableArray<SigEntry>* sig_cc,
                                                            const VMRegPair* regs_cc,
                                                            const GrowableArray<SigEntry>* sig_cc_ro,
                                                            const VMRegPair* regs_cc_ro,
                                                            AdapterFingerPrint* fingerprint,
                                                            AdapterBlob*& new_adapter,
                                                            bool allocate_code_blob) {

  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

  address c2i_unverified_entry        = __ pc();
  address c2i_unverified_inline_entry = __ pc();
  Label skip_fixup;

  gen_inline_cache_check(masm, skip_fixup);

  OopMapSet* oop_maps = new OopMapSet();
  int frame_complete = CodeOffsets::frame_never_safe;
  int frame_size_in_words = 0;

  // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
  address c2i_no_clinit_check_entry = nullptr;
  address c2i_inline_ro_entry = __ pc();
  if (regs_cc != regs_cc_ro) {
    // No class init barrier needed because method is guaranteed to be non-static
    gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
                    skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
    skip_fixup.reset();
  }

  // Scalarized c2i adapter
  address c2i_entry        = __ pc();
  address c2i_inline_entry = __ pc();
  gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
                  skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);

  // Non-scalarized c2i adapter
  if (regs != regs_cc) {
    c2i_unverified_inline_entry = __ pc();
    Label inline_entry_skip_fixup;
    gen_inline_cache_check(masm, inline_entry_skip_fixup);

    c2i_inline_entry = __ pc();
    gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
                    inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
  }


  // The c2i adapter might safepoint and trigger a GC. The caller must make sure
  // that the GC knows about the oop argument locations passed to the c2i adapter.
  if (allocate_code_blob) {
    bool caller_must_gc_arguments = (regs != regs_cc);
    new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
  }

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                     VMRegPair *regs,
                                     int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.
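  // A note on the Apple AArch64 ABI: Linux gives every stack argument a
  // full 8-byte slot, but Apple packs sub-word arguments tightly. The
  // VMRegPair encoding below cannot express that packing, hence the
  // __APPLE__ bailouts (return -1) for sub-word stack arguments.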

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than word types are stored one after another.
        // The code is unable to handle this so bailout.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than word types are stored one after another.
        // The code is unable to handle this so bailout.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID: break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
  }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID: break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
  }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size() / wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots */);

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
  __ mov(rscratch1, sp); // we can't use sp as the source in str
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ movw(rscratch1, ContinuationEntry::cookie_value());
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
#endif

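  // Fill in the rest of the entry: the continuation oop and flags from
  // the arguments, with the chunk, argsize and pin count cleared.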
  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::EQ, OK);
  __ stop("incorrect sp1");
  __ bind(OK);
#endif

  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));

  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
  __ add(rfp, sp, (int)ContinuationEntry::size());
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
  //verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used at interp_only_mode only
  interpreted_entry_offset = __ pc() - start;
  {

#ifdef ASSERT
    Label is_interp_only;
    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    __ cbnzw(rscratch1, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
    __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
    __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
    __ push_cont_fastpath(rthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
    // but that's okay: at the very worst we'll miss an async sample, and we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ cbnz(c_rarg2, call_thaw);

    const address tr_call = __ trampoline_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ b(exit);

    CodeBuffer* cbuf = masm->code_section()->outer();
    address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ cbnz(c_rarg2, call_thaw);

  const address tr_call = __ trampoline_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ b(exit);

  __ bind(call_thaw);

  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret(lr);

  /// exception handling

  exception_offset = __ pc() - start;
  {
    __ mov(r19, r0); // save return value containing the exception oop in callee-saved r19

    continuation_enter_cleanup(masm);

    __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
    __ authenticate_return_address(c_rarg1);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);

    // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc

    __ mov(r1, r0); // the exception handler
    __ mov(r0, r19); // restore return value containing the exception oop
    __ verify_oop(r0);

    __ leave();
    __ mov(r3, lr);
    __ br(r1); // the exception handler
  }

  CodeBuffer* cbuf = masm->code_section()->outer();
  address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
  enum layout {
    rfp_off1,
    rfp_off2,
    lr_off,
    lr_off2,
    framesize // inclusive of return address
  };
  // assert(is_even(framesize/2), "sp not 16-byte aligned");
  stack_slots = framesize / VMRegImpl::slots_per_word;
  assert(stack_slots == 2, "recheck layout");

  address start = __ pc();

  compiled_entry_offset = __ pc() - start;
  __ enter();

  __ mov(c_rarg1, sp);

  frame_complete = __ pc() - start;
  address the_pc = __ pc();

  __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup

  __ mov(c_rarg0, rthread);
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
  __ call_VM_leaf(Continuation::freeze_entry(), 2);
  __ reset_last_Java_frame(true);

  Label pinned;

  __ cbnz(r0, pinned);

  // We've succeeded, set sp to the ContinuationEntry
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ mov(sp, rscratch1);
  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ cbz(rscratch1, ok);
  __ leave();
  __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ br(rscratch1);
  __ bind(ok);

  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = r19;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
fatal("receiver always in a register");
      receiver_reg = r2; // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee. Critical native functions leave the state
// _in_Java, since they block out GC.
// Some other parts of JNI setup are skipped, like the tear-down of the JNI
// handle block and the check for pending exceptions, since it is impossible
// for them to be thrown.
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_continuation_native_intrinsic()) {
    int exception_offset = -1;
    OopMapSet* oop_maps = new OopMapSet();
    int frame_complete = -1;
    int stack_slots = -1;
    int interpreted_entry_offset = -1;
    int vep_offset = -1;
    if (method->is_continuation_enter_intrinsic()) {
      gen_continuation_enter(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             exception_offset,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             interpreted_entry_offset,
                             vep_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      gen_continuation_yield(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             vep_offset);
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }

#ifdef ASSERT
    if (method->is_continuation_enter_intrinsic()) {
      assert(interpreted_entry_offset != -1, "Must be set");
      assert(exception_offset != -1, "Must be set");
    } else {
      assert(interpreted_entry_offset == -1, "Must be unset");
      assert(exception_offset == -1, "Must be unset");
    }
    assert(frame_complete != -1, "Must be set");
    assert(stack_slots != -1, "Must be set");
    assert(vep_offset != -1, "Must be set");
#endif

    __ flush();
    nmethod* nm = nmethod::new_native_nmethod(method,
                                              compile_id,
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots,
                                              in_ByteSize(-1),
                                              in_ByteSize(-1),
                                              oop_maps,
                                              exception_offset);
    if (method->is_continuation_enter_intrinsic()) {
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      _cont_doYield_stub = nm;
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }
    return nm;
  }

  if
(method->is_method_handle_intrinsic()) { 1685 vmIntrinsics::ID iid = method->intrinsic_id(); 1686 intptr_t start = (intptr_t)__ pc(); 1687 int vep_offset = ((intptr_t)__ pc()) - start; 1688 1689 // First instruction must be a nop as it may need to be patched on deoptimisation 1690 __ nop(); 1691 gen_special_dispatch(masm, 1692 method, 1693 in_sig_bt, 1694 in_regs); 1695 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 1696 __ flush(); 1697 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 1698 return nmethod::new_native_nmethod(method, 1699 compile_id, 1700 masm->code(), 1701 vep_offset, 1702 frame_complete, 1703 stack_slots / VMRegImpl::slots_per_word, 1704 in_ByteSize(-1), 1705 in_ByteSize(-1), 1706 nullptr); 1707 } 1708 address native_func = method->native_function(); 1709 assert(native_func != nullptr, "must have function"); 1710 1711 // An OopMap for lock (and class if static) 1712 OopMapSet *oop_maps = new OopMapSet(); 1713 intptr_t start = (intptr_t)__ pc(); 1714 1715 // We have received a description of where all the java arg are located 1716 // on entry to the wrapper. We need to convert these args to where 1717 // the jni function will expect them. To figure out where they go 1718 // we convert the java signature to a C signature by inserting 1719 // the hidden arguments as arg[0] and possibly arg[1] (static method) 1720 1721 const int total_in_args = method->size_of_parameters(); 1722 int total_c_args = total_in_args + (method->is_static() ? 2 : 1); 1723 1724 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1725 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1726 BasicType* in_elem_bt = nullptr; 1727 1728 int argc = 0; 1729 out_sig_bt[argc++] = T_ADDRESS; 1730 if (method->is_static()) { 1731 out_sig_bt[argc++] = T_OBJECT; 1732 } 1733 1734 for (int i = 0; i < total_in_args ; i++ ) { 1735 out_sig_bt[argc++] = in_sig_bt[i]; 1736 } 1737 1738 // Now figure out where the args must be stored and how much stack space 1739 // they require. 1740 int out_arg_slots; 1741 out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args); 1742 1743 if (out_arg_slots < 0) { 1744 return nullptr; 1745 } 1746 1747 // Compute framesize for the wrapper. We need to handlize all oops in 1748 // incoming registers 1749 1750 // Calculate the total number of stack slots we will need. 
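  //
  // To make the accounting below concrete, a sketch for one case
  // (illustrative only; it assumes VMRegImpl::slots_per_word == 2 and
  // 16-byte stack alignment, and is not part of the original comments):
  //
  //   synchronized static native int m(Object o)
  //     out args (JNIEnv*, jclass, jobject) all fit in registers -> 0 slots
  //     oop handle area, 8 arg registers  -> 8 * 2              = 16 slots
  //     handlized klass (static method)                         =  2 slots
  //     lock box (synchronized)                                 =  2 slots
  //     2 slots for moves + return address + saved rfp          =  6 slots
  //                                        total 26, aligned up to 28 slots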
1751 1752 // First count the abi requirement plus all of the outgoing args 1753 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 1754 1755 // Now the space for the inbound oop handle area 1756 int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers 1757 1758 int oop_handle_offset = stack_slots; 1759 stack_slots += total_save_slots; 1760 1761 // Now any space we need for handlizing a klass if static method 1762 1763 int klass_slot_offset = 0; 1764 int klass_offset = -1; 1765 int lock_slot_offset = 0; 1766 bool is_static = false; 1767 1768 if (method->is_static()) { 1769 klass_slot_offset = stack_slots; 1770 stack_slots += VMRegImpl::slots_per_word; 1771 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1772 is_static = true; 1773 } 1774 1775 // Plus a lock if needed 1776 1777 if (method->is_synchronized()) { 1778 lock_slot_offset = stack_slots; 1779 stack_slots += VMRegImpl::slots_per_word; 1780 } 1781 1782 // Now a place (+2) to save return values or temp during shuffling 1783 // + 4 for return address (which we own) and saved rfp 1784 stack_slots += 6; 1785 1786 // Ok The space we have allocated will look like: 1787 // 1788 // 1789 // FP-> | | 1790 // |---------------------| 1791 // | 2 slots for moves | 1792 // |---------------------| 1793 // | lock box (if sync) | 1794 // |---------------------| <- lock_slot_offset 1795 // | klass (if static) | 1796 // |---------------------| <- klass_slot_offset 1797 // | oopHandle area | 1798 // |---------------------| <- oop_handle_offset (8 java arg registers) 1799 // | outbound memory | 1800 // | based arguments | 1801 // | | 1802 // |---------------------| 1803 // | | 1804 // SP-> | out_preserved_slots | 1805 // 1806 // 1807 1808 1809 // Now compute actual number of stack words we need rounding to make 1810 // stack properly aligned. 1811 stack_slots = align_up(stack_slots, StackAlignmentInSlots); 1812 1813 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 1814 1815 // First thing make an ic check to see if we should even be here 1816 1817 // We are free to use all registers as temps without saving them and 1818 // restoring them except rfp. rfp is the only callee save register 1819 // as far as the interpreter and the compiler(s) are concerned. 1820 1821 1822 const Register ic_reg = rscratch2; 1823 const Register receiver = j_rarg0; 1824 1825 Label hit; 1826 Label exception_pending; 1827 1828 assert_different_registers(ic_reg, receiver, rscratch1); 1829 __ verify_oop(receiver); 1830 __ cmp_klass(receiver, ic_reg, rscratch1); 1831 __ br(Assembler::EQ, hit); 1832 1833 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 1834 1835 // Verified entry point must be aligned 1836 __ align(8); 1837 1838 __ bind(hit); 1839 1840 int vep_offset = ((intptr_t)__ pc()) - start; 1841 1842 // If we have to make this method not-entrant we'll overwrite its 1843 // first instruction with a jump. For this action to be legal we 1844 // must ensure that this first instruction is a B, BL, NOP, BKPT, 1845 // SVC, HVC, or SMC. Make it a NOP. 
__ nop();

  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
    Label L_skip_barrier;
    __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
  }

  // Generate stack overflow check
  __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rfp
  __ sub(sp, sp, stack_size - 2*wordSize);

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  // We use r20 as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = r20;

  //
  // We immediately shuffle the arguments so that any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle

  // The Java calling convention is either equal (linux) or denser (win64) than the
  // C calling convention. However, because of the jni_env argument, the C calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore if we move the args from java -> c backwards then we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
  //

  // Record esp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  // Mark location of rfp (someday)
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));

  int float_args = 0;
  int int_args = 0;

#ifdef ASSERT
  bool reg_destroyed[Register::number_of_registers];
  bool freg_destroyed[FloatRegister::number_of_registers];
  for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  // For JNI natives the incoming and outgoing registers are offset upwards.
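  //
  // A sketch of why the backwards walk below is safe (illustrative, and
  // assuming the usual aarch64 mapping where j_rarg0 == c_rarg1,
  // j_rarg1 == c_rarg2, and so on): once the JNIEnv* and mirror are
  // inserted, each Java arg of a static method moves "up" by one register,
  // e.g.
  //   java: arg0 in r1, arg1 in r2
  //   c:    env  in r0, mirror in r1, arg0 in r2, arg1 in r3
  // Moving arg1 first (r2 -> r3) and then arg0 (r1 -> r2) never clobbers a
  // register that still holds a pending source.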
1924 GrowableArray<int> arg_order(2 * total_in_args); 1925 VMRegPair tmp_vmreg; 1926 tmp_vmreg.set2(r19->as_VMReg()); 1927 1928 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) { 1929 arg_order.push(i); 1930 arg_order.push(c_arg); 1931 } 1932 1933 int temploc = -1; 1934 for (int ai = 0; ai < arg_order.length(); ai += 2) { 1935 int i = arg_order.at(ai); 1936 int c_arg = arg_order.at(ai + 1); 1937 __ block_comment(err_msg("move %d -> %d", i, c_arg)); 1938 assert(c_arg != -1 && i != -1, "wrong order"); 1939 #ifdef ASSERT 1940 if (in_regs[i].first()->is_Register()) { 1941 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!"); 1942 } else if (in_regs[i].first()->is_FloatRegister()) { 1943 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!"); 1944 } 1945 if (out_regs[c_arg].first()->is_Register()) { 1946 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 1947 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 1948 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true; 1949 } 1950 #endif /* ASSERT */ 1951 switch (in_sig_bt[i]) { 1952 case T_ARRAY: 1953 case T_OBJECT: 1954 __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 1955 ((i == 0) && (!is_static)), 1956 &receiver_offset); 1957 int_args++; 1958 break; 1959 case T_VOID: 1960 break; 1961 1962 case T_FLOAT: 1963 __ float_move(in_regs[i], out_regs[c_arg]); 1964 float_args++; 1965 break; 1966 1967 case T_DOUBLE: 1968 assert( i + 1 < total_in_args && 1969 in_sig_bt[i + 1] == T_VOID && 1970 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 1971 __ double_move(in_regs[i], out_regs[c_arg]); 1972 float_args++; 1973 break; 1974 1975 case T_LONG : 1976 __ long_move(in_regs[i], out_regs[c_arg]); 1977 int_args++; 1978 break; 1979 1980 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 1981 1982 default: 1983 __ move32_64(in_regs[i], out_regs[c_arg]); 1984 int_args++; 1985 } 1986 } 1987 1988 // point c_arg at the first arg that is already loaded in case we 1989 // need to spill before we call out 1990 int c_arg = total_c_args - total_in_args; 1991 1992 // Pre-load a static method's oop into c_rarg1. 1993 if (method->is_static()) { 1994 1995 // load oop into a register 1996 __ movoop(c_rarg1, 1997 JNIHandles::make_local(method->method_holder()->java_mirror())); 1998 1999 // Now handlize the static class mirror it's known not-null. 2000 __ str(c_rarg1, Address(sp, klass_offset)); 2001 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2002 2003 // Now get the handle 2004 __ lea(c_rarg1, Address(sp, klass_offset)); 2005 // and protect the arg if we must spill 2006 c_arg--; 2007 } 2008 2009 // Change state to native (we save the return address in the thread, since it might not 2010 // be pushed on the stack when we do a stack traversal). 
2011 // We use the same pc/oopMap repeatedly when we call out 2012 2013 Label native_return; 2014 __ set_last_Java_frame(sp, noreg, native_return, rscratch1); 2015 2016 Label dtrace_method_entry, dtrace_method_entry_done; 2017 { 2018 uint64_t offset; 2019 __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset); 2020 __ ldrb(rscratch1, Address(rscratch1, offset)); 2021 __ cbnzw(rscratch1, dtrace_method_entry); 2022 __ bind(dtrace_method_entry_done); 2023 } 2024 2025 // RedefineClasses() tracing support for obsolete method entry 2026 if (log_is_enabled(Trace, redefine, class, obsolete)) { 2027 // protect the args we've loaded 2028 save_args(masm, total_c_args, c_arg, out_regs); 2029 __ mov_metadata(c_rarg1, method()); 2030 __ call_VM_leaf( 2031 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 2032 rthread, c_rarg1); 2033 restore_args(masm, total_c_args, c_arg, out_regs); 2034 } 2035 2036 // Lock a synchronized method 2037 2038 // Register definitions used by locking and unlocking 2039 2040 const Register swap_reg = r0; 2041 const Register obj_reg = r19; // Will contain the oop 2042 const Register lock_reg = r13; // Address of compiler lock object (BasicLock) 2043 const Register old_hdr = r13; // value of old header at unlock time 2044 const Register lock_tmp = r14; // Temporary used by lightweight_lock/unlock 2045 const Register tmp = lr; 2046 2047 Label slow_path_lock; 2048 Label lock_done; 2049 2050 if (method->is_synchronized()) { 2051 Label count; 2052 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); 2053 2054 // Get the handle (the 2nd argument) 2055 __ mov(oop_handle_reg, c_rarg1); 2056 2057 // Get address of the box 2058 2059 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 2060 2061 // Load the oop from the handle 2062 __ ldr(obj_reg, Address(oop_handle_reg, 0)); 2063 2064 if (LockingMode == LM_MONITOR) { 2065 __ b(slow_path_lock); 2066 } else if (LockingMode == LM_LEGACY) { 2067 // Load (object->mark() | 1) into swap_reg %r0 2068 __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 2069 __ orr(swap_reg, rscratch1, 1); 2070 if (EnableValhalla) { 2071 // Mask inline_type bit such that we go to the slow path if object is an inline type 2072 __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place)); 2073 } 2074 2075 // Save (object->mark() | 1) into BasicLock's displaced header 2076 __ str(swap_reg, Address(lock_reg, mark_word_offset)); 2077 2078 // src -> dest iff dest == r0 else r0 <- dest 2079 __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr); 2080 2081 // Hmm should this move to the slow path code area??? 2082 2083 // Test if the oopMark is an obvious stack pointer, i.e., 2084 // 1) (mark & 3) == 0, and 2085 // 2) sp <= mark < mark + os::pagesize() 2086 // These 3 tests can be done by evaluating the following 2087 // expression: ((mark - sp) & (3 - os::vm_page_size())), 2088 // assuming both stack pointer and pagesize have their 2089 // least significant 2 bits clear. 
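      // A worked instance of that expression (illustrative only, assuming a
      // 4K page): 3 - 4096 == 0xfffffffffffff003, so the AND keeps bits
      // [1:0] and bits [63:12] of (mark - sp). Because sp is 16-byte
      // aligned, a zero result means both (mark & 3) == 0 and
      // 0 <= mark - sp < 4096, i.e. the mark is a stack lock address in our
      // own frame -- the recursive case.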
// NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg

      __ sub(swap_reg, sp, swap_reg);
      __ neg(swap_reg, swap_reg);
      __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());

      // Save the test result, for recursive case, the result is zero
      __ str(swap_reg, Address(lock_reg, mark_word_offset));
      __ br(Assembler::NE, slow_path_lock);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
      __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
    }
    __ bind(count);
    __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));

    // Slow path will re-enter here
    __ bind(lock_done);
  }

  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));

  // Now set thread in native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  __ rt_call(native_func);

  __ bind(native_return);

  intptr_t return_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(return_pc - start, map);

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ c2bool(r0);          break;
  case T_CHAR   : __ ubfx(r0, r0, 0, 16); break;
  case T_BYTE   : __ sbfx(r0, r0, 0, 8);  break;
  case T_SHORT  : __ sbfx(r0, r0, 0, 16); break;
  case T_INT    : __ sbfx(r0, r0, 0, 32); break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in v0; we'll save it as needed
    break;
  case T_ARRAY:  // Really a handle
  case T_OBJECT: // Really a handle
    break; // can't de-handlize until after safepoint check
  case T_VOID: break;
  case T_LONG: break;
  default      : ShouldNotReachHere();
  }

  Label safepoint_in_progress, safepoint_in_progress_done;
  Label after_transition;

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
  __ mov(rscratch1, _thread_in_native_trans);

  __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ dmb(Assembler::ISH);
  }

  __ verify_sve_vector_length();

  // Check for safepoint operation in progress and/or pending suspend requests.
  {
    // We need an acquire here to ensure that any subsequent load of the
    // global SafepointSynchronize::_state flag is ordered after this load
    // of the thread-local polling word. We don't want this poll to
    // return false (i.e. not safepointing) and a later poll of the global
    // SafepointSynchronize::_state spuriously to return true.
//
    // This is to avoid a race when we're in a native->Java transition
    // racing the code which wakes up from a safepoint.

    __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbnzw(rscratch1, safepoint_in_progress);
    __ bind(safepoint_in_progress_done);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
  __ bind(after_transition);

  Label reguard;
  Label reguard_done;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);

  // native result if any is live

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    Label done, not_recursive;

    if (LockingMode == LM_LEGACY) {
      // Simple recursive lock?
      __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      __ cbnz(rscratch1, not_recursive);
      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
      __ b(done);
    }

    __ bind(not_recursive);

    // Must save r0 if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (LockingMode == LM_MONITOR) {
      __ b(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // get address of the stack lock
      __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      // get old displaced header
      __ ldr(old_hdr, Address(r0, 0));

      // Atomic swap old header if oop still contains the stack lock
      Label count;
      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
      __ bind(count);
      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "");
      __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock);
      __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
      __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
    }

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  Label dtrace_method_exit, dtrace_method_exit_done;
  {
    uint64_t offset;
    __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
    __ ldrb(rscratch1, Address(rscratch1, offset));
    __ cbnzw(rscratch1, dtrace_method_exit);
    __ bind(dtrace_method_exit_done);
  }

  __ reset_last_Java_frame(false);

  // Unbox oop result, e.g. JNIHandles::resolve result.
if (is_reference_type(ret_type)) {
    __ resolve_jobject(r0, r1, r2);
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(r2, JNIHandleBlock::top_offset()));

  __ leave();

  // Any exception pending?
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  __ cbnz(rscratch1, exception_pending);

  // We're done
  __ ret(lr);

  // Unexpected paths are out of line and go here

  // forward the exception
  __ bind(exception_pending);

  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    __ block_comment("Slow path lock {");
    __ bind(slow_path_lock);

    // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
    // args are (oop obj, BasicLock* lock, JavaThread* thread)

    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);

    __ mov(c_rarg0, obj_reg);
    __ mov(c_rarg1, lock_reg);
    __ mov(c_rarg2, rthread);

    // Not a leaf but we have last_Java_frame setup as we want
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
    restore_args(masm, total_c_args, c_arg, out_regs);

#ifdef ASSERT
    { Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ b(lock_done);

    __ block_comment("} Slow path lock");

    __ block_comment("Slow path unlock {");
    __ bind(slow_path_unlock);

    // If we haven't already saved the native result we must save it now as the
    // floating-point registers are still exposed.
if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
      save_native_result(masm, ret_type, stack_slots);
    }

    __ mov(c_rarg2, rthread);
    __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ mov(c_rarg0, obj_reg);

    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
    // NOTE that obj_reg == r19 currently
    __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));

#ifdef ASSERT
    {
      Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
      __ bind(L);
    }
#endif /* ASSERT */

    __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
      restore_native_result(masm, ret_type, stack_slots);
    }
    __ b(unlock_done);

    __ block_comment("} Slow path unlock");

  } // synchronized

  // SLOW PATH Reguard the stack if needed

  __ bind(reguard);
  save_native_result(masm, ret_type, stack_slots);
  __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  restore_native_result(masm, ret_type, stack_slots);
  // and continue
  __ b(reguard_done);

  // SLOW PATH safepoint
  {
    __ block_comment("safepoint {");
    __ bind(safepoint_in_progress);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    //
    save_native_result(masm, ret_type, stack_slots);
    __ mov(c_rarg0, rthread);
#ifndef PRODUCT
    assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
    __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ blr(rscratch1);

    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    __ b(safepoint_in_progress_done);
    __ block_comment("} safepoint");
  }

  // SLOW PATH dtrace support
  {
    __ block_comment("dtrace entry {");
    __ bind(dtrace_method_entry);

    // We have all of the arguments setup at this point. We must not touch any of the
    // register argument registers here (we save/restore them around the runtime call
    // below, since there is no oop map covering them at this point).
2411 2412 save_args(masm, total_c_args, c_arg, out_regs); 2413 __ mov_metadata(c_rarg1, method()); 2414 __ call_VM_leaf( 2415 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2416 rthread, c_rarg1); 2417 restore_args(masm, total_c_args, c_arg, out_regs); 2418 __ b(dtrace_method_entry_done); 2419 __ block_comment("} dtrace entry"); 2420 } 2421 2422 { 2423 __ block_comment("dtrace exit {"); 2424 __ bind(dtrace_method_exit); 2425 save_native_result(masm, ret_type, stack_slots); 2426 __ mov_metadata(c_rarg1, method()); 2427 __ call_VM_leaf( 2428 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2429 rthread, c_rarg1); 2430 restore_native_result(masm, ret_type, stack_slots); 2431 __ b(dtrace_method_exit_done); 2432 __ block_comment("} dtrace exit"); 2433 } 2434 2435 2436 __ flush(); 2437 2438 nmethod *nm = nmethod::new_native_nmethod(method, 2439 compile_id, 2440 masm->code(), 2441 vep_offset, 2442 frame_complete, 2443 stack_slots / VMRegImpl::slots_per_word, 2444 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2445 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size), 2446 oop_maps); 2447 2448 return nm; 2449 } 2450 2451 // this function returns the adjust size (in number of words) to a c2i adapter 2452 // activation for use during deoptimization 2453 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2454 assert(callee_locals >= callee_parameters, 2455 "test and remove; got more parms than locals"); 2456 if (callee_locals < callee_parameters) 2457 return 0; // No adjustment for negative locals 2458 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords; 2459 // diff is counted in stack words 2460 return align_up(diff, 2); 2461 } 2462 2463 2464 //------------------------------generate_deopt_blob---------------------------- 2465 void SharedRuntime::generate_deopt_blob() { 2466 // Allocate space for the code 2467 ResourceMark rm; 2468 // Setup code generation tools 2469 int pad = 0; 2470 #if INCLUDE_JVMCI 2471 if (EnableJVMCI) { 2472 pad += 512; // Increase the buffer size when compiling for JVMCI 2473 } 2474 #endif 2475 CodeBuffer buffer("deopt_blob", 2048+pad, 1024); 2476 MacroAssembler* masm = new MacroAssembler(&buffer); 2477 int frame_size_in_words; 2478 OopMap* map = nullptr; 2479 OopMapSet *oop_maps = new OopMapSet(); 2480 RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0); 2481 2482 // ------------- 2483 // This code enters when returning to a de-optimized nmethod. A return 2484 // address has been pushed on the stack, and return values are in 2485 // registers. 2486 // If we are doing a normal deopt then we were called from the patched 2487 // nmethod from the point we returned to the nmethod. So the return 2488 // address on the stack is wrong by NativeCall::instruction_size 2489 // We will adjust the value so it looks like we have the original return 2490 // address on the stack (like when we eagerly deoptimized). 2491 // In the case of an exception pending when deoptimizing, we enter 2492 // with a return address on the stack that points after the call we patched 2493 // into the exception handler. We have the following register state from, 2494 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp). 2495 // r0: exception oop 2496 // r19: exception handler 2497 // r3: throwing pc 2498 // So in this case we simply jam r3 into the useless return address and 2499 // the stack looks just like we want. 2500 // 2501 // At this point we need to de-opt. 
We save the argument return
  // registers. We call the first C routine, fetch_unroll_info(). This
  // routine captures the return values and returns a structure which
  // describes the current frame size and the sizes of all replacement frames.
  // The current frame is compiled code and may contain many inlined
  // functions, each with their own JVM state. We pop the current frame, then
  // push all the new frames. Then we call the C routine unpack_frames() to
  // populate these frames. Finally unpack_frames() returns us the new target
  // address. Notice that callee-save registers are BLOWN here; they have
  // already been captured in the vframeArray at the time the return PC was
  // patched.
  address start = __ pc();
  Label cont;

  // Prolog for the non-exception case!

  // Save everything in sight.
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  // Normal deoptimization. Save exec mode for unpack_frames.
  __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
  __ b(cont);

  int reexecute_offset = __ pc() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif

  // Reexecute case
  // The return address is the pc that describes which bci to re-execute at

  // No need to update map as each call to save_live_registers will produce an identical oopmap
  (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
  __ b(cont);

#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    implicit_exception_uncommon_trap_offset = __ pc() - start;

    __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));

    uncommon_trap_offset = __ pc() - start;

    // Save everything in sight.
    reg_save.save_live_registers(masm, 0, &frame_size_in_words);
    // fetch_unroll_info needs to call last_java_frame()
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
    __ movw(rscratch1, -1);
    __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));

    __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
    __ mov(c_rarg0, rthread);
    __ movw(c_rarg2, rcpool); // exec mode
    __ lea(rscratch1,
           RuntimeAddress(CAST_FROM_FN_PTR(address,
                                           Deoptimization::uncommon_trap)));
    __ blr(rscratch1);
    __ bind(retaddr);
    oop_maps->add_gc_map( __ pc()-start, map->deep_copy());

    __ reset_last_Java_frame(false);

    __ b(after_fetch_unroll_info_call);
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ pc() - start;

  // Prolog for exception case

  // all registers are dead at this entry point, except for r0, and
  // r3 which contain the exception oop and exception pc
  // respectively. Set them in TLS and fall thru to the
  // unpack_with_exception_in_tls entry point.
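  //
  // In other words (an illustrative restatement): this entry receives
  //   r0: exception oop
  //   r3: throwing pc
  // and simply parks both in the JavaThread fields written below, so the
  // code can fall through and share the "exception in TLS" path.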
__ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));

  int exception_in_tls_offset = __ pc() - start;

  // new implementation because exception oop is now passed in JavaThread

  // Prolog for exception case
  // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
  // tos: stack at point of call to method that threw the exception (i.e. only
  // args are on the stack, no return address)

  // The return address pushed by save_live_registers will be patched
  // later with the throwing pc. The correct value is not available
  // now because loading it from memory would destroy registers.

  // NB: The SP at this point must be the SP of the method that is
  // being deoptimized. Deoptimization assumes that the frame created
  // here by save_live_registers is immediately below the method's SP.
  // This is a somewhat fragile mechanism.

  // Save everything in sight.
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  // Now it is safe to overwrite any register

  // Deopt during an exception. Save exec mode for unpack_frames.
  __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved

  // load throwing pc from JavaThread and patch it as the return address
  // of the current frame. Then clear the field in JavaThread
  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ protect_return_address(r3);
  __ str(r3, Address(rfp, wordSize));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  // verify that there is really an exception oop in JavaThread
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ verify_oop(r0);

  // verify that there is no pending exception
  Label no_pending_exception;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, no_pending_exception);
  __ stop("must not have pending exception here");
  __ bind(no_pending_exception);
#endif

  __ bind(cont);

  // Call C code. Need thread and this frame, but NOT official VM entry
  // crud. We cannot block on this call, no GC can happen.
  //
  // UnrollBlock* fetch_unroll_info(JavaThread* thread)

  // fetch_unroll_info needs to call last_java_frame().

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
#ifdef ASSERT
  { Label L;
    __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
    __ cbz(rscratch1, L);
    __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
    __ bind(L);
  }
#endif // ASSERT
  __ mov(c_rarg0, rthread);
  __ mov(c_rarg1, rcpool);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Need to have an oopmap that tells fetch_unroll_info where to
  // find any register it might need.
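  // (The offset recorded below is the return address of the blr above,
  // __ pc() - start, and the map is the one built by save_live_registers,
  // so every saved register can be located from the frame.)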
2666 oop_maps->add_gc_map(__ pc() - start, map); 2667 2668 __ reset_last_Java_frame(false); 2669 2670 #if INCLUDE_JVMCI 2671 if (EnableJVMCI) { 2672 __ bind(after_fetch_unroll_info_call); 2673 } 2674 #endif 2675 2676 // Load UnrollBlock* into r5 2677 __ mov(r5, r0); 2678 2679 __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset())); 2680 Label noException; 2681 __ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending? 2682 __ br(Assembler::NE, noException); 2683 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset())); 2684 // QQQ this is useless it was null above 2685 __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset())); 2686 __ str(zr, Address(rthread, JavaThread::exception_oop_offset())); 2687 __ str(zr, Address(rthread, JavaThread::exception_pc_offset())); 2688 2689 __ verify_oop(r0); 2690 2691 // Overwrite the result registers with the exception results. 2692 __ str(r0, Address(sp, reg_save.r0_offset_in_bytes())); 2693 // I think this is useless 2694 // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes())); 2695 2696 __ bind(noException); 2697 2698 // Only register save data is on the stack. 2699 // Now restore the result registers. Everything else is either dead 2700 // or captured in the vframeArray. 2701 2702 // Restore fp result register 2703 __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes())); 2704 // Restore integer result register 2705 __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes())); 2706 2707 // Pop all of the register save area off the stack 2708 __ add(sp, sp, frame_size_in_words * wordSize); 2709 2710 // All of the register save area has been popped of the stack. Only the 2711 // return address remains. 2712 2713 // Pop all the frames we must move/replace. 2714 // 2715 // Frame picture (youngest to oldest) 2716 // 1: self-frame (no frame link) 2717 // 2: deopting frame (no frame link) 2718 // 3: caller of deopting frame (could be compiled/interpreted). 2719 // 2720 // Note: by leaving the return address of self-frame on the stack 2721 // and using the size of frame 2 to adjust the stack 2722 // when we are done the return to frame 3 will still be on the stack. 2723 2724 // Pop deoptimized frame 2725 __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset())); 2726 __ sub(r2, r2, 2 * wordSize); 2727 __ add(sp, sp, r2); 2728 __ ldp(rfp, zr, __ post(sp, 2 * wordSize)); 2729 2730 #ifdef ASSERT 2731 // Compilers generate code that bang the stack by as much as the 2732 // interpreter would need. So this stack banging should never 2733 // trigger a fault. Verify that it does not on non product builds. 2734 __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset())); 2735 __ bang_stack_size(r19, r2); 2736 #endif 2737 // Load address of array of frame pcs into r2 2738 __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset())); 2739 2740 // Trash the old pc 2741 // __ addptr(sp, wordSize); FIXME ???? 2742 2743 // Load address of array of frame sizes into r4 2744 __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset())); 2745 2746 // Load counter into r3 2747 __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset())); 2748 2749 // Now adjust the caller's stack to make up for the extra locals 2750 // but record the original sp so that we can save it in the skeletal interpreter 2751 // frame and the stack walking of interpreter_sender will get the unextended sp 2752 // value and not the "real" sp value. 
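  //
  // For illustration (an example, not original text): with
  // Interpreter::stackElementWords == 1, a callee with 2 parameters but 5
  // locals needs (5 - 2) * 1 = 3 extra words, which last_frame_adjust
  // (above) rounds up to 4; caller_adjustment is how the runtime
  // communicates this kind of extra-locals space to reserve here.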
2753 2754 const Register sender_sp = r6; 2755 2756 __ mov(sender_sp, sp); 2757 __ ldrw(r19, Address(r5, 2758 Deoptimization::UnrollBlock:: 2759 caller_adjustment_offset())); 2760 __ sub(sp, sp, r19); 2761 2762 // Push interpreter frames in a loop 2763 __ mov(rscratch1, (uint64_t)0xDEADDEAD); // Make a recognizable pattern 2764 __ mov(rscratch2, rscratch1); 2765 Label loop; 2766 __ bind(loop); 2767 __ ldr(r19, Address(__ post(r4, wordSize))); // Load frame size 2768 __ sub(r19, r19, 2*wordSize); // We'll push pc and fp by hand 2769 __ ldr(lr, Address(__ post(r2, wordSize))); // Load pc 2770 __ enter(); // Save old & set new fp 2771 __ sub(sp, sp, r19); // Prolog 2772 // This value is corrected by layout_activation_impl 2773 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 2774 __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable 2775 __ mov(sender_sp, sp); // Pass sender_sp to next frame 2776 __ sub(r3, r3, 1); // Decrement counter 2777 __ cbnz(r3, loop); 2778 2779 // Re-push self-frame 2780 __ ldr(lr, Address(r2)); 2781 __ enter(); 2782 2783 // Allocate a full sized register save area. We subtract 2 because 2784 // enter() just pushed 2 words 2785 __ sub(sp, sp, (frame_size_in_words - 2) * wordSize); 2786 2787 // Restore frame locals after moving the frame 2788 __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes())); 2789 __ str(r0, Address(sp, reg_save.r0_offset_in_bytes())); 2790 2791 // Call C code. Need thread but NOT official VM entry 2792 // crud. We cannot block on this call, no GC can happen. Call should 2793 // restore return values to their stack-slots with the new SP. 2794 // 2795 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode) 2796 2797 // Use rfp because the frames look interpreted now 2798 // Don't need the precise return PC here, just precise enough to point into this code blob. 2799 address the_pc = __ pc(); 2800 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); 2801 2802 __ mov(c_rarg0, rthread); 2803 __ movw(c_rarg1, rcpool); // second arg: exec_mode 2804 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames))); 2805 __ blr(rscratch1); 2806 2807 // Set an oopmap for the call site 2808 // Use the same PC we used for the last java frame 2809 oop_maps->add_gc_map(the_pc - start, 2810 new OopMap( frame_size_in_words, 0 )); 2811 2812 // Clear fp AND pc 2813 __ reset_last_Java_frame(true); 2814 2815 // Collect return values 2816 __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes())); 2817 __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes())); 2818 // I think this is useless (throwing pc?) 2819 // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes())); 2820 2821 // Pop self-frame. 2822 __ leave(); // Epilog 2823 2824 // Jump to interpreter 2825 __ ret(lr); 2826 2827 // Make sure all code is generated 2828 masm->flush(); 2829 2830 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); 2831 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 2832 #if INCLUDE_JVMCI 2833 if (EnableJVMCI) { 2834 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset); 2835 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset); 2836 } 2837 #endif 2838 } 2839 2840 // Number of stack slots between incoming argument block and the start of 2841 // a new frame. The PROLOG must add this many slots to the stack. 
The
// EPILOG must remove this many slots. aarch64 needs two words (four
// VMRegImpl slots) for the return address and fp.
// TODO think this is correct but check
uint SharedRuntime::in_preserve_stack_slots() {
  return 4;
}

uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}

#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  address start = __ pc();

  // Push self-frame. We get here with a return address in LR
  // and sp should be 16 byte aligned
  // push rfp and retaddr by hand
  __ protect_return_address();
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // we don't expect an arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // compiler left unloaded_class_index in j_rarg0 move to where the
  // runtime expects it.
  if (c_rarg1 != j_rarg0) {
    __ movw(c_rarg1, j_rarg0);
  }

  // we need to set the last SP to the stack pointer of the stub frame
  // and the pc to the address where this runtime call will return
  // (although actually any pc in this code blob will do).
  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // Call C code. Need thread but NOT official VM entry
  // crud. We cannot block on this call, no GC can happen. Call should
  // capture callee-saved registers as well as return values.
  // The thread is passed in c_rarg0 below.
  //
  // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
  //
  // n.b. 2 gp args, 0 fp args, integral return type

  __ mov(c_rarg0, rthread);
  __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1,
         RuntimeAddress(CAST_FROM_FN_PTR(address,
                                         Deoptimization::uncommon_trap)));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Set an oopmap for the call site
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);

  // location of rfp is known implicitly by the frame sender code

  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false);

  // move UnrollBlock* into r4
  __ mov(r4, r0);

#ifdef ASSERT
  { Label L;
    __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset()));
    __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
    __ br(Assembler::EQ, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).

  // Pop self-frame. We have no frame, and must rely only on r0 and sp.
  __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
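  // (A sanity check on the arithmetic, assuming framesize counts 32-bit
  // slots as elsewhere in this file: framesize << LogBytesPerInt converts
  // slots to bytes, which here is exactly the rfp/lr pair pushed by hand
  // in the prolog above.)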
2936 2937 // Pop deoptimized frame (int) 2938 __ ldrw(r2, Address(r4, 2939 Deoptimization::UnrollBlock:: 2940 size_of_deoptimized_frame_offset())); 2941 __ sub(r2, r2, 2 * wordSize); 2942 __ add(sp, sp, r2); 2943 __ ldp(rfp, zr, __ post(sp, 2 * wordSize)); 2944 2945 #ifdef ASSERT 2946 // Compilers generate code that bang the stack by as much as the 2947 // interpreter would need. So this stack banging should never 2948 // trigger a fault. Verify that it does not on non product builds. 2949 __ ldrw(r1, Address(r4, 2950 Deoptimization::UnrollBlock:: 2951 total_frame_sizes_offset())); 2952 __ bang_stack_size(r1, r2); 2953 #endif 2954 2955 // Load address of array of frame pcs into r2 (address*) 2956 __ ldr(r2, Address(r4, 2957 Deoptimization::UnrollBlock::frame_pcs_offset())); 2958 2959 // Load address of array of frame sizes into r5 (intptr_t*) 2960 __ ldr(r5, Address(r4, 2961 Deoptimization::UnrollBlock:: 2962 frame_sizes_offset())); 2963 2964 // Counter 2965 __ ldrw(r3, Address(r4, 2966 Deoptimization::UnrollBlock:: 2967 number_of_frames_offset())); // (int) 2968 2969 // Now adjust the caller's stack to make up for the extra locals but 2970 // record the original sp so that we can save it in the skeletal 2971 // interpreter frame and the stack walking of interpreter_sender 2972 // will get the unextended sp value and not the "real" sp value. 2973 2974 const Register sender_sp = r8; 2975 2976 __ mov(sender_sp, sp); 2977 __ ldrw(r1, Address(r4, 2978 Deoptimization::UnrollBlock:: 2979 caller_adjustment_offset())); // (int) 2980 __ sub(sp, sp, r1); 2981 2982 // Push interpreter frames in a loop 2983 Label loop; 2984 __ bind(loop); 2985 __ ldr(r1, Address(r5, 0)); // Load frame size 2986 __ sub(r1, r1, 2 * wordSize); // We'll push pc and rfp by hand 2987 __ ldr(lr, Address(r2, 0)); // Save return address 2988 __ enter(); // and old rfp & set new rfp 2989 __ sub(sp, sp, r1); // Prolog 2990 __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable 2991 // This value is corrected by layout_activation_impl 2992 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 2993 __ mov(sender_sp, sp); // Pass sender_sp to next frame 2994 __ add(r5, r5, wordSize); // Bump array pointer (sizes) 2995 __ add(r2, r2, wordSize); // Bump array pointer (pcs) 2996 __ subsw(r3, r3, 1); // Decrement counter 2997 __ br(Assembler::GT, loop); 2998 __ ldr(lr, Address(r2, 0)); // save final return address 2999 // Re-push self-frame 3000 __ enter(); // & old rfp & set new rfp 3001 3002 // Use rfp because the frames look interpreted now 3003 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP. 3004 // Don't need the precise return PC here, just precise enough to point into this code blob. 3005 address the_pc = __ pc(); 3006 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); 3007 3008 // Call C code. Need thread but NOT official VM entry 3009 // crud. We cannot block on this call, no GC can happen. Call should 3010 // restore return values to their stack-slots with the new SP. 3011 // Thread is in rdi already. 3012 // 3013 // BasicType unpack_frames(JavaThread* thread, int exec_mode); 3014 // 3015 // n.b. 
2 gp args, 0 fp args, integral return type

  // sp should already be aligned
  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blr(rscratch1);

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true);

  // Pop self-frame.
  __ leave(); // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2

//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// and setup oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code. Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start = __ pc();
  address call_pc = nullptr;
  int frame_size_in_words;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);

  // When the signal occurred, the LR was either signed and stored on the stack (in which
  // case it will be restored from the stack before being used) or unsigned and not stored
  // on the stack. Stripping ensures we get the right value.
  __ strip_return_address();

  // Save Integer and Float registers.
  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  if (!cause_return) {
    // overwrite the return address pushed by save_live_registers
    // Additionally, r20 is a callee-saved register so we can look at
    // it later to determine if someone changed the return address for
    // us!
    __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
    __ protect_return_address(r20);
    __ str(r20, Address(rfp, wordSize));
  }

  // Do the call
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(call_ptr));
  __ blr(rscratch1);
  __ bind(retaddr);

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
  oop_maps->add_gc_map( __ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false);

  __ membar(Assembler::LoadLoad | Assembler::LoadStore);

  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, noException);

  // Exception pending

  reg_save.restore_live_registers(masm);

  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  Label no_adjust, bail;
  if (!cause_return) {
    // If our stashed return pc was modified by the runtime we avoid touching it
    __ ldr(rscratch1, Address(rfp, wordSize));
    __ cmp(r20, rscratch1);
    __ br(Assembler::NE, no_adjust);
    __ authenticate_return_address(r20);

#ifdef ASSERT
    // Verify the correct encoding of the poll we're about to skip.
    // See NativeInstruction::is_ldrw_to_zr()
    __ ldrw(rscratch1, Address(r20));
    __ ubfx(rscratch2, rscratch1, 22, 10);
    __ cmpw(rscratch2, 0b1011100101);
    __ br(Assembler::NE, bail);
    __ ubfx(rscratch2, rscratch1, 0, 5);
    __ cmpw(rscratch2, 0b11111);
    __ br(Assembler::NE, bail);
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    __ add(r20, r20, NativeInstruction::instruction_size);
    __ protect_return_address(r20);
    __ str(r20, Address(rfp, wordSize));
  }

  __ bind(no_adjust);
  // Normal exit, restore registers and exit.
  reg_save.restore_live_registers(masm);

  __ ret(lr);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // Make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;
  RegisterSaver reg_save(false /* save_vectors */);

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = nullptr;

  int start = __ offset();

  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  {
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ mov(c_rarg0, rthread);
    __ lea(rscratch1, RuntimeAddress(destination));

    __ blr(rscratch1);
    __ bind(retaddr);
  }

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
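  //
  // The C function installed at 'destination' (typically one of the
  // SharedRuntime::resolve_*_call_C entry points) returns the code entry
  // to jump to in r0 and hands the resolved Method* back through the
  // thread's vm_result_2 field, which is read below.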
  oop_maps->add_gc_map( __ offset() - start, map);

  // r0 contains the address we are going to jump to assuming no exception got installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false);
  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, pending);

  // get the returned Method*
  __ get_vm_result_2(rmethod, rthread);
  __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));

  // r0 is where we want to jump, overwrite rscratch1 which is saved and scratch
  __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
  reg_save.restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ br(rscratch1);

  // Pending exception after the safepoint

  __ bind(pending);

  reg_save.restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));

  __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob (the frame size passed here is in words)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}

#ifdef COMPILER2
// This is here instead of runtime_aarch64_64.cpp because it uses SimpleRuntimeFrame
//
//------------------------------generate_exception_blob---------------------------
// Creates the exception blob. Compiled code jumps to this blob when an
// exception is thrown (see emit_exception_handler in the aarch64.ad file).
//
// Given an exception pc at a call, we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-save registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java-level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
//   r0: exception oop
//   r3: exception pc
//
// Results:
//   r0: exception oop
//   r3: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
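//
// In outline, the code below
//   1. stashes r0/r3 in JavaThread::exception_oop / exception_pc,
//   2. calls OptoRuntime::handle_exception_C(thread) to locate a handler,
//   3. reloads the oop and pc and jumps to the handler address returned in r0.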
//

void OptoRuntime::generate_exception_blob() {
  assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R2_num), "");

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  // TODO check various assumptions made here
  //
  // make sure we do so before running this

  address start = __ pc();

  // push rfp and retaddr by hand
  // Exception pc is 'return address' for stack walker
  __ protect_return_address();
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // there are no callee save registers and we don't expect an
  // arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame in which the exception happened.
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

  // This call does all the hard work. It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)
  //
  // n.b. 1 gp arg, 0 fp args, integral return type

  // the stack should always be aligned
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
  __ blr(rscratch1);
  // handle_exception_C is a special VM call which does not require an explicit
  // instruction sync afterwards.

  // May jump to SVE compiled code
  __ reinitialize_ptrue();

  // Set an oopmap for the call site.  This oopmap will only be used if we
  // are unwinding the stack.  Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false);

  // Restore callee-saved registers

  // rfp is an implicitly saved callee-saved register (i.e. the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-save registers now that adapter frames are gone,
  // and we don't expect an arg reg save area.
  __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address(r3);

  // r0: exception handler

  // We have a handler in r0 (could be deopt blob).
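  // r0 itself is needed to carry the exception oop to the handler, so
  // stash the handler address in r8 (caller-saved, with nothing live in
  // it here) before reloading r0 below.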
  __ mov(r8, r0);

  // Get the exception oop
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));

  // r0: exception oop
  // r8: exception handler
  // r4: exception pc
  // Jump to handler

  __ br(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}

#endif // COMPILER2

BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
  BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
  CodeBuffer buffer(buf);
  short buffer_locs[20];
  buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                         sizeof(buffer_locs)/sizeof(relocInfo));

  MacroAssembler _masm(&buffer);
  MacroAssembler* masm = &_masm;

  const Array<SigEntry>* sig_vk = vk->extended_sig();
  const Array<VMRegPair>* regs = vk->return_regs();

  int pack_fields_jobject_off = __ offset();
  // Resolve pre-allocated buffer from JNI handle.
  // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
  Register Rresult = r14;  // See StubGenerator::generate_call_stub().
  __ ldr(r0, Address(Rresult));
  __ resolve_jobject(r0 /* value */,
                     rthread /* thread */,
                     r12 /* tmp */);
  __ str(r0, Address(Rresult));

  int pack_fields_off = __ offset();

  int j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    VMRegPair pair = regs->at(j);
    VMReg r_1 = pair.first();
    VMReg r_2 = pair.second();
    Address to(r0, off);
    if (bt == T_FLOAT) {
      __ strs(r_1->as_FloatRegister(), to);
    } else if (bt == T_DOUBLE) {
      __ strd(r_1->as_FloatRegister(), to);
    } else {
      Register val = r_1->as_Register();
      assert_different_registers(to.base(), val, r15, r16, r17);
      if (is_reference_type(bt)) {
        __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      } else {
        __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
      }
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");

  __ ret(lr);

  int unpack_fields_off = __ offset();

  Label skip;
  __ cbz(r0, skip);

  j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    VMReg r_1 = pair.first();
    VMReg r_2 = pair.second();
    Address from(r0, off);
    if (bt == T_FLOAT) {
      __ ldrs(r_1->as_FloatRegister(), from);
    } else if (bt == T_DOUBLE) {
      __ ldrd(r_1->as_FloatRegister(), from);
    } else if (bt == T_OBJECT || bt == T_ARRAY) {
      assert_different_registers(r0, r_1->as_Register());
      __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
    } else {
      assert(is_java_primitive(bt), "unexpected basic type");
      assert_different_registers(r0, r_1->as_Register());

      size_t size_in_bytes = type2aelembytes(bt);
      __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");

  __ bind(skip);

  __ ret(lr);

  __ flush();

  return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
}
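
// A rough sketch of how the offsets recorded above are meant to be used
// (intent inferred from the code in this function, not a definitive
// description of the call sites): pack_fields_jobject_off, pack_fields_off
// and unpack_fields_off become entry points of the returned blob. The
// unpack entry scatters a buffered inline type's fields from the buffer
// addressed by r0 into the return registers described by
// vk->return_regs(); the pack entries gather the field values back into a
// pre-allocated buffer, resolving it from a JNI handle first when entered
// at pack_fields_jobject_off.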