/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_riscv.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(UseRVV && save_vectors) {}
  ~RegisterSaver() {}
  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own
  // gregs:28, float_register:32; except: x1(ra) & x2(sp) & gp(x3) & tp(x4)
  //   |---v0---|<---SP
  //   |---v1---| save vectors only in generate_handler_blob
  //   |-- .. --|
  //   |---v31--|-----
  //   |---f0---|
  //   |---f1---|
  //   |   ..   |
  //   |---f31--|
  //   |---reserved slot for stack alignment---|
  //   |---x5---|
  //   |   x6   |
  //   |---.. --|
  //   |---x31--|
  //   |---fp---|
  //   |---ra---|
  int v0_offset_in_bytes(void) { return 0; }
  int f0_offset_in_bytes(void) {
    int f0_offset = 0;
#ifdef COMPILER2
    if (_save_vectors) {
      f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegister::number_of_registers *
                   BytesPerInt;
    }
#endif
    return f0_offset;
  }
  int reserved_slot_offset_in_bytes(void) {
    return f0_offset_in_bytes() +
           FloatRegister::max_slots_per_register *
           FloatRegister::number_of_registers *
           BytesPerInt;
  }

  int reg_offset_in_bytes(Register r) {
    assert (r->encoding() > 4, "ra, sp, gp and tp not saved");
    return reserved_slot_offset_in_bytes() + (r->encoding() - 4 /* x1, x2, x3, x4 */) * wordSize;
  }

  int freg_offset_in_bytes(FloatRegister f) {
    return f0_offset_in_bytes() + f->encoding() * wordSize;
  }

  int ra_offset_in_bytes(void) {
    return reserved_slot_offset_in_bytes() +
           (Register::number_of_registers - 3) *
           Register::max_slots_per_register *
           BytesPerInt;
  }
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  int vector_size_in_bytes = 0;
  int vector_size_in_slots = 0;
#ifdef COMPILER2
  if (_save_vectors) {
    vector_size_in_bytes += Matcher::scalable_vector_reg_size(T_BYTE);
    vector_size_in_slots += Matcher::scalable_vector_reg_size(T_INT);
  }
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer, Float and Vector registers.
  __ enter();
  __ push_CPU_state(_save_vectors, vector_size_in_bytes);

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
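  // Rough illustration of the slot bookkeeping below (assuming vectors are
  // not saved): f0 lands at slot 0 and f31 at slot 62 (2 slots per FP reg),
  // and the first recorded GPR x7 lands at slot 70, after skipping the
  // alignment slot plus the caller-saved x5/x6 (3 * 2 slots).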

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_maps != nullptr && oop_map != nullptr);

  int sp_offset_in_slots = 0;
  int step_in_slots = 0;
  if (_save_vectors) {
    step_in_slots = vector_size_in_slots;
    for (int i = 0; i < VectorRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
      VectorRegister r = as_VectorRegister(i);
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
    }
  }

  step_in_slots = FloatRegister::max_slots_per_register;
  for (int i = 0; i < FloatRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    FloatRegister r = as_FloatRegister(i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
  }

  step_in_slots = Register::max_slots_per_register;
  // skip the slot reserved for alignment, see MacroAssembler::push_reg;
  // also skip x5 ~ x6 on the stack because they are caller-saved registers.
  sp_offset_in_slots += Register::max_slots_per_register * 3;
  // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack.
  for (int i = 7; i < Register::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    Register r = as_Register(i);
    if (r != xthread) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg());
    }
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ leave();
}

// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not overlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
  return UseRVV;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit. The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // Create the mapping between argument positions and
  // registers.
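  // Rough worked example (not exhaustive): for a signature (JID)V the loop
  // below assigns the long to j_rarg0 (set2, its T_VOID half is set_bad),
  // the int to j_rarg1 (set1) and the double to j_farg0 (set2); everything
  // fits in registers, so the returned stk_args stays 0.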
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3,
    j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN: // fall through
      case T_CHAR:    // fall through
      case T_BYTE:    // fall through
      case T_SHORT:   // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_VOID:
        // halves of T_LONG or T_DOUBLE
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      case T_LONG:    // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:  // fall through
      case T_ARRAY:   // fall through
      case T_ADDRESS:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}

// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
  __ beqz(t0, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mv(c_rarg0, xmethod);
  __ mv(c_rarg1, ra);
  __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mv(x19_sender_sp, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2 * wordSize);

  if (extraspace) {
    __ sub(sp, sp, extraspace);
  }

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java long/double in
    // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
    // leaves one slot empty and only stores to a single slot. In this case the
    // slot that is occupied is the T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use t0
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        __ lwu(t0, Address(sp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
      } else {
        __ ld(t0, Address(sp, ld_off), /*temp register*/esp);

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ sd(t0, Address(sp, next_off), /*temp register*/esp);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaaaul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        } else {
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        __ sd(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaabul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
          __ sd(r, Address(sp, next_off));
        } else {
          __ sd(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just a part of the slot
        __ fsw(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mv(t0, 0xdeadffffdeadaaacul);
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        __ fsd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mv(esp, sp); // Interp expects args on caller's expression stack

  __ ld(t0, Address(xmethod, in_bytes(Method::interpreter_entry_offset())));
  __ jr(t0);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  // Note: x19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do a i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
  if (comp_args_on_stack != 0) {
    __ sub(t0, sp, comp_words_on_stack * wordSize);
    __ andi(sp, t0, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld(t1, Address(xmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ld(t0, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ beqz(t0, no_alternative_target);
    __ mv(t1, t0);
    __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lw(t0, Address(esp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;
        __ ld(t0, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      }
    } else if (r_1->is_Register()) { // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ld(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ lw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ flw(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ fld(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ push_cont_fastpath(xthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ sd(xmethod, Address(xthread, JavaThread::callee_target_offset()));

  __ jr(t1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  const Register receiver = j_rarg0;
  const Register data = t1;
  const Register tmp = t2; // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know xmethod holds the Method* during calls
  // to the interpreter. The args start out packed in the compiled layout. They
  // need to be unpacked into the interpreter layout. This will almost always
  // require some stack space. We grow the current (compiled) stack, then repack
  // the args. We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");

    __ ic_check();
    __ ld(xmethod, Address(data, CompiledICData::speculated_method_offset()));

    __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
    __ beqz(t0, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ lwu(t0, Address(xmethod, Method::access_flags_offset()));
      __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
      __ beqz(t1, L_skip_barrier); // non-static
    }

    __ load_method_holder(t1, xmethod);
    __ clinit_barrier(t1, t0, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  assert(total_args_passed <= Argument::n_vector_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  // check more info at https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
  static const VectorRegister VEC_ArgReg[Argument::n_vector_register_parameters_c] = {
    v8, v9, v10, v11, v12, v13, v14, v15,
    v16, v17, v18, v19, v20, v21, v22, v23
  };

  const int next_reg_val = 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.
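  // Unlike the Java convention above, stack slots here are always bumped by 2
  // (keeping each outgoing C argument on a 64-bit boundary), and FP arguments
  // that do not fit in c_farg0..c_farg7 may still be passed in the remaining
  // integer registers, e.g. a ninth float argument goes to an integer register
  // before anything is placed on the stack, matching the rule implemented below.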

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3,
    c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN: // fall through
      case T_CHAR:    // fall through
      case T_BYTE:    // fall through
      case T_SHORT:   // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:   // fall through
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ fsw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fsd(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID: break;
    default: {
      __ sd(x10, Address(fp, -3 * wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ flw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fld(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID: break;
    default: {
      __ ld(x10, Address(fp, -3 * wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ addi(sp, sp, -2 * wordSize);
      __ fsd(args[i].first()->as_FloatRegister(), Address(sp, 0));
    }
  }
  __ push_reg(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop_reg(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ fld(args[i].first()->as_FloatRegister(), Address(sp, 0));
      __ add(sp, sp, 2 * wordSize);
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  const Register temp_reg = x9; // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset()) % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size() / wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(sp, Address(xthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ mv(t0, ContinuationEntry::cookie_value());
  __ sw(t0, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ sd(c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ sw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ sd(zr, Address(sp, ContinuationEntry::chunk_offset()));
  __ sw(zr, Address(sp, ContinuationEntry::argsize_offset()));
  __ sw(zr, Address(sp, ContinuationEntry::pin_count_offset()));

  __ ld(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ld(t0, Address(xthread, JavaThread::held_monitor_count_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ sd(zr, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ sd(zr, Address(xthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, fp points to the spilled fp + 2 * wordSize in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ beq(sp, t0, OK);
  __ stop("incorrect sp");
  __ bind(OK);
#endif

  __ ld(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_fastpath_offset()));

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ld(t0, Address(xthread, JavaThread::jni_monitor_count_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // Save return value potentially containing the exception oop in callee-saved x9
    __ mv(x9, x10);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value
    __ mv(x10, x9);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination. The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
    __ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#endif

  __ ld(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ sd(t0, Address(xthread, JavaThread::held_monitor_count_offset()));

  __ ld(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ add(fp, sp, (int)ContinuationEntry::size() + 2 * wordSize /* 2 extra words to match up with leave() */);
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
  // verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used at interp_only_mode only
  interpreted_entry_offset = __ pc() - start;
  {
#ifdef ASSERT
    Label is_interp_only;
    __ lw(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
    __ bnez(t0, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ld(c_rarg1, Address(esp, Interpreter::stackElementSize * 2));
    __ ld(c_rarg2, Address(esp, Interpreter::stackElementSize * 1));
    __ ld(c_rarg3, Address(esp, Interpreter::stackElementSize * 0));
    __ push_cont_fastpath(xthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
    // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ bnez(c_rarg2, call_thaw);

    // Make sure the call is patchable
    __ align(NativeInstruction::instruction_size);

    const address tr_call = __ reloc_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ j(exit);

    address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ bnez(c_rarg2, call_thaw);

  // Make sure the call is patchable
  __ align(NativeInstruction::instruction_size);

  const address tr_call = __ reloc_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ j(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  ContinuationEntry::_cleanup_offset = __ pc() - start;
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret();

  // exception handling
  exception_offset = __ pc() - start;
  {
    __ mv(x9, x10); // save return value containing the exception oop in callee-saved x9

    continuation_enter_cleanup(masm);

    __ ld(c_rarg1, Address(fp, -1 * wordSize)); // return address
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, c_rarg1);

    // see OptoRuntime::generate_exception_blob: x10 -- exception oop, x13 -- exception pc

    __ mv(x11, x10); // the exception handler
    __ mv(x10, x9); // restore return value containing the exception oop
    __ verify_oop(x10);

    __ leave();
    __ mv(x13, ra);
    __ jr(x11); // the exception handler
  }

  address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
  enum layout {
    fp_off,
    fp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };
  // assert(is_even(framesize/2), "sp not 16-byte aligned");

  stack_slots = framesize / VMRegImpl::slots_per_word;
  assert(stack_slots == 2, "recheck layout");

  address start = __ pc();

  compiled_entry_offset = __ pc() - start;
  __ enter();

  __ mv(c_rarg1, sp);

  frame_complete = __ pc() - start;
  address the_pc = __ pc();

  __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup
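  // The leaf call below returns its result in x10: zero means the freeze
  // succeeded, while a non-zero result means the continuation is pinned and
  // we simply return to the caller (see the pinned label further down).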

  __ mv(c_rarg0, xthread);
  __ set_last_Java_frame(sp, fp, the_pc, t0);
  __ call_VM_leaf(Continuation::freeze_entry(), 2);
  __ reset_last_Java_frame(true);

  Label pinned;

  __ bnez(x10, pinned);

  // We've succeeded, set sp to the ContinuationEntry
  __ ld(sp, Address(xthread, JavaThread::cont_entry_offset()));
  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ beqz(t0, ok);
  __ leave();
  __ la(t0, RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ jr(t0);
  __ bind(ok);

  __ leave();
  __ ret();

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool has_receiver = false;
  Register receiver_reg = noreg;
  int member_arg_pos = -1;
  Register member_reg = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
    member_reg = x9; // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
    member_reg = x9; // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = x12; // known to be free at this point
      __ ld(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, because it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc()) SharedRuntime::block_for_jni_critical()
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical
//    if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_continuation_native_intrinsic()) {
    int exception_offset = -1;
    OopMapSet* oop_maps = new OopMapSet();
    int frame_complete = -1;
    int stack_slots = -1;
    int interpreted_entry_offset = -1;
    int vep_offset = -1;
    if (method->is_continuation_enter_intrinsic()) {
      gen_continuation_enter(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             exception_offset,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             interpreted_entry_offset,
                             vep_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      gen_continuation_yield(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             vep_offset);
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }

#ifdef ASSERT
    if (method->is_continuation_enter_intrinsic()) {
      assert(interpreted_entry_offset != -1, "Must be set");
      assert(exception_offset != -1, "Must be set");
    } else {
      assert(interpreted_entry_offset == -1, "Must be unset");
      assert(exception_offset == -1, "Must be unset");
    }
    assert(frame_complete != -1, "Must be set");
    assert(stack_slots != -1, "Must be set");
    assert(vep_offset != -1, "Must be set");
#endif

    __ flush();
    nmethod* nm = nmethod::new_native_nmethod(method,
                                              compile_id,
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots,
                                              in_ByteSize(-1),
                                              in_ByteSize(-1),
                                              oop_maps,
                                              exception_offset);
    if (nm == nullptr) return nm;
    if (method->is_continuation_enter_intrinsic()) {
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      _cont_doYield_stub = nm;
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }
    return nm;
  }

  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    {
      Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching.
      MacroAssembler::assert_alignment(__ pc());
      __ nop(); // 4 bytes
    }
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)nullptr);
  }
  address native_func = method->native_function();
  assert(native_func != nullptr, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args + (method->is_static() ? 2 : 1);

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = nullptr;

  int argc = 0;
  out_sig_bt[argc++] = T_ADDRESS;
  if (method->is_static()) {
    out_sig_bt[argc++] = T_OBJECT;
  }

  for (int i = 0; i < total_in_args ; i++) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Compute framesize for the wrapper. We need to handlize all oops in
  // incoming registers

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 4 for return address (which we own) and saved fp
  stack_slots += 6;

  // Ok The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      | 2 slots (ra)        |
  //      | 2 slots (fp)        |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (8 java arg registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //

  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = align_up(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except fp. fp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.

  const Register ic_reg = t1;
  const Register receiver = j_rarg0;

  __ verify_oop(receiver);
  assert_different_registers(receiver, t0, t1);

  __ ic_check();

  int vep_offset = ((intptr_t)__ pc()) - start;

  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump.
  {
    Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching.
    MacroAssembler::assert_alignment(__ pc());
    __ nop(); // 4 bytes
  }

  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
    Label L_skip_barrier;
    __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
    __ clinit_barrier(t1, t0, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
  }

  // Generate stack overflow check
  __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved fp
  __ sub(sp, sp, stack_size - 2 * wordSize);

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  assert_cond(bs != nullptr);
  bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  // We use x18 as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = x18;

  //
  // We immediately shuffle the arguments so that any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle

  // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument the c calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore if we move the args from java -> c backwards then we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
  //

  // Record esp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  assert_cond(map != nullptr);

  int float_args = 0;
  int int_args = 0;

#ifdef ASSERT
  bool reg_destroyed[Register::number_of_registers];
  bool freg_destroyed[FloatRegister::number_of_registers];
  for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  // For JNI natives the incoming and outgoing registers are offset upwards.
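  // Illustration of the backward walk below: for a static native (JI)V the
  // C signature becomes (JNIEnv*, jclass, jlong, jint), so Java argument i
  // lands at C position i + 2; walking the pairs from last to first is what
  // lets the shuffle avoid overwriting a source before it has been read, as
  // described above.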
  GrowableArray<int> arg_order(2 * total_in_args);
  VMRegPair tmp_vmreg;
  tmp_vmreg.set2(x9->as_VMReg());

  for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
    arg_order.push(i);
    arg_order.push(c_arg);
  }

  int temploc = -1;
  for (int ai = 0; ai < arg_order.length(); ai += 2) {
    int i = arg_order.at(ai);
    int c_arg = arg_order.at(ai + 1);
    __ block_comment(err_msg("mv %d -> %d", i, c_arg));
    assert(c_arg != -1 && i != -1, "wrong order");
#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
    }
#endif /* ASSERT */
    switch (in_sig_bt[i]) {
      case T_ARRAY:
      case T_OBJECT:
        __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                       ((i == 0) && (!is_static)),
                       &receiver_offset);
        int_args++;
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        __ float_move(in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg + 1] == T_VOID, "bad arg list");
        __ double_move(in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_LONG :
        __ long_move(in_regs[i], out_regs[c_arg]);
        int_args++;
        break;

      case T_ADDRESS:
        assert(false, "found T_ADDRESS in java args");
        break;

      default:
        __ move32_64(in_regs[i], out_regs[c_arg]);
        int_args++;
    }
  }

  // point c_arg at the first arg that is already loaded in case we
  // need to spill before we call out
  int c_arg = total_c_args - total_in_args;

  // Pre-load a static method's oop into c_rarg1.
  if (method->is_static()) {

    // load oop into a register
    __ movoop(c_rarg1,
              JNIHandles::make_local(method->method_holder()->java_mirror()));

    // Now handlize the static class mirror; it's known not-null.
    __ sd(c_rarg1, Address(sp, klass_offset));
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ la(c_rarg1, Address(sp, klass_offset));
    // and protect the arg if we must spill
    c_arg--;
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  // We use the same pc/oopMap repeatedly when we call out.

  Label native_return;
  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
    // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1649 __ set_last_Java_frame(sp, noreg, native_return, t0); 1650 } else { 1651 intptr_t the_pc = (intptr_t) __ pc(); 1652 oop_maps->add_gc_map(the_pc - start, map); 1653 1654 __ set_last_Java_frame(sp, noreg, __ pc(), t0); 1655 } 1656 1657 Label dtrace_method_entry, dtrace_method_entry_done; 1658 if (DTraceMethodProbes) { 1659 __ j(dtrace_method_entry); 1660 __ bind(dtrace_method_entry_done); 1661 } 1662 1663 // RedefineClasses() tracing support for obsolete method entry 1664 if (log_is_enabled(Trace, redefine, class, obsolete)) { 1665 // protect the args we've loaded 1666 save_args(masm, total_c_args, c_arg, out_regs); 1667 __ mov_metadata(c_rarg1, method()); 1668 __ call_VM_leaf( 1669 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), 1670 xthread, c_rarg1); 1671 restore_args(masm, total_c_args, c_arg, out_regs); 1672 } 1673 1674 // Lock a synchronized method 1675 1676 // Register definitions used by locking and unlocking 1677 1678 const Register swap_reg = x10; 1679 const Register obj_reg = x9; // Will contain the oop 1680 const Register lock_reg = x30; // Address of compiler lock object (BasicLock) 1681 const Register old_hdr = x30; // value of old header at unlock time 1682 const Register lock_tmp = x31; // Temporary used by lightweight_lock/unlock 1683 const Register tmp = ra; 1684 1685 Label slow_path_lock; 1686 Label lock_done; 1687 1688 if (method->is_synchronized()) { 1689 Label count; 1690 1691 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); 1692 1693 // Get the handle (the 2nd argument) 1694 __ mv(oop_handle_reg, c_rarg1); 1695 1696 // Get address of the box 1697 1698 __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 1699 1700 // Load the oop from the handle 1701 __ ld(obj_reg, Address(oop_handle_reg, 0)); 1702 1703 if (LockingMode == LM_MONITOR) { 1704 __ j(slow_path_lock); 1705 } else if (LockingMode == LM_LEGACY) { 1706 // Load (object->mark() | 1) into swap_reg % x10 1707 __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 1708 __ ori(swap_reg, t0, 1); 1709 1710 // Save (object->mark() | 1) into BasicLock's displaced header 1711 __ sd(swap_reg, Address(lock_reg, mark_word_offset)); 1712 1713 // src -> dest if dest == x10 else x10 <- dest 1714 __ cmpxchg_obj_header(x10, lock_reg, obj_reg, lock_tmp, count, /*fallthrough*/nullptr); 1715 1716 // Test if the oopMark is an obvious stack pointer, i.e., 1717 // 1) (mark & 3) == 0, and 1718 // 2) sp <= mark < mark + os::pagesize() 1719 // These 3 tests can be done by evaluating the following 1720 // expression: ((mark - sp) & (3 - os::vm_page_size())), 1721 // assuming both stack pointer and pagesize have their 1722 // least significant 2 bits clear. 
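  // Editor's worked example (assuming a 4 KiB page): 3 - 4096 == -4093 ==
  // 0xffff'ffff'ffff'f003, a mask keeping bits [1:0] and bits [63:12]. The AND
  // below is therefore zero exactly when the mark value (left in swap_reg by the
  // cmpxchg) has its low two bits clear and 0 <= mark - sp < os::vm_page_size(),
  // i.e. the mark points into this thread's stack just above sp, which is the
  // recursive-lock case.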
1723 // NOTE: the oopMark is in swap_reg % 10 as the result of cmpxchg 1724 1725 __ sub(swap_reg, swap_reg, sp); 1726 __ andi(swap_reg, swap_reg, 3 - (int)os::vm_page_size()); 1727 1728 // Save the test result, for recursive case, the result is zero 1729 __ sd(swap_reg, Address(lock_reg, mark_word_offset)); 1730 __ bnez(swap_reg, slow_path_lock); 1731 1732 __ bind(count); 1733 __ inc_held_monitor_count(); 1734 } else { 1735 assert(LockingMode == LM_LIGHTWEIGHT, "must be"); 1736 __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock); 1737 } 1738 1739 // Slow path will re-enter here 1740 __ bind(lock_done); 1741 } 1742 1743 1744 // Finally just about ready to make the JNI call 1745 1746 // get JNIEnv* which is first argument to native 1747 __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset()))); 1748 1749 // Now set thread in native 1750 __ la(t1, Address(xthread, JavaThread::thread_state_offset())); 1751 __ mv(t0, _thread_in_native); 1752 __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); 1753 __ sw(t0, Address(t1)); 1754 1755 __ rt_call(native_func); 1756 1757 // Verify or restore cpu control state after JNI call 1758 __ restore_cpu_control_state_after_jni(t0); 1759 1760 // Unpack native results. 1761 if (ret_type != T_OBJECT && ret_type != T_ARRAY) { 1762 __ cast_primitive_type(ret_type, x10); 1763 } 1764 1765 Label safepoint_in_progress, safepoint_in_progress_done; 1766 Label after_transition; 1767 1768 // Switch thread to "native transition" state before reading the synchronization state. 1769 // This additional state is necessary because reading and testing the synchronization 1770 // state is not atomic w.r.t. GC, as this scenario demonstrates: 1771 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. 1772 // VM thread changes sync state to synchronizing and suspends threads for GC. 1773 // Thread A is resumed to finish this native method, but doesn't block here since it 1774 // didn't see any synchronization is progress, and escapes. 1775 __ mv(t0, _thread_in_native_trans); 1776 1777 __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); 1778 1779 // Force this write out before the read below 1780 if (!UseSystemMemoryBarrier) { 1781 __ membar(MacroAssembler::AnyAny); 1782 } 1783 1784 // check for safepoint operation in progress and/or pending suspend requests 1785 { 1786 // We need an acquire here to ensure that any subsequent load of the 1787 // global SafepointSynchronize::_state flag is ordered after this load 1788 // of the thread-local polling word. We don't want this poll to 1789 // return false (i.e. not safepointing) and a later poll of the global 1790 // SafepointSynchronize::_state spuriously to return true. 1791 // This is to avoid a race when we're in a native->Java transition 1792 // racing the code which wakes up from a safepoint. 
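  // Editor's summary of the transition protocol implemented around this point
  // (sketch, not emitted code):
  //
  //   thread_state = _thread_in_native_trans;             // store, above
  //   full fence (unless UseSystemMemoryBarrier covers it)
  //   if (poll word armed || suspend flags != 0)          // acquire-ordered loads
  //     check_special_condition_for_native_trans(thread); // slow path, below
  //   thread_state = _thread_in_Java;
  //
  // The _trans state plus the fence is what lets the VM thread reliably observe
  // this thread before it re-enters Java during a safepoint.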
1793 1794 __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */); 1795 __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset())); 1796 __ bnez(t0, safepoint_in_progress); 1797 __ bind(safepoint_in_progress_done); 1798 } 1799 1800 // change thread state 1801 __ la(t1, Address(xthread, JavaThread::thread_state_offset())); 1802 __ mv(t0, _thread_in_Java); 1803 __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); 1804 __ sw(t0, Address(t1)); 1805 __ bind(after_transition); 1806 1807 if (LockingMode != LM_LEGACY && method->is_object_wait0()) { 1808 // Check preemption for Object.wait() 1809 __ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset())); 1810 __ beqz(t1, native_return); 1811 __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset())); 1812 __ jr(t1); 1813 __ bind(native_return); 1814 1815 intptr_t the_pc = (intptr_t) __ pc(); 1816 oop_maps->add_gc_map(the_pc - start, map); 1817 } 1818 1819 Label reguard; 1820 Label reguard_done; 1821 __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset())); 1822 __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled); 1823 __ beq(t0, t1, reguard); 1824 __ bind(reguard_done); 1825 1826 // native result if any is live 1827 1828 // Unlock 1829 Label unlock_done; 1830 Label slow_path_unlock; 1831 if (method->is_synchronized()) { 1832 1833 // Get locked oop from the handle we passed to jni 1834 __ ld(obj_reg, Address(oop_handle_reg, 0)); 1835 1836 Label done, not_recursive; 1837 1838 if (LockingMode == LM_LEGACY) { 1839 // Simple recursive lock? 1840 __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 1841 __ bnez(t0, not_recursive); 1842 __ dec_held_monitor_count(); 1843 __ j(done); 1844 } 1845 1846 __ bind(not_recursive); 1847 1848 // Must save x10 if if it is live now because cmpxchg must use it 1849 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { 1850 save_native_result(masm, ret_type, stack_slots); 1851 } 1852 1853 if (LockingMode == LM_MONITOR) { 1854 __ j(slow_path_unlock); 1855 } else if (LockingMode == LM_LEGACY) { 1856 // get address of the stack lock 1857 __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 1858 // get old displaced header 1859 __ ld(old_hdr, Address(x10, 0)); 1860 1861 // Atomic swap old header if oop still contains the stack lock 1862 Label count; 1863 __ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock); 1864 __ bind(count); 1865 __ dec_held_monitor_count(); 1866 } else { 1867 assert(LockingMode == LM_LIGHTWEIGHT, ""); 1868 __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock); 1869 } 1870 1871 // slow path re-enters here 1872 __ bind(unlock_done); 1873 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { 1874 restore_native_result(masm, ret_type, stack_slots); 1875 } 1876 1877 __ bind(done); 1878 } 1879 1880 Label dtrace_method_exit, dtrace_method_exit_done; 1881 if (DTraceMethodProbes) { 1882 __ j(dtrace_method_exit); 1883 __ bind(dtrace_method_exit_done); 1884 } 1885 1886 __ reset_last_Java_frame(false); 1887 1888 // Unbox oop result, e.g. JNIHandles::resolve result. 
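  // Editor's note: an object-returning native method really returns a jobject
  // handle. Conceptually the resolve below is
  //   result = (handle == nullptr) ? nullptr : *(oop*)handle;
  // (resolve_jobject additionally handles weak/global handle tags), leaving the
  // raw oop in x10 as the Java caller expects.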
1889 if (is_reference_type(ret_type)) { 1890 __ resolve_jobject(x10, x11, x12); 1891 } 1892 1893 if (CheckJNICalls) { 1894 // clear_pending_jni_exception_check 1895 __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset())); 1896 } 1897 1898 // reset handle block 1899 __ ld(x12, Address(xthread, JavaThread::active_handles_offset())); 1900 __ sd(zr, Address(x12, JNIHandleBlock::top_offset())); 1901 1902 __ leave(); 1903 1904 // Any exception pending? 1905 Label exception_pending; 1906 __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset()))); 1907 __ bnez(t0, exception_pending); 1908 1909 // We're done 1910 __ ret(); 1911 1912 // Unexpected paths are out of line and go here 1913 1914 // forward the exception 1915 __ bind(exception_pending); 1916 1917 // and forward the exception 1918 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 1919 1920 // Slow path locking & unlocking 1921 if (method->is_synchronized()) { 1922 1923 __ block_comment("Slow path lock {"); 1924 __ bind(slow_path_lock); 1925 1926 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM 1927 // args are (oop obj, BasicLock* lock, JavaThread* thread) 1928 1929 // protect the args we've loaded 1930 save_args(masm, total_c_args, c_arg, out_regs); 1931 1932 __ mv(c_rarg0, obj_reg); 1933 __ mv(c_rarg1, lock_reg); 1934 __ mv(c_rarg2, xthread); 1935 1936 // Not a leaf but we have last_Java_frame setup as we want 1937 // Force freeze slow path in case we try to preempt. We will pin the 1938 // vthread to the carrier (see FreezeBase::recurse_freeze_native_frame()). 1939 __ push_cont_fastpath(); 1940 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3); 1941 __ pop_cont_fastpath(); 1942 restore_args(masm, total_c_args, c_arg, out_regs); 1943 1944 #ifdef ASSERT 1945 { Label L; 1946 __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset()))); 1947 __ beqz(t0, L); 1948 __ stop("no pending exception allowed on exit from monitorenter"); 1949 __ bind(L); 1950 } 1951 #endif 1952 __ j(lock_done); 1953 1954 __ block_comment("} Slow path lock"); 1955 1956 __ block_comment("Slow path unlock {"); 1957 __ bind(slow_path_unlock); 1958 1959 if (ret_type == T_FLOAT || ret_type == T_DOUBLE) { 1960 save_native_result(masm, ret_type, stack_slots); 1961 } 1962 1963 __ mv(c_rarg2, xthread); 1964 __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size)); 1965 __ mv(c_rarg0, obj_reg); 1966 1967 // Save pending exception around call to VM (which contains an EXCEPTION_MARK) 1968 // NOTE that obj_reg == x9 currently 1969 __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset()))); 1970 __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset()))); 1971 1972 __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)); 1973 1974 #ifdef ASSERT 1975 { 1976 Label L; 1977 __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset()))); 1978 __ beqz(t0, L); 1979 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C"); 1980 __ bind(L); 1981 } 1982 #endif /* ASSERT */ 1983 1984 __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset()))); 1985 1986 if (ret_type == T_FLOAT || ret_type == T_DOUBLE) { 1987 restore_native_result(masm, ret_type, stack_slots); 1988 } 1989 __ j(unlock_done); 1990 1991 __ block_comment("} Slow path unlock"); 1992 1993 } // synchronized 1994 1995 // SLOW PATH Reguard the stack if needed 1996 1997 __ 
bind(reguard); 1998 save_native_result(masm, ret_type, stack_slots); 1999 __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2000 restore_native_result(masm, ret_type, stack_slots); 2001 // and continue 2002 __ j(reguard_done); 2003 2004 // SLOW PATH safepoint 2005 { 2006 __ block_comment("safepoint {"); 2007 __ bind(safepoint_in_progress); 2008 2009 // Don't use call_VM as it will see a possible pending exception and forward it 2010 // and never return here preventing us from clearing _last_native_pc down below. 2011 // 2012 save_native_result(masm, ret_type, stack_slots); 2013 __ mv(c_rarg0, xthread); 2014 #ifndef PRODUCT 2015 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); 2016 #endif 2017 __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); 2018 2019 // Restore any method result value 2020 restore_native_result(masm, ret_type, stack_slots); 2021 2022 __ j(safepoint_in_progress_done); 2023 __ block_comment("} safepoint"); 2024 } 2025 2026 // SLOW PATH dtrace support 2027 if (DTraceMethodProbes) { 2028 { 2029 __ block_comment("dtrace entry {"); 2030 __ bind(dtrace_method_entry); 2031 2032 // We have all of the arguments setup at this point. We must not touch any register 2033 // argument registers at this point (what if we save/restore them there are no oop? 2034 2035 save_args(masm, total_c_args, c_arg, out_regs); 2036 __ mov_metadata(c_rarg1, method()); 2037 __ call_VM_leaf( 2038 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2039 xthread, c_rarg1); 2040 restore_args(masm, total_c_args, c_arg, out_regs); 2041 __ j(dtrace_method_entry_done); 2042 __ block_comment("} dtrace entry"); 2043 } 2044 2045 { 2046 __ block_comment("dtrace exit {"); 2047 __ bind(dtrace_method_exit); 2048 save_native_result(masm, ret_type, stack_slots); 2049 __ mov_metadata(c_rarg1, method()); 2050 __ call_VM_leaf( 2051 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2052 xthread, c_rarg1); 2053 restore_native_result(masm, ret_type, stack_slots); 2054 __ j(dtrace_method_exit_done); 2055 __ block_comment("} dtrace exit"); 2056 } 2057 } 2058 2059 __ flush(); 2060 2061 nmethod *nm = nmethod::new_native_nmethod(method, 2062 compile_id, 2063 masm->code(), 2064 vep_offset, 2065 frame_complete, 2066 stack_slots / VMRegImpl::slots_per_word, 2067 (is_static ? 
in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2068 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size), 2069 oop_maps); 2070 assert(nm != nullptr, "create native nmethod fail!"); 2071 return nm; 2072 } 2073 2074 // this function returns the adjust size (in number of words) to a c2i adapter 2075 // activation for use during deoptimization 2076 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 2077 assert(callee_locals >= callee_parameters, 2078 "test and remove; got more parms than locals"); 2079 if (callee_locals < callee_parameters) { 2080 return 0; // No adjustment for negative locals 2081 } 2082 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords; 2083 // diff is counted in stack words 2084 return align_up(diff, 2); 2085 } 2086 2087 //------------------------------generate_deopt_blob---------------------------- 2088 void SharedRuntime::generate_deopt_blob() { 2089 // Allocate space for the code 2090 ResourceMark rm; 2091 // Setup code generation tools 2092 int pad = 0; 2093 #if INCLUDE_JVMCI 2094 if (EnableJVMCI) { 2095 pad += 512; // Increase the buffer size when compiling for JVMCI 2096 } 2097 #endif 2098 const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); 2099 CodeBuffer buffer(name, 2048 + pad, 1024); 2100 MacroAssembler* masm = new MacroAssembler(&buffer); 2101 int frame_size_in_words = -1; 2102 OopMap* map = nullptr; 2103 OopMapSet *oop_maps = new OopMapSet(); 2104 assert_cond(masm != nullptr && oop_maps != nullptr); 2105 RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0); 2106 2107 // ------------- 2108 // This code enters when returning to a de-optimized nmethod. A return 2109 // address has been pushed on the stack, and return values are in 2110 // registers. 2111 // If we are doing a normal deopt then we were called from the patched 2112 // nmethod from the point we returned to the nmethod. So the return 2113 // address on the stack is wrong by NativeCall::instruction_size 2114 // We will adjust the value so it looks like we have the original return 2115 // address on the stack (like when we eagerly deoptimized). 2116 // In the case of an exception pending when deoptimizing, we enter 2117 // with a return address on the stack that points after the call we patched 2118 // into the exception handler. We have the following register state from, 2119 // e.g., the forward exception stub (see stubGenerator_riscv.cpp). 2120 // x10: exception oop 2121 // x9: exception handler 2122 // x13: throwing pc 2123 // So in this case we simply jam x13 into the useless return address and 2124 // the stack looks just like we want. 2125 // 2126 // At this point we need to de-opt. We save the argument return 2127 // registers. We call the first C routine, fetch_unroll_info(). This 2128 // routine captures the return values and returns a structure which 2129 // describes the current frame size and the sizes of all replacement frames. 2130 // The current frame is compiled code and may contain many inlined 2131 // functions, each with their own JVM state. We pop the current frame, then 2132 // push all the new frames. Then we call the C routine unpack_frames() to 2133 // populate these frames. Finally unpack_frames() returns us the new target 2134 // address. Notice that callee-save registers are BLOWN here; they have 2135 // already been captured in the vframeArray at the time the return PC was 2136 // patched. 2137 address start = __ pc(); 2138 Label cont; 2139 2140 // Prolog for non exception case! 
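  // Editor's overview of the entry points generated below (this is the first):
  //   start                   -> Unpack_deopt     (normal return into a deoptee)
  //   reexecute_offset        -> Unpack_reexecute (re-execute the bytecode at the deopt bci)
  //   exception_offset        -> Unpack_exception (exception oop/pc arrive in x10/x13)
  //   exception_in_tls_offset -> Unpack_exception (exception oop/pc already stored in the JavaThread)
  // Each entry saves the full register set, records its exec mode in xcpool and
  // falls into the common 'cont' path that calls fetch_unroll_info().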
2141
2142   // Save everything in sight.
2143   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2144
2145   // Normal deoptimization. Save exec mode for unpack_frames.
2146   __ mv(xcpool, Deoptimization::Unpack_deopt); // callee-saved
2147   __ j(cont);
2148
2149   int reexecute_offset = __ pc() - start;
2150 #if INCLUDE_JVMCI && !defined(COMPILER1)
2151   if (UseJVMCICompiler) {
2152     // JVMCI does not use this kind of deoptimization
2153     __ should_not_reach_here();
2154   }
2155 #endif
2156
2157   // Reexecute case
2158   // The return address is the pc that describes what bci to re-execute at.
2159
2160   // No need to update map as each call to save_live_registers will produce an identical oopmap
2161   (void) reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2162
2163   __ mv(xcpool, Deoptimization::Unpack_reexecute); // callee-saved
2164   __ j(cont);
2165
2166 #if INCLUDE_JVMCI
2167   Label after_fetch_unroll_info_call;
2168   int implicit_exception_uncommon_trap_offset = 0;
2169   int uncommon_trap_offset = 0;
2170
2171   if (EnableJVMCI) {
2172     implicit_exception_uncommon_trap_offset = __ pc() - start;
2173
2174     __ ld(ra, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2175     __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2176
2177     uncommon_trap_offset = __ pc() - start;
2178
2179     // Save everything in sight.
2180     reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2181     // fetch_unroll_info needs to call last_java_frame()
2182     Label retaddr;
2183     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2184
2185     __ lw(c_rarg1, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2186     __ mv(t0, -1);
2187     __ sw(t0, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2188
2189     __ mv(xcpool, Deoptimization::Unpack_reexecute);
2190     __ mv(c_rarg0, xthread);
2191     __ orrw(c_rarg2, zr, xcpool); // exec mode
2192     __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
2193     __ bind(retaddr);
2194     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2195
2196     __ reset_last_Java_frame(false);
2197
2198     __ j(after_fetch_unroll_info_call);
2199   } // EnableJVMCI
2200 #endif // INCLUDE_JVMCI
2201
2202   int exception_offset = __ pc() - start;
2203
2204   // Prolog for exception case
2205
2206   // All registers are dead at this entry point, except for x10 and
2207   // x13, which contain the exception oop and exception pc
2208   // respectively. Set them in TLS and fall thru to the
2209   // unpack_with_exception_in_tls entry point.
2210
2211   __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
2212   __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
2213
2214   int exception_in_tls_offset = __ pc() - start;
2215
2216   // new implementation because exception oop is now passed in JavaThread
2217
2218   // Prolog for exception case
2219   // All registers must be preserved because they might be used by LinearScan
2220   // Exception oop and throwing PC are passed in JavaThread
2221   // tos: stack at point of call to method that threw the exception (i.e. only
2222   // args are on the stack, no return address)
2223
2224   // The return address pushed by save_live_registers will be patched
2225   // later with the throwing pc. The correct value is not available
2226   // now because loading it from memory would destroy registers.
2227
2228   // NB: The SP at this point must be the SP of the method that is
2229   // being deoptimized.
Deoptimization assumes that the frame created 2230 // here by save_live_registers is immediately below the method's SP. 2231 // This is a somewhat fragile mechanism. 2232 2233 // Save everything in sight. 2234 map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words); 2235 2236 // Now it is safe to overwrite any register 2237 2238 // Deopt during an exception. Save exec mode for unpack_frames. 2239 __ mv(xcpool, Deoptimization::Unpack_exception); // callee-saved 2240 2241 // load throwing pc from JavaThread and patch it as the return address 2242 // of the current frame. Then clear the field in JavaThread 2243 2244 __ ld(x13, Address(xthread, JavaThread::exception_pc_offset())); 2245 __ sd(x13, Address(fp, frame::return_addr_offset * wordSize)); 2246 __ sd(zr, Address(xthread, JavaThread::exception_pc_offset())); 2247 2248 #ifdef ASSERT 2249 // verify that there is really an exception oop in JavaThread 2250 __ ld(x10, Address(xthread, JavaThread::exception_oop_offset())); 2251 __ verify_oop(x10); 2252 2253 // verify that there is no pending exception 2254 Label no_pending_exception; 2255 __ ld(t0, Address(xthread, Thread::pending_exception_offset())); 2256 __ beqz(t0, no_pending_exception); 2257 __ stop("must not have pending exception here"); 2258 __ bind(no_pending_exception); 2259 #endif 2260 2261 __ bind(cont); 2262 2263 // Call C code. Need thread and this frame, but NOT official VM entry 2264 // crud. We cannot block on this call, no GC can happen. 2265 // 2266 // UnrollBlock* fetch_unroll_info(JavaThread* thread) 2267 2268 // fetch_unroll_info needs to call last_java_frame(). 2269 2270 Label retaddr; 2271 __ set_last_Java_frame(sp, noreg, retaddr, t0); 2272 #ifdef ASSERT 2273 { 2274 Label L; 2275 __ ld(t0, Address(xthread, 2276 JavaThread::last_Java_fp_offset())); 2277 __ beqz(t0, L); 2278 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared"); 2279 __ bind(L); 2280 } 2281 #endif // ASSERT 2282 __ mv(c_rarg0, xthread); 2283 __ mv(c_rarg1, xcpool); 2284 __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)); 2285 __ bind(retaddr); 2286 2287 // Need to have an oopmap that tells fetch_unroll_info where to 2288 // find any register it might need. 2289 oop_maps->add_gc_map(__ pc() - start, map); 2290 2291 __ reset_last_Java_frame(false); 2292 2293 #if INCLUDE_JVMCI 2294 if (EnableJVMCI) { 2295 __ bind(after_fetch_unroll_info_call); 2296 } 2297 #endif 2298 2299 // Load UnrollBlock* into x15 2300 __ mv(x15, x10); 2301 2302 __ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset())); 2303 Label noException; 2304 __ mv(t0, Deoptimization::Unpack_exception); 2305 __ bne(xcpool, t0, noException); // Was exception pending? 2306 __ ld(x10, Address(xthread, JavaThread::exception_oop_offset())); 2307 __ ld(x13, Address(xthread, JavaThread::exception_pc_offset())); 2308 __ sd(zr, Address(xthread, JavaThread::exception_oop_offset())); 2309 __ sd(zr, Address(xthread, JavaThread::exception_pc_offset())); 2310 2311 __ verify_oop(x10); 2312 2313 // Overwrite the result registers with the exception results. 2314 __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10))); 2315 2316 __ bind(noException); 2317 2318 // Only register save data is on the stack. 2319 // Now restore the result registers. Everything else is either dead 2320 // or captured in the vframeArray. 
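  // Editor's note: f10 and x10 are the RISC-V floating-point and integer return
  // registers, so they are the only live values that must survive the frame
  // rebuilding below; everything else is dead or was captured in the vframeArray.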
2321 2322 // Restore fp result register 2323 __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10))); 2324 // Restore integer result register 2325 __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10))); 2326 2327 // Pop all of the register save area off the stack 2328 __ add(sp, sp, frame_size_in_words * wordSize); 2329 2330 // All of the register save area has been popped of the stack. Only the 2331 // return address remains. 2332 2333 // Pop all the frames we must move/replace. 2334 // 2335 // Frame picture (youngest to oldest) 2336 // 1: self-frame (no frame link) 2337 // 2: deopting frame (no frame link) 2338 // 3: caller of deopting frame (could be compiled/interpreted). 2339 // 2340 // Note: by leaving the return address of self-frame on the stack 2341 // and using the size of frame 2 to adjust the stack 2342 // when we are done the return to frame 3 will still be on the stack. 2343 2344 // Pop deoptimized frame 2345 __ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset())); 2346 __ sub(x12, x12, 2 * wordSize); 2347 __ add(sp, sp, x12); 2348 __ ld(fp, Address(sp, 0)); 2349 __ ld(ra, Address(sp, wordSize)); 2350 __ addi(sp, sp, 2 * wordSize); 2351 // RA should now be the return address to the caller (3) 2352 2353 #ifdef ASSERT 2354 // Compilers generate code that bang the stack by as much as the 2355 // interpreter would need. So this stack banging should never 2356 // trigger a fault. Verify that it does not on non product builds. 2357 __ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset())); 2358 __ bang_stack_size(x9, x12); 2359 #endif 2360 // Load address of array of frame pcs into x12 2361 __ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset())); 2362 2363 // Load address of array of frame sizes into x14 2364 __ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset())); 2365 2366 // Load counter into x13 2367 __ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset())); 2368 2369 // Now adjust the caller's stack to make up for the extra locals 2370 // but record the original sp so that we can save it in the skeletal interpreter 2371 // frame and the stack walking of interpreter_sender will get the unextended sp 2372 // value and not the "real" sp value. 2373 2374 const Register sender_sp = x16; 2375 2376 __ mv(sender_sp, sp); 2377 __ lwu(x9, Address(x15, 2378 Deoptimization::UnrollBlock:: 2379 caller_adjustment_offset())); 2380 __ sub(sp, sp, x9); 2381 2382 // Push interpreter frames in a loop 2383 __ mv(t0, 0xDEADDEAD); // Make a recognizable pattern 2384 __ mv(t1, t0); 2385 Label loop; 2386 __ bind(loop); 2387 __ ld(x9, Address(x14, 0)); // Load frame size 2388 __ addi(x14, x14, wordSize); 2389 __ sub(x9, x9, 2 * wordSize); // We'll push pc and fp by hand 2390 __ ld(ra, Address(x12, 0)); // Load pc 2391 __ addi(x12, x12, wordSize); 2392 __ enter(); // Save old & set new fp 2393 __ sub(sp, sp, x9); // Prolog 2394 // This value is corrected by layout_activation_impl 2395 __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize)); 2396 __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable 2397 __ mv(sender_sp, sp); // Pass sender_sp to next frame 2398 __ addi(x13, x13, -1); // Decrement counter 2399 __ bnez(x13, loop); 2400 2401 // Re-push self-frame 2402 __ ld(ra, Address(x12)); 2403 __ enter(); 2404 2405 // Allocate a full sized register save area. 
We subtract 2 because 2406 // enter() just pushed 2 words 2407 __ sub(sp, sp, (frame_size_in_words - 2) * wordSize); 2408 2409 // Restore frame locals after moving the frame 2410 __ fsd(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10))); 2411 __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10))); 2412 2413 // Call C code. Need thread but NOT official VM entry 2414 // crud. We cannot block on this call, no GC can happen. Call should 2415 // restore return values to their stack-slots with the new SP. 2416 // 2417 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode) 2418 2419 // Use fp because the frames look interpreted now 2420 // Don't need the precise return PC here, just precise enough to point into this code blob. 2421 address the_pc = __ pc(); 2422 __ set_last_Java_frame(sp, fp, the_pc, t0); 2423 2424 __ mv(c_rarg0, xthread); 2425 __ mv(c_rarg1, xcpool); // second arg: exec_mode 2426 __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)); 2427 2428 // Set an oopmap for the call site 2429 // Use the same PC we used for the last java frame 2430 oop_maps->add_gc_map(the_pc - start, 2431 new OopMap(frame_size_in_words, 0)); 2432 2433 // Clear fp AND pc 2434 __ reset_last_Java_frame(true); 2435 2436 // Collect return values 2437 __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10))); 2438 __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10))); 2439 2440 // Pop self-frame. 2441 __ leave(); // Epilog 2442 2443 // Jump to interpreter 2444 __ ret(); 2445 2446 // Make sure all code is generated 2447 masm->flush(); 2448 2449 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); 2450 assert(_deopt_blob != nullptr, "create deoptimization blob fail!"); 2451 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); 2452 #if INCLUDE_JVMCI 2453 if (EnableJVMCI) { 2454 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset); 2455 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset); 2456 } 2457 #endif 2458 } 2459 2460 // Number of stack slots between incoming argument block and the start of 2461 // a new frame. The PROLOG must add this many slots to the stack. The 2462 // EPILOG must remove this many slots. 2463 // RISCV needs two words for RA (return address) and FP (frame pointer). 2464 uint SharedRuntime::in_preserve_stack_slots() { 2465 return 2 * VMRegImpl::slots_per_word; 2466 } 2467 2468 uint SharedRuntime::out_preserve_stack_slots() { 2469 return 0; 2470 } 2471 2472 VMReg SharedRuntime::thread_register() { 2473 return xthread->as_VMReg(); 2474 } 2475 2476 //------------------------------generate_handler_blob------ 2477 // 2478 // Generate a special Compile2Runtime blob that saves all registers, 2479 // and setup oopmap. 2480 // 2481 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { 2482 assert(is_polling_page_id(id), "expected a polling page stub id"); 2483 2484 ResourceMark rm; 2485 OopMapSet *oop_maps = new OopMapSet(); 2486 assert_cond(oop_maps != nullptr); 2487 OopMap* map = nullptr; 2488 2489 // Allocate space for the code. Setup code generation tools. 
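  // Editor's note: cause_return distinguishes the two poll flavours handled here.
  // For polling_page_return_handler_id the thread stopped at a poll on method
  // return, so the return address already on the stack is the one to resume at.
  // For the other ids the thread trapped on an in-method poll, so the pc recorded
  // in JavaThread::saved_exception_pc is patched into the frame below and, unless
  // the runtime changed it, later stepped past the poll instruction before
  // returning.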
2490   const char* name = SharedRuntime::stub_name(id);
2491   CodeBuffer buffer(name, 2048, 1024);
2492   MacroAssembler* masm = new MacroAssembler(&buffer);
2493   assert_cond(masm != nullptr);
2494
2495   address start = __ pc();
2496   address call_pc = nullptr;
2497   int frame_size_in_words = -1;
2498   bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
2499   RegisterSaver reg_saver(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */);
2500
2501   // Save Integer and Float registers.
2502   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2503
2504   // The following is basically a call_VM. However, we need the precise
2505   // address of the call in order to generate an oopmap. Hence, we do all the
2506   // work ourselves.
2507
2508   Label retaddr;
2509   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2510
2511   // The return address must always be correct so that the frame constructor
2512   // never sees an invalid pc.
2513
2514   if (!cause_return) {
2515     // overwrite the return address pushed by save_live_registers
2516     // Additionally, x18 is a callee-saved register so we can look at
2517     // it later to determine if someone changed the return address for
2518     // us!
2519     __ ld(x18, Address(xthread, JavaThread::saved_exception_pc_offset()));
2520     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2521   }
2522
2523   // Do the call
2524   __ mv(c_rarg0, xthread);
2525   __ rt_call(call_ptr);
2526   __ bind(retaddr);
2527
2528   // Set an oopmap for the call site. This oopmap will map all
2529   // oop-registers and debug-info registers as callee-saved. This
2530   // will allow deoptimization at this safepoint to find all possible
2531   // debug-info recordings, as well as let GC find all oops.
2532
2533   oop_maps->add_gc_map( __ pc() - start, map);
2534
2535   Label noException;
2536
2537   __ reset_last_Java_frame(false);
2538
2539   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2540
2541   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2542   __ beqz(t0, noException);
2543
2544   // Exception pending
2545
2546   reg_saver.restore_live_registers(masm);
2547
2548   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2549
2550   // No exception case
2551   __ bind(noException);
2552
2553   Label no_adjust, bail;
2554   if (!cause_return) {
2555     // If our stashed return pc was modified by the runtime we avoid touching it
2556     __ ld(t0, Address(fp, frame::return_addr_offset * wordSize));
2557     __ bne(x18, t0, no_adjust);
2558
2559 #ifdef ASSERT
2560     // Verify the correct encoding of the poll we're about to skip.
2561     // See NativeInstruction::is_lwu_to_zr()
2562     __ lwu(t0, Address(x18));
2563     __ andi(t1, t0, 0b0000011);
2564     __ mv(t2, 0b0000011);
2565     __ bne(t1, t2, bail); // 0-6:0b0000011
2566     __ srli(t1, t0, 7);
2567     __ andi(t1, t1, 0b11111);
2568     __ bnez(t1, bail); // 7-11:0b00000
2569     __ srli(t1, t0, 12);
2570     __ andi(t1, t1, 0b111);
2571     __ mv(t2, 0b110);
2572     __ bne(t1, t2, bail); // 12-14:0b110
2573 #endif
2574     // Adjust return pc forward to step over the safepoint poll instruction
2575     __ add(x18, x18, NativeInstruction::instruction_size);
2576     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2577   }
2578
2579   __ bind(no_adjust);
2580   // Normal exit, restore registers and exit.
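  // Editor's note on the ASSERT check above: a safepoint poll is a single
  // "lwu zr, offset(rs1)" instruction, i.e. opcode bits [6:0] = 0b0000011 (LOAD),
  // rd bits [11:7] = 0b00000 (zr) and funct3 bits [14:12] = 0b110 (LWU); the three
  // field comparisons verify exactly that encoding before the return pc is
  // advanced by NativeInstruction::instruction_size.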
2581 2582 reg_saver.restore_live_registers(masm); 2583 __ ret(); 2584 2585 #ifdef ASSERT 2586 __ bind(bail); 2587 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected"); 2588 #endif 2589 2590 // Make sure all code is generated 2591 masm->flush(); 2592 2593 // Fill-out other meta info 2594 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); 2595 } 2596 2597 // 2598 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss 2599 // 2600 // Generate a stub that calls into vm to find out the proper destination 2601 // of a java call. All the argument registers are live at this point 2602 // but since this is generic code we don't know what they are and the caller 2603 // must do any gc of the args. 2604 // 2605 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { 2606 assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); 2607 assert(is_resolve_id(id), "expected a resolve stub id"); 2608 2609 // allocate space for the code 2610 ResourceMark rm; 2611 2612 const char* name = SharedRuntime::stub_name(id); 2613 CodeBuffer buffer(name, 1000, 512); 2614 MacroAssembler* masm = new MacroAssembler(&buffer); 2615 assert_cond(masm != nullptr); 2616 2617 int frame_size_in_words = -1; 2618 RegisterSaver reg_saver(false /* save_vectors */); 2619 2620 OopMapSet *oop_maps = new OopMapSet(); 2621 assert_cond(oop_maps != nullptr); 2622 OopMap* map = nullptr; 2623 2624 int start = __ offset(); 2625 2626 map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words); 2627 2628 int frame_complete = __ offset(); 2629 2630 { 2631 Label retaddr; 2632 __ set_last_Java_frame(sp, noreg, retaddr, t0); 2633 2634 __ mv(c_rarg0, xthread); 2635 __ rt_call(destination); 2636 __ bind(retaddr); 2637 } 2638 2639 // Set an oopmap for the call site. 2640 // We need this not only for callee-saved registers, but also for volatile 2641 // registers that the compiler might be keeping live across a safepoint. 2642 2643 oop_maps->add_gc_map( __ offset() - start, map); 2644 2645 // x10 contains the address we are going to jump to assuming no exception got installed 2646 2647 // clear last_Java_sp 2648 __ reset_last_Java_frame(false); 2649 // check for pending exceptions 2650 Label pending; 2651 __ ld(t0, Address(xthread, Thread::pending_exception_offset())); 2652 __ bnez(t0, pending); 2653 2654 // get the returned Method* 2655 __ get_vm_result_2(xmethod, xthread); 2656 __ sd(xmethod, Address(sp, reg_saver.reg_offset_in_bytes(xmethod))); 2657 2658 // x10 is where we want to jump, overwrite t0 which is saved and temporary 2659 __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t0))); 2660 reg_saver.restore_live_registers(masm); 2661 2662 // We are back to the original state on entry and ready to go. 
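  // Editor's note: the resolved entry point (x10) was stored into t0's slot of the
  // register save area above, so restore_live_registers() reloads it into t0 while
  // every argument register gets its original value back; the jump below then
  // enters the resolved callee as if the original call had gone there directly.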
2663 2664 __ jr(t0); 2665 2666 // Pending exception after the safepoint 2667 2668 __ bind(pending); 2669 2670 reg_saver.restore_live_registers(masm); 2671 2672 // exception pending => remove activation and forward to exception handler 2673 2674 __ sd(zr, Address(xthread, JavaThread::vm_result_offset())); 2675 2676 __ ld(x10, Address(xthread, Thread::pending_exception_offset())); 2677 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2678 2679 // ------------- 2680 // make sure all code is generated 2681 masm->flush(); 2682 2683 // return the blob 2684 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); 2685 } 2686 2687 // Continuation point for throwing of implicit exceptions that are 2688 // not handled in the current activation. Fabricates an exception 2689 // oop and initiates normal exception dispatching in this 2690 // frame. Since we need to preserve callee-saved values (currently 2691 // only for C2, but done for C1 as well) we need a callee-saved oop 2692 // map and therefore have to make these stubs into RuntimeStubs 2693 // rather than BufferBlobs. If the compiler needs all registers to 2694 // be preserved between the fault point and the exception handler 2695 // then it must assume responsibility for that in 2696 // AbstractCompiler::continuation_for_implicit_null_exception or 2697 // continuation_for_implicit_division_by_zero_exception. All other 2698 // implicit exceptions (e.g., NullPointerException or 2699 // AbstractMethodError on entry) are either at call sites or 2700 // otherwise assume that stack unwinding will be initiated, so 2701 // caller saved registers were assumed volatile in the compiler. 2702 2703 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { 2704 assert(is_throw_id(id), "expected a throw stub id"); 2705 2706 const char* name = SharedRuntime::stub_name(id); 2707 2708 // Information about frame layout at time of blocking runtime call. 2709 // Note that we only have to preserve callee-saved registers since 2710 // the compilers are responsible for supplying a continuation point 2711 // if they expect all registers to be preserved. 2712 // n.b. 
riscv asserts that frame::arg_reg_save_area_bytes == 0 2713 assert_cond(runtime_entry != nullptr); 2714 enum layout { 2715 fp_off = 0, 2716 fp_off2, 2717 return_off, 2718 return_off2, 2719 framesize // inclusive of return address 2720 }; 2721 2722 const int insts_size = 1024; 2723 const int locs_size = 64; 2724 2725 ResourceMark rm; 2726 const char* timer_msg = "SharedRuntime generate_throw_exception"; 2727 TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); 2728 2729 CodeBuffer code(name, insts_size, locs_size); 2730 OopMapSet* oop_maps = new OopMapSet(); 2731 MacroAssembler* masm = new MacroAssembler(&code); 2732 assert_cond(oop_maps != nullptr && masm != nullptr); 2733 2734 address start = __ pc(); 2735 2736 // This is an inlined and slightly modified version of call_VM 2737 // which has the ability to fetch the return PC out of 2738 // thread-local storage and also sets up last_Java_sp slightly 2739 // differently than the real call_VM 2740 2741 __ enter(); // Save FP and RA before call 2742 2743 assert(is_even(framesize / 2), "sp not 16-byte aligned"); 2744 2745 // ra and fp are already in place 2746 __ addi(sp, fp, 0 - ((unsigned)framesize << LogBytesPerInt)); // prolog 2747 2748 int frame_complete = __ pc() - start; 2749 2750 // Set up last_Java_sp and last_Java_fp 2751 address the_pc = __ pc(); 2752 __ set_last_Java_frame(sp, fp, the_pc, t0); 2753 2754 // Call runtime 2755 __ mv(c_rarg0, xthread); 2756 BLOCK_COMMENT("call runtime_entry"); 2757 __ rt_call(runtime_entry); 2758 2759 // Generate oop map 2760 OopMap* map = new OopMap(framesize, 0); 2761 assert_cond(map != nullptr); 2762 2763 oop_maps->add_gc_map(the_pc - start, map); 2764 2765 __ reset_last_Java_frame(true); 2766 2767 __ leave(); 2768 2769 // check for pending exceptions 2770 #ifdef ASSERT 2771 Label L; 2772 __ ld(t0, Address(xthread, Thread::pending_exception_offset())); 2773 __ bnez(t0, L); 2774 __ should_not_reach_here(); 2775 __ bind(L); 2776 #endif // ASSERT 2777 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2778 2779 // codeBlob framesize is in words (not VMRegImpl::slot_size) 2780 RuntimeStub* stub = 2781 RuntimeStub::new_runtime_stub(name, 2782 &code, 2783 frame_complete, 2784 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 2785 oop_maps, false); 2786 assert(stub != nullptr, "create runtime stub fail!"); 2787 return stub; 2788 } 2789 2790 #if INCLUDE_JFR 2791 2792 static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) { 2793 __ set_last_Java_frame(sp, fp, the_pc, t0); 2794 __ mv(c_rarg0, thread); 2795 } 2796 2797 static void jfr_epilogue(MacroAssembler* masm) { 2798 __ reset_last_Java_frame(true); 2799 } 2800 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. 2801 // It returns a jobject handle to the event writer. 2802 // The handle is dereferenced and the return value is the event writer oop. 
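// Editor's note on the frame layout shared by both JFR stubs below: the 'layout'
// enum counts 32-bit VMRegImpl stack slots, so fp_off/fp_off2 and
// return_off/return_off2 are the two 64-bit words pushed by enter() (saved fp and
// ra), and framesize is 4 slots == 16 bytes, which also keeps sp 16-byte aligned.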
2803 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { 2804 enum layout { 2805 fp_off, 2806 fp_off2, 2807 return_off, 2808 return_off2, 2809 framesize // inclusive of return address 2810 }; 2811 2812 int insts_size = 1024; 2813 int locs_size = 64; 2814 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); 2815 CodeBuffer code(name, insts_size, locs_size); 2816 OopMapSet* oop_maps = new OopMapSet(); 2817 MacroAssembler* masm = new MacroAssembler(&code); 2818 2819 address start = __ pc(); 2820 __ enter(); 2821 int frame_complete = __ pc() - start; 2822 address the_pc = __ pc(); 2823 jfr_prologue(the_pc, masm, xthread); 2824 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); 2825 2826 jfr_epilogue(masm); 2827 __ resolve_global_jobject(x10, t0, t1); 2828 __ leave(); 2829 __ ret(); 2830 2831 OopMap* map = new OopMap(framesize, 1); 2832 oop_maps->add_gc_map(the_pc - start, map); 2833 2834 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 2835 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 2836 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 2837 oop_maps, false); 2838 return stub; 2839 } 2840 2841 // For c2: call to return a leased buffer. 2842 RuntimeStub* SharedRuntime::generate_jfr_return_lease() { 2843 enum layout { 2844 fp_off, 2845 fp_off2, 2846 return_off, 2847 return_off2, 2848 framesize // inclusive of return address 2849 }; 2850 2851 int insts_size = 1024; 2852 int locs_size = 64; 2853 const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); 2854 CodeBuffer code(name, insts_size, locs_size); 2855 OopMapSet* oop_maps = new OopMapSet(); 2856 MacroAssembler* masm = new MacroAssembler(&code); 2857 2858 address start = __ pc(); 2859 __ enter(); 2860 int frame_complete = __ pc() - start; 2861 address the_pc = __ pc(); 2862 jfr_prologue(the_pc, masm, xthread); 2863 __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); 2864 2865 jfr_epilogue(masm); 2866 __ leave(); 2867 __ ret(); 2868 2869 OopMap* map = new OopMap(framesize, 1); 2870 oop_maps->add_gc_map(the_pc - start, map); 2871 2872 RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) 2873 RuntimeStub::new_runtime_stub(name, &code, frame_complete, 2874 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 2875 oop_maps, false); 2876 return stub; 2877 } 2878 2879 #endif // INCLUDE_JFR