/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_riscv.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(UseRVV && save_vectors) {}
  ~RegisterSaver() {}
  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own
  // gregs:28, float_register:32; except: x1(ra) & x2(sp) & x3(gp) & x4(tp)
  // |---v0---|<---SP
  // |---v1---|save vectors only in generate_handler_blob
  // |-- .. --|
  // |---v31--|-----
  // |---f0---|
  // |---f1---|
  // |   ..   |
  // |---f31--|
  // |---reserved slot for stack alignment---|
  // |---x5---|
  // |   x6   |
  // |---.. --|
  // |---x31--|
  // |---fp---|
  // |---ra---|
  int v0_offset_in_bytes(void) { return 0; }
  int f0_offset_in_bytes(void) {
    int f0_offset = 0;
#ifdef COMPILER2
    if (_save_vectors) {
      f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegister::number_of_registers *
                   BytesPerInt;
    }
#endif
    return f0_offset;
  }
  int reserved_slot_offset_in_bytes(void) {
    return f0_offset_in_bytes() +
           FloatRegister::max_slots_per_register *
           FloatRegister::number_of_registers *
           BytesPerInt;
  }

  int reg_offset_in_bytes(Register r) {
    assert(r->encoding() > 4, "ra, sp, gp and tp not saved");
    return reserved_slot_offset_in_bytes() + (r->encoding() - 4 /* x1, x2, x3, x4 */) * wordSize;
  }

  int freg_offset_in_bytes(FloatRegister f) {
    return f0_offset_in_bytes() + f->encoding() * wordSize;
  }

  int ra_offset_in_bytes(void) {
    return reserved_slot_offset_in_bytes() +
           (Register::number_of_registers - 3) *
           Register::max_slots_per_register *
           BytesPerInt;
  }
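
  // A worked example (illustrative, derived from the accessors above, with
  // vectors not saved): f0..f31 occupy bytes [0, 256), the alignment slot
  // [256, 264), x5..x31 [264, 480), and the fp/ra pair spilled by enter()
  // sits at 480/488, giving the 496-byte, 16-byte-aligned frame computed in
  // save_live_registers. With RVV state saved, the vector area is prepended
  // and every offset grows accordingly.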
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  int vector_size_in_bytes = 0;
  int vector_size_in_slots = 0;
#ifdef COMPILER2
  if (_save_vectors) {
    vector_size_in_bytes += Matcher::scalable_vector_reg_size(T_BYTE);
    vector_size_in_slots += Matcher::scalable_vector_reg_size(T_INT);
  }
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer, Float and Vector registers.
  __ enter();
  __ push_CPU_state(_save_vectors, vector_size_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_maps != nullptr && oop_map != nullptr);

  int sp_offset_in_slots = 0;
  int step_in_slots = 0;
  if (_save_vectors) {
    step_in_slots = vector_size_in_slots;
    for (int i = 0; i < VectorRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
      VectorRegister r = as_VectorRegister(i);
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
    }
  }

  step_in_slots = FloatRegister::max_slots_per_register;
  for (int i = 0; i < FloatRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    FloatRegister r = as_FloatRegister(i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
  }

  step_in_slots = Register::max_slots_per_register;
  // skip the slot reserved for alignment, see MacroAssembler::push_reg;
  // also skip x5 ~ x6 on the stack because they are caller-saved registers.
  sp_offset_in_slots += Register::max_slots_per_register * 3;
  // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack.
  for (int i = 7; i < Register::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    Register r = as_Register(i);
    if (r != xthread) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg());
    }
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ leave();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// riscv does not overlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
  return UseRVV;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

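// A worked example (illustrative): for a signature (int, long, float, double,
// Object), sig_bt is { T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID,
// T_OBJECT } and the loop below assigns the int to j_rarg0, the long to
// j_rarg1 (its T_VOID half gets no register), the float to j_farg0, the
// double to j_farg1 and the oop to j_rarg2. Only once the eight integer (or
// eight float) argument registers are exhausted do values spill to 4-byte
// stack slots, always starting at an even (8-byte aligned) slot index.
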
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3,
    j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN: // fall through
      case T_CHAR:    // fall through
      case T_BYTE:    // fall through
      case T_SHORT:   // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_VOID:
        // halves of T_LONG or T_DOUBLE
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
  __ beqz(t0, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mv(c_rarg0, xmethod);
  __ mv(c_rarg1, ra);
  __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;
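  // For instance (illustrative): a (long, int) signature gives
  // total_args_passed == 3 (T_LONG, T_VOID, T_INT), so extraspace is
  // 3 * 8 == 24 bytes here, rounded up to 32 just below to keep sp
  // 16-byte aligned.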

  __ mv(x19_sender_sp, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2 * wordSize);

  if (extraspace) {
    __ sub(sp, sp, extraspace);
  }

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java long/double in
    // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
    // leaves one slot empty and only stores to a single slot. In this case the
    // slot that is occupied is the T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory, use t0
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        __ lwu(t0, Address(sp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
      } else {
        __ ld(t0, Address(sp, ld_off), /*temp register*/esp);

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ sd(t0, Address(sp, next_off), /*temp register*/esp);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaaaul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        } else {
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller), so move only 32 bits to the slot
        __ sd(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaabul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
          __ sd(r, Address(sp, next_off));
        } else {
          __ sd(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float, use just a part of the slot
        __ fsw(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mv(t0, 0xdeadffffdeadaaacul);
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        __ fsd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mv(esp, sp); // Interp expects args on caller's expression stack

  __ ld(t1, Address(xmethod, in_bytes(Method::interpreter_entry_offset())));
  __ jr(t1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  // Note: x19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
  if (comp_args_on_stack != 0) {
    __ sub(t0, sp, comp_words_on_stack * wordSize);
    __ andi(sp, t0, -16);
  }
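  // E.g. (illustrative): comp_args_on_stack == 3 compiled slots is 12 bytes,
  // which rounds up to comp_words_on_stack == 2 words; sp is dropped by 16
  // bytes and the andi above keeps it 16-byte aligned.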

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld(t1, Address(xmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ld(t0, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ beqz(t0, no_alternative_target);
    __ mv(t1, t0);
    __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lw(t0, Address(esp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed with negative offsets, so the LSW is at the low address.

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;
        __ ld(t0, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ld(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ lw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ flw(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ fld(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ push_cont_fastpath(xthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ sd(xmethod, Address(xthread, JavaThread::callee_target_offset()));

  __ jr(t1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  const Register receiver = j_rarg0;
  const Register data = t0;

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know xmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");

    __ ic_check();
    __ ld(xmethod, Address(data, CompiledICData::speculated_method_offset()));

    __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
    __ beqz(t0, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
      __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
      __ beqz(t1, L_skip_barrier); // non-static
    }

    __ load_method_holder(t1, xmethod);
    __ clinit_barrier(t1, t0, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  assert(total_args_passed <= Argument::n_vector_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  // For more info, see https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
  static const VectorRegister VEC_ArgReg[Argument::n_vector_register_parameters_c] = {
    v8, v9, v10, v11, v12, v13, v14, v15,
    v16, v17, v18, v19, v20, v21, v22, v23
  };
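  // In the standard vector calling convention described there, v8..v23 pass
  // vector arguments while v0 is reserved for the mask, which is why this
  // table starts at v8 (an informational note; nothing below checks it).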

  const int next_reg_val = 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3,
    c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time
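  // Note (psABI recap): a float/double that does not fit in f10-f17 is passed
  // in the next free integer register (see the T_FLOAT/T_DOUBLE cases below)
  // and only then on the stack; unlike the Java convention above, every stack
  // argument here occupies a full two-slot (8-byte) unit.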

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:  // fall through
      case T_CHAR:     // fall through
      case T_BYTE:     // fall through
      case T_SHORT:    // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:   // fall through
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ fsw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fsd(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ sd(x10, Address(fp, -3 * wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ flw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fld(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ ld(x10, Address(fp, -3 * wordSize));
    }
  }
}
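
// Both helpers reuse the scratch word at fp - 3 * wordSize, i.e. the first of
// the extra words the native wrapper reserves just below its saved fp/ra pair
// (cf. the "stack_slots += 6" in generate_native_wrapper).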

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ subi(sp, sp, 2 * wordSize);
      __ fsd(args[i].first()->as_FloatRegister(), Address(sp, 0));
    }
  }
  __ push_reg(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop_reg(x, sp);
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ fld(args[i].first()->as_FloatRegister(), Address(sp, 0));
      __ addi(sp, sp, 2 * wordSize);
    }
  }
}
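
// Note: restore_args mirrors save_args exactly: the GPR set was pushed as one
// block on top of the individually spilled FP registers, so it is popped
// first, and the FP registers are then reloaded walking the arguments in
// reverse (LIFO) order.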

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  const Register temp_reg = x9;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size() / wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots */);

  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(sp, Address(xthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ mv(t0, ContinuationEntry::cookie_value());
  __ sw(t0, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ sd(c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ sw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ sd(zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ sw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ sw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ld(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ld(t0, Address(xthread, JavaThread::held_monitor_count_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ sd(zr, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ sd(zr, Address(xthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, fp points to the spilled fp + 2 * wordSize in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ beq(sp, t0, OK);
  __ stop("incorrect sp");
  __ bind(OK);
#endif

  __ ld(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_fastpath_offset()));

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ld(t0, Address(xthread, JavaThread::jni_monitor_count_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // Save return value potentially containing the exception oop in callee-saved x9
    __ mv(x9, x10);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value
    __ mv(x10, x9);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination. The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
    __ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#endif

  __ ld(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ sd(t0, Address(xthread, JavaThread::held_monitor_count_offset()));

  __ ld(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ add(fp, sp, (int)ContinuationEntry::size() + 2 * wordSize /* 2 extra words to match up with leave() */);
}
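
// Setting fp this way means a subsequent leave() will pick up the fp/ra pair
// that the entry frame's enter() spilled in the two words just below the new
// fp, unwinding the ContinuationEntry in one step (cf. the "2 extra words"
// comment above).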

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
  // verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used at interp_only_mode only
  interpreted_entry_offset = __ pc() - start;
  {
#ifdef ASSERT
    Label is_interp_only;
    __ lw(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
    __ bnez(t0, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ld(c_rarg1, Address(esp, Interpreter::stackElementSize * 2));
    __ ld(c_rarg2, Address(esp, Interpreter::stackElementSize * 1));
    __ ld(c_rarg3, Address(esp, Interpreter::stackElementSize * 0));
    __ push_cont_fastpath(xthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
    // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ bnez(c_rarg2, call_thaw);

    // Make sure the call is patchable
    __ align(NativeInstruction::instruction_size);

    const address tr_call = __ reloc_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ j(exit);

    address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ bnez(c_rarg2, call_thaw);

  // Make sure the call is patchable
  __ align(NativeInstruction::instruction_size);

  const address tr_call = __ reloc_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ j(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  ContinuationEntry::_cleanup_offset = __ pc() - start;
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret();

  // exception handling
  exception_offset = __ pc() - start;
  {
    __ mv(x9, x10); // save return value containing the exception oop in callee-saved x9

    continuation_enter_cleanup(masm);

    __ ld(c_rarg1, Address(fp, -1 * wordSize)); // return address
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, c_rarg1);

    // see OptoRuntime::generate_exception_blob: x10 -- exception oop, x13 -- exception pc

    __ mv(x11, x10); // the exception handler
    __ mv(x10, x9); // restore return value containing the exception oop
    __ verify_oop(x10);

    __ leave();
    __ mv(x13, ra);
    __ jr(x11); // the exception handler
  }

  address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
  enum layout {
    fp_off,
    fp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };
  // assert(is_even(framesize/2), "sp not 16-byte aligned");

  stack_slots = framesize / VMRegImpl::slots_per_word;
  assert(stack_slots == 2, "recheck layout");

  address start = __ pc();

  compiled_entry_offset = __ pc() - start;
  __ enter();

  __ mv(c_rarg1, sp);

  frame_complete = __ pc() - start;
  address the_pc = __ pc();

  __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup

  __ mv(c_rarg0, xthread);
  __ set_last_Java_frame(sp, fp, the_pc, t0);
  __ call_VM_leaf(Continuation::freeze_entry(), 2);
  __ reset_last_Java_frame(true);

  Label pinned;

  __ bnez(x10, pinned);

  // We've succeeded, set sp to the ContinuationEntry
  __ ld(sp, Address(xthread, JavaThread::cont_entry_offset()));
  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ beqz(t0, ok);
  __ leave();
  __ j(RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ bind(ok);

  __ leave();
  __ ret();

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = x9;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = x9;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = x12;  // known to be free at this point
      __ ld(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to Java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc()) SharedRuntime::block_for_jni_critical()
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_continuation_native_intrinsic()) {
    int exception_offset = -1;
    OopMapSet* oop_maps = new OopMapSet();
    int frame_complete = -1;
    int stack_slots = -1;
    int interpreted_entry_offset = -1;
    int vep_offset = -1;
    if (method->is_continuation_enter_intrinsic()) {
      gen_continuation_enter(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             exception_offset,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             interpreted_entry_offset,
                             vep_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      gen_continuation_yield(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             vep_offset);
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }

#ifdef ASSERT
    if (method->is_continuation_enter_intrinsic()) {
      assert(interpreted_entry_offset != -1, "Must be set");
      assert(exception_offset != -1,         "Must be set");
    } else {
      assert(interpreted_entry_offset == -1, "Must be unset");
      assert(exception_offset == -1,         "Must be unset");
    }
    assert(frame_complete != -1,    "Must be set");
    assert(stack_slots != -1,       "Must be set");
    assert(vep_offset != -1,        "Must be set");
#endif

    __ flush();
    nmethod* nm = nmethod::new_native_nmethod(method,
                                              compile_id,
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots,
                                              in_ByteSize(-1),
                                              in_ByteSize(-1),
                                              oop_maps,
                                              exception_offset);
    if (nm == nullptr) return nm;
    if (method->is_continuation_enter_intrinsic()) {
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      _cont_doYield_stub = nm;
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }
    return nm;
  }

  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    {
      Assembler::IncompressibleRegion ir(masm);  // keep the nop as 4 bytes for patching.
      MacroAssembler::assert_alignment(__ pc());
      __ nop();  // 4 bytes
    }
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)nullptr);
  }
  address native_func = method->native_function();
  assert(native_func != nullptr, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (for a static method).
1360 
1361   const int total_in_args = method->size_of_parameters();
1362   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1363 
1364   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1365   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1366 
1367   int argc = 0;
1368   out_sig_bt[argc++] = T_ADDRESS;
1369   if (method->is_static()) {
1370     out_sig_bt[argc++] = T_OBJECT;
1371   }
1372 
1373   for (int i = 0; i < total_in_args ; i++) {
1374     out_sig_bt[argc++] = in_sig_bt[i];
1375   }
1376 
1377   // Now figure out where the args must be stored and how much stack space
1378   // they require.
1379   int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1380 
1381   // Compute framesize for the wrapper.  We need to handlize all oops in
1382   // incoming registers
1383 
1384   // Calculate the total number of stack slots we will need.
1385 
1386   // First count the abi requirement plus all of the outgoing args
1387   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1388 
1389   // Now the space for the inbound oop handle area
1390   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
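  // Sizing sketch (assuming 8-byte words and 4-byte VMReg stack slots):
  // 8 registers * slots_per_word (2) == 16 slots, i.e. 64 bytes reserved
  // for handlizing oops that arrived in registers.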
1391 
1392   int oop_handle_offset = stack_slots;
1393   stack_slots += total_save_slots;
1394 
1395   // Now any space we need for handlizing a klass if static method
1396 
1397   int klass_slot_offset = 0;
1398   int klass_offset = -1;
1399   int lock_slot_offset = 0;
1400   bool is_static = false;
1401 
1402   if (method->is_static()) {
1403     klass_slot_offset = stack_slots;
1404     stack_slots += VMRegImpl::slots_per_word;
1405     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1406     is_static = true;
1407   }
1408 
1409   // Plus a lock if needed
1410 
1411   if (method->is_synchronized()) {
1412     lock_slot_offset = stack_slots;
1413     stack_slots += VMRegImpl::slots_per_word;
1414   }
1415 
  // Now a place (+2 slots) to save return values or temps during shuffling,
  // + 4 slots for the return address (which we own) and the saved fp
1418   stack_slots += 6;
1419 
1420   // Ok The space we have allocated will look like:
1421   //
1422   //
1423   // FP-> |                     |
1424   //      | 2 slots (ra)        |
1425   //      | 2 slots (fp)        |
1426   //      |---------------------|
1427   //      | 2 slots for moves   |
1428   //      |---------------------|
1429   //      | lock box (if sync)  |
1430   //      |---------------------| <- lock_slot_offset
1431   //      | klass (if static)   |
1432   //      |---------------------| <- klass_slot_offset
1433   //      | oopHandle area      |
1434   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1435   //      | outbound memory     |
1436   //      | based arguments     |
1437   //      |                     |
1438   //      |---------------------|
1439   //      |                     |
1440   // SP-> | out_preserved_slots |
1441   //
1442   //
1443 
1444 
1445   // Now compute actual number of stack words we need rounding to make
1446   // stack properly aligned.
1447   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1448 
1449   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
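  // Worked example (illustrative numbers): with StackAlignmentInBytes == 16
  // and 4-byte stack slots, StackAlignmentInSlots == 4; a raw count of 53
  // slots would round up to 56 slots, giving stack_size == 224 bytes and
  // keeping sp 16-byte aligned.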
1450 
  // First thing: make an ic check to see if we should even be here
1452 
1453   // We are free to use all registers as temps without saving them and
1454   // restoring them except fp. fp is the only callee save register
1455   // as far as the interpreter and the compiler(s) are concerned.
1456 
1457   const Register receiver = j_rarg0;
1458 
1459   __ verify_oop(receiver);
1460   assert_different_registers(receiver, t0, t1);
1461 
1462   __ ic_check();
1463 
1464   int vep_offset = ((intptr_t)__ pc()) - start;
1465 
1466   // If we have to make this method not-entrant we'll overwrite its
1467   // first instruction with a jump.
1468   {
1469     Assembler::IncompressibleRegion ir(masm);  // keep the nop as 4 bytes for patching.
1470     MacroAssembler::assert_alignment(__ pc());
1471     __ nop();  // 4 bytes
1472   }
1473 
1474   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1475     Label L_skip_barrier;
1476     __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
1477     __ clinit_barrier(t1, t0, &L_skip_barrier);
1478     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1479 
1480     __ bind(L_skip_barrier);
1481   }
1482 
1483   // Generate stack overflow check
1484   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1485 
1486   // Generate a new frame for the wrapper.
1487   __ enter();
1488   // -2 because return address is already present and so is saved fp
1489   __ sub(sp, sp, stack_size - 2 * wordSize);
1490 
1491   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1492   assert_cond(bs != nullptr);
1493   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1494 
1495   // Frame is now completed as far as size and linkage.
1496   int frame_complete = ((intptr_t)__ pc()) - start;
1497 
1498   // We use x18 as the oop handle for the receiver/klass
1499   // It is callee save so it survives the call to native
1500 
1501   const Register oop_handle_reg = x18;
1502 
1503   //
  // We immediately shuffle the arguments so that, for any vm call we have
  // to make from here on out (sync slow path, jvmti, etc.), we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.
1508 
1509   // -----------------
1510   // The Grand Shuffle
1511 
  // The Java calling convention is either equal (linux) or denser (win64) than the
  // C calling convention. However, because of the jni_env argument, the C calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore, if we move the args from java -> c backwards, we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
  //
1519 
1520   // Record esp-based slot for receiver on stack for non-static methods
1521   int receiver_offset = -1;
1522 
  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
1528   //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots */);
1530   assert_cond(map != nullptr);
1531 
1532   int float_args = 0;
1533   int int_args = 0;
1534 
1535 #ifdef ASSERT
1536   bool reg_destroyed[Register::number_of_registers];
1537   bool freg_destroyed[FloatRegister::number_of_registers];
1538   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1539     reg_destroyed[r] = false;
1540   }
1541   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1542     freg_destroyed[f] = false;
1543   }
1544 
1545 #endif /* ASSERT */
1546 
1547   // For JNI natives the incoming and outgoing registers are offset upwards.
1548   GrowableArray<int> arg_order(2 * total_in_args);
1549 
1550   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1551     arg_order.push(i);
1552     arg_order.push(c_arg);
1553   }
1554 
1555   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1556     int i = arg_order.at(ai);
1557     int c_arg = arg_order.at(ai + 1);
1558     __ block_comment(err_msg("mv %d -> %d", i, c_arg));
1559     assert(c_arg != -1 && i != -1, "wrong order");
1560 #ifdef ASSERT
1561     if (in_regs[i].first()->is_Register()) {
1562       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1563     } else if (in_regs[i].first()->is_FloatRegister()) {
1564       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1565     }
1566     if (out_regs[c_arg].first()->is_Register()) {
1567       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1568     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1569       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1570     }
1571 #endif /* ASSERT */
1572     switch (in_sig_bt[i]) {
1573       case T_ARRAY:
1574       case T_OBJECT:
1575         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1576                        ((i == 0) && (!is_static)),
1577                        &receiver_offset);
1578         int_args++;
1579         break;
1580       case T_VOID:
1581         break;
1582 
1583       case T_FLOAT:
1584         __ float_move(in_regs[i], out_regs[c_arg]);
1585         float_args++;
1586         break;
1587 
1588       case T_DOUBLE:
1589         assert( i + 1 < total_in_args &&
1590                 in_sig_bt[i + 1] == T_VOID &&
1591                 out_sig_bt[c_arg + 1] == T_VOID, "bad arg list");
1592         __ double_move(in_regs[i], out_regs[c_arg]);
1593         float_args++;
1594         break;
1595 
1596       case T_LONG :
1597         __ long_move(in_regs[i], out_regs[c_arg]);
1598         int_args++;
1599         break;
1600 
1601       case T_ADDRESS:
1602         assert(false, "found T_ADDRESS in java args");
1603         break;
1604 
1605       default:
1606         __ move32_64(in_regs[i], out_regs[c_arg]);
1607         int_args++;
1608     }
1609   }
1610 
1611   // point c_arg at the first arg that is already loaded in case we
1612   // need to spill before we call out
1613   int c_arg = total_c_args - total_in_args;
1614 
1615   // Pre-load a static method's oop into c_rarg1.
1616   if (method->is_static()) {
1617 
1618     //  load oop into a register
1619     __ movoop(c_rarg1,
1620               JNIHandles::make_local(method->method_holder()->java_mirror()));
1621 
    // Now handlize the static class mirror; it's known to be not-null.
1623     __ sd(c_rarg1, Address(sp, klass_offset));
1624     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1625 
1626     // Now get the handle
1627     __ la(c_rarg1, Address(sp, klass_offset));
1628     // and protect the arg if we must spill
1629     c_arg--;
1630   }
1631 
1632   // Change state to native (we save the return address in the thread, since it might not
1633   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1634   // points into the right code segment. It does not have to be the correct return pc.
1635   // We use the same pc/oopMap repeatedly when we call out.
1636 
1637   Label native_return;
1638   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1639     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1640     __ set_last_Java_frame(sp, noreg, native_return, t0);
1641   } else {
1642     intptr_t the_pc = (intptr_t) __ pc();
1643     oop_maps->add_gc_map(the_pc - start, map);
1644 
1645     __ set_last_Java_frame(sp, noreg, __ pc(), t0);
1646   }
1647 
1648   Label dtrace_method_entry, dtrace_method_entry_done;
1649   if (DTraceMethodProbes) {
1650     __ j(dtrace_method_entry);
1651     __ bind(dtrace_method_entry_done);
1652   }
1653 
1654   // RedefineClasses() tracing support for obsolete method entry
1655   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1656     // protect the args we've loaded
1657     save_args(masm, total_c_args, c_arg, out_regs);
1658     __ mov_metadata(c_rarg1, method());
1659     __ call_VM_leaf(
1660       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1661       xthread, c_rarg1);
1662     restore_args(masm, total_c_args, c_arg, out_regs);
1663   }
1664 
1665   // Lock a synchronized method
1666 
1667   // Register definitions used by locking and unlocking
1668 
1669   const Register swap_reg = x10;
1670   const Register obj_reg  = x9;  // Will contain the oop
1671   const Register lock_reg = x30;  // Address of compiler lock object (BasicLock)
1672   const Register old_hdr  = x30;  // value of old header at unlock time
1673   const Register lock_tmp = x31;  // Temporary used by lightweight_lock/unlock
1674   const Register tmp      = ra;
1675 
1676   Label slow_path_lock;
1677   Label lock_done;
1678 
1679   if (method->is_synchronized()) {
1680     Label count;
1681 
1682     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1683 
1684     // Get the handle (the 2nd argument)
1685     __ mv(oop_handle_reg, c_rarg1);
1686 
1687     // Get address of the box
1688 
1689     __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1690 
1691     // Load the oop from the handle
1692     __ ld(obj_reg, Address(oop_handle_reg, 0));
1693 
1694     if (LockingMode == LM_MONITOR) {
1695       __ j(slow_path_lock);
1696     } else if (LockingMode == LM_LEGACY) {
      // Load (object->mark() | 1) into swap_reg (x10)
1698       __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1699       __ ori(swap_reg, t0, 1);
1700 
1701       // Save (object->mark() | 1) into BasicLock's displaced header
1702       __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1703 
1704       // src -> dest if dest == x10 else x10 <- dest
1705       __ cmpxchg_obj_header(x10, lock_reg, obj_reg, lock_tmp, count, /*fallthrough*/nullptr);
1706 
1707       // Test if the oopMark is an obvious stack pointer, i.e.,
1708       //  1) (mark & 3) == 0, and
1709       //  2) sp <= mark < mark + os::pagesize()
1710       // These 3 tests can be done by evaluating the following
1711       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1712       // assuming both stack pointer and pagesize have their
1713       // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg (x10) as the result of cmpxchg
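      //
      // Worked example (illustrative only): with a 4096-byte page,
      // 3 - os::vm_page_size() == -4093 == 0x...fffff003, a mask keeping
      // the low two bits and every bit at or above bit 12. If the mark is
      // an aligned stack address in [sp, sp + 4096), then (mark - sp) lies
      // in [0, 4096) with its low two bits clear, so the AND yields zero
      // (recursive case); any other mark leaves bits set and we take the
      // slow path.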
1715 
1716       __ sub(swap_reg, swap_reg, sp);
1717       __ mv(t0, 3 - (int)os::vm_page_size());
1718       __ andr(swap_reg, swap_reg, t0);
1719 
1720       // Save the test result, for recursive case, the result is zero
1721       __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1722       __ bnez(swap_reg, slow_path_lock);
1723 
1724       __ bind(count);
1725       __ inc_held_monitor_count(t0);
1726     } else {
1727       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1728       __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1729     }
1730 
1731     // Slow path will re-enter here
1732     __ bind(lock_done);
1733   }
1734 
1735 
1736   // Finally just about ready to make the JNI call
1737 
1738   // get JNIEnv* which is first argument to native
1739   __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1740 
1741   // Now set thread in native
1742   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1743   __ mv(t0, _thread_in_native);
1744   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1745   __ sw(t0, Address(t1));
1746 
1747   // Clobbers t1
1748   __ rt_call(native_func);
1749 
1750   // Verify or restore cpu control state after JNI call
1751   __ restore_cpu_control_state_after_jni(t0);
1752 
1753   // Unpack native results.
1754   if (ret_type != T_OBJECT && ret_type != T_ARRAY) {
1755     __ cast_primitive_type(ret_type, x10);
1756   }
1757 
1758   Label safepoint_in_progress, safepoint_in_progress_done;
1759 
1760   // Switch thread to "native transition" state before reading the synchronization state.
1761   // This additional state is necessary because reading and testing the synchronization
1762   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1763   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1764   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1765   //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
1767   __ mv(t0, _thread_in_native_trans);
1768 
1769   __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
1770 
1771   // Force this write out before the read below
1772   if (!UseSystemMemoryBarrier) {
1773     __ membar(MacroAssembler::AnyAny);
1774   }
1775 
1776   // check for safepoint operation in progress and/or pending suspend requests
1777   {
1778     // We need an acquire here to ensure that any subsequent load of the
1779     // global SafepointSynchronize::_state flag is ordered after this load
1780     // of the thread-local polling word. We don't want this poll to
1781     // return false (i.e. not safepointing) and a later poll of the global
1782     // SafepointSynchronize::_state spuriously to return true.
1783     // This is to avoid a race when we're in a native->Java transition
1784     // racing the code which wakes up from a safepoint.
1785 
1786     __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1787     __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
1788     __ bnez(t0, safepoint_in_progress);
1789     __ bind(safepoint_in_progress_done);
1790   }
1791 
1792   // change thread state
1793   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1794   __ mv(t0, _thread_in_Java);
1795   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1796   __ sw(t0, Address(t1));
1797 
1798   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1799     // Check preemption for Object.wait()
1800     __ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1801     __ beqz(t1, native_return);
1802     __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1803     __ jr(t1);
1804     __ bind(native_return);
1805 
1806     intptr_t the_pc = (intptr_t) __ pc();
1807     oop_maps->add_gc_map(the_pc - start, map);
1808   }
1809 
1810   Label reguard;
1811   Label reguard_done;
1812   __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1813   __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1814   __ beq(t0, t1, reguard);
1815   __ bind(reguard_done);
1816 
1817   // native result if any is live
1818 
1819   // Unlock
1820   Label unlock_done;
1821   Label slow_path_unlock;
1822   if (method->is_synchronized()) {
1823 
1824     // Get locked oop from the handle we passed to jni
1825     __ ld(obj_reg, Address(oop_handle_reg, 0));
1826 
1827     Label done, not_recursive;
1828 
1829     if (LockingMode == LM_LEGACY) {
1830       // Simple recursive lock?
1831       __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1832       __ bnez(t0, not_recursive);
1833       __ dec_held_monitor_count(t0);
1834       __ j(done);
1835     }
1836 
1837     __ bind(not_recursive);
1838 
    // Must save x10 if it is live now, because cmpxchg must use it
1840     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1841       save_native_result(masm, ret_type, stack_slots);
1842     }
1843 
1844     if (LockingMode == LM_MONITOR) {
1845       __ j(slow_path_unlock);
1846     } else if (LockingMode == LM_LEGACY) {
1847       // get address of the stack lock
1848       __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1849       //  get old displaced header
1850       __ ld(old_hdr, Address(x10, 0));
1851 
1852       // Atomic swap old header if oop still contains the stack lock
1853       Label count;
1854       __ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock);
1855       __ bind(count);
1856       __ dec_held_monitor_count(t0);
1857     } else {
1858       assert(LockingMode == LM_LIGHTWEIGHT, "");
1859       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1860     }
1861 
1862     // slow path re-enters here
1863     __ bind(unlock_done);
1864     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1865       restore_native_result(masm, ret_type, stack_slots);
1866     }
1867 
1868     __ bind(done);
1869   }
1870 
1871   Label dtrace_method_exit, dtrace_method_exit_done;
1872   if (DTraceMethodProbes) {
1873     __ j(dtrace_method_exit);
1874     __ bind(dtrace_method_exit_done);
1875   }
1876 
1877   __ reset_last_Java_frame(false);
1878 
1879   // Unbox oop result, e.g. JNIHandles::resolve result.
1880   if (is_reference_type(ret_type)) {
1881     __ resolve_jobject(x10, x11, x12);
1882   }
1883 
1884   if (CheckJNICalls) {
1885     // clear_pending_jni_exception_check
1886     __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
1887   }
1888 
1889   // reset handle block
1890   __ ld(x12, Address(xthread, JavaThread::active_handles_offset()));
1891   __ sd(zr, Address(x12, JNIHandleBlock::top_offset()));
1892 
1893   __ leave();
1894 
1895   // Any exception pending?
1896   Label exception_pending;
1897   __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1898   __ bnez(t0, exception_pending);
1899 
1900   // We're done
1901   __ ret();
1902 
1903   // Unexpected paths are out of line and go here
1904 
  // forward the exception
  __ bind(exception_pending);
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1910 
1911   // Slow path locking & unlocking
1912   if (method->is_synchronized()) {
1913 
1914     __ block_comment("Slow path lock {");
1915     __ bind(slow_path_lock);
1916 
    // last_Java_frame is set up. No exceptions, so do a vanilla call, not call_VM
1918     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1919 
1920     // protect the args we've loaded
1921     save_args(masm, total_c_args, c_arg, out_regs);
1922 
1923     __ mv(c_rarg0, obj_reg);
1924     __ mv(c_rarg1, lock_reg);
1925     __ mv(c_rarg2, xthread);
1926 
1927     // Not a leaf but we have last_Java_frame setup as we want.
1928     // We don't want to unmount in case of contention since that would complicate preserving
1929     // the arguments that had already been marshalled into the native convention. So we force
1930     // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
1931     // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
1932     __ push_cont_fastpath();
1933     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1934     __ pop_cont_fastpath();
1935     restore_args(masm, total_c_args, c_arg, out_regs);
1936 
1937 #ifdef ASSERT
1938     { Label L;
1939       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1940       __ beqz(t0, L);
1941       __ stop("no pending exception allowed on exit from monitorenter");
1942       __ bind(L);
1943     }
1944 #endif
1945     __ j(lock_done);
1946 
1947     __ block_comment("} Slow path lock");
1948 
1949     __ block_comment("Slow path unlock {");
1950     __ bind(slow_path_unlock);
1951 
1952     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1953       save_native_result(masm, ret_type, stack_slots);
1954     }
1955 
1956     __ mv(c_rarg2, xthread);
1957     __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1958     __ mv(c_rarg0, obj_reg);
1959 
1960     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1961     // NOTE that obj_reg == x9 currently
1962     __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1963     __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1964 
1965     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1966 
1967 #ifdef ASSERT
1968     {
1969       Label L;
1970       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1971       __ beqz(t0, L);
1972       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1973       __ bind(L);
1974     }
1975 #endif /* ASSERT */
1976 
1977     __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1978 
1979     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1980       restore_native_result(masm, ret_type, stack_slots);
1981     }
1982     __ j(unlock_done);
1983 
1984     __ block_comment("} Slow path unlock");
1985 
1986   } // synchronized
1987 
1988   // SLOW PATH Reguard the stack if needed
1989 
1990   __ bind(reguard);
1991   save_native_result(masm, ret_type, stack_slots);
1992   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1993   restore_native_result(masm, ret_type, stack_slots);
1994   // and continue
1995   __ j(reguard_done);
1996 
1997   // SLOW PATH safepoint
1998   {
1999     __ block_comment("safepoint {");
2000     __ bind(safepoint_in_progress);
2001 
    // Don't use call_VM, as it will see a possible pending exception, forward it,
    // and never return here, preventing us from clearing _last_native_pc down below.
2004     //
2005     save_native_result(masm, ret_type, stack_slots);
2006     __ mv(c_rarg0, xthread);
2007 #ifndef PRODUCT
2008     assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2009 #endif
2010     __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
2011 
2012     // Restore any method result value
2013     restore_native_result(masm, ret_type, stack_slots);
2014 
2015     __ j(safepoint_in_progress_done);
2016     __ block_comment("} safepoint");
2017   }
2018 
2019   // SLOW PATH dtrace support
2020   if (DTraceMethodProbes) {
2021     {
2022       __ block_comment("dtrace entry {");
2023       __ bind(dtrace_method_entry);
2024 
      // We have all of the arguments set up at this point. We must not touch
      // any argument registers at this point (what if we save/restore them
      // and there are no oops?).
2027 
2028       save_args(masm, total_c_args, c_arg, out_regs);
2029       __ mov_metadata(c_rarg1, method());
2030       __ call_VM_leaf(
2031         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2032         xthread, c_rarg1);
2033       restore_args(masm, total_c_args, c_arg, out_regs);
2034       __ j(dtrace_method_entry_done);
2035       __ block_comment("} dtrace entry");
2036     }
2037 
2038     {
2039       __ block_comment("dtrace exit {");
2040       __ bind(dtrace_method_exit);
2041       save_native_result(masm, ret_type, stack_slots);
2042       __ mov_metadata(c_rarg1, method());
2043       __ call_VM_leaf(
2044            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2045            xthread, c_rarg1);
2046       restore_native_result(masm, ret_type, stack_slots);
2047       __ j(dtrace_method_exit_done);
2048       __ block_comment("} dtrace exit");
2049     }
2050   }
2051 
2052   __ flush();
2053 
2054   nmethod *nm = nmethod::new_native_nmethod(method,
2055                                             compile_id,
2056                                             masm->code(),
2057                                             vep_offset,
2058                                             frame_complete,
2059                                             stack_slots / VMRegImpl::slots_per_word,
2060                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2061                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2062                                             oop_maps);
2063   assert(nm != nullptr, "create native nmethod fail!");
2064   return nm;
2065 }
2066 
// This function returns the adjustment size (in number of words) to a c2i
// adapter activation, for use during deoptimization.
2069 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2070   assert(callee_locals >= callee_parameters,
2071          "test and remove; got more parms than locals");
2072   if (callee_locals < callee_parameters) {
2073     return 0;                   // No adjustment for negative locals
2074   }
2075   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2076   // diff is counted in stack words
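  // For example (illustrative values): callee_parameters == 2,
  // callee_locals == 5 and Interpreter::stackElementWords == 1 give
  // diff == 3, and the adjustment returned is align_up(3, 2) == 4 words.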
2077   return align_up(diff, 2);
2078 }
2079 
2080 //------------------------------generate_deopt_blob----------------------------
2081 void SharedRuntime::generate_deopt_blob() {
2082   // Allocate space for the code
2083   ResourceMark rm;
2084   // Setup code generation tools
2085   int pad = 0;
2086 #if INCLUDE_JVMCI
2087   if (EnableJVMCI) {
2088     pad += 512; // Increase the buffer size when compiling for JVMCI
2089   }
2090 #endif
2091   const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
2092   CodeBuffer buffer(name, 2048 + pad, 1024);
2093   MacroAssembler* masm = new MacroAssembler(&buffer);
2094   int frame_size_in_words = -1;
2095   OopMap* map = nullptr;
2096   OopMapSet *oop_maps = new OopMapSet();
2097   assert_cond(masm != nullptr && oop_maps != nullptr);
2098   RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0);
2099 
2100   // -------------
2101   // This code enters when returning to a de-optimized nmethod.  A return
2102   // address has been pushed on the stack, and return values are in
2103   // registers.
2104   // If we are doing a normal deopt then we were called from the patched
2105   // nmethod from the point we returned to the nmethod. So the return
2106   // address on the stack is wrong by NativeCall::instruction_size
2107   // We will adjust the value so it looks like we have the original return
2108   // address on the stack (like when we eagerly deoptimized).
2109   // In the case of an exception pending when deoptimizing, we enter
2110   // with a return address on the stack that points after the call we patched
2111   // into the exception handler. We have the following register state from,
2112   // e.g., the forward exception stub (see stubGenerator_riscv.cpp).
2113   //    x10: exception oop
2114   //    x9: exception handler
2115   //    x13: throwing pc
2116   // So in this case we simply jam x13 into the useless return address and
2117   // the stack looks just like we want.
2118   //
2119   // At this point we need to de-opt.  We save the argument return
2120   // registers.  We call the first C routine, fetch_unroll_info().  This
2121   // routine captures the return values and returns a structure which
2122   // describes the current frame size and the sizes of all replacement frames.
2123   // The current frame is compiled code and may contain many inlined
2124   // functions, each with their own JVM state.  We pop the current frame, then
2125   // push all the new frames.  Then we call the C routine unpack_frames() to
2126   // populate these frames.  Finally unpack_frames() returns us the new target
2127   // address.  Notice that callee-save registers are BLOWN here; they have
2128   // already been captured in the vframeArray at the time the return PC was
2129   // patched.
2130   address start = __ pc();
2131   Label cont;
2132 
  // Prolog for the non-exception case!
2134 
2135   // Save everything in sight.
2136   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2137 
2138   // Normal deoptimization.  Save exec mode for unpack_frames.
2139   __ mv(xcpool, Deoptimization::Unpack_deopt); // callee-saved
2140   __ j(cont);
2141 
2142   int reexecute_offset = __ pc() - start;
2143 #if INCLUDE_JVMCI && !defined(COMPILER1)
2144   if (UseJVMCICompiler) {
2145     // JVMCI does not use this kind of deoptimization
2146     __ should_not_reach_here();
2147   }
2148 #endif
2149 
2150   // Reexecute case
  // The return address is the pc that describes which bci to re-execute at.
2152 
2153   // No need to update map as each call to save_live_registers will produce identical oopmap
2154   (void) reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2155 
2156   __ mv(xcpool, Deoptimization::Unpack_reexecute); // callee-saved
2157   __ j(cont);
2158 
2159 #if INCLUDE_JVMCI
2160   Label after_fetch_unroll_info_call;
2161   int implicit_exception_uncommon_trap_offset = 0;
2162   int uncommon_trap_offset = 0;
2163 
2164   if (EnableJVMCI) {
2165     implicit_exception_uncommon_trap_offset = __ pc() - start;
2166 
2167     __ ld(ra, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2168     __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2169 
2170     uncommon_trap_offset = __ pc() - start;
2171 
2172     // Save everything in sight.
2173     reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2174     // fetch_unroll_info needs to call last_java_frame()
2175     Label retaddr;
2176     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2177 
2178     __ lw(c_rarg1, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2179     __ mv(t0, -1);
2180     __ sw(t0, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2181 
2182     __ mv(xcpool, Deoptimization::Unpack_reexecute);
2183     __ mv(c_rarg0, xthread);
2184     __ orrw(c_rarg2, zr, xcpool); // exec mode
2185     __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
2186     __ bind(retaddr);
2187     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2188 
2189     __ reset_last_Java_frame(false);
2190 
2191     __ j(after_fetch_unroll_info_call);
2192   } // EnableJVMCI
2193 #endif // INCLUDE_JVMCI
2194 
2195   int exception_offset = __ pc() - start;
2196 
2197   // Prolog for exception case
2198 
2199   // all registers are dead at this entry point, except for x10, and
2200   // x13 which contain the exception oop and exception pc
2201   // respectively.  Set them in TLS and fall thru to the
2202   // unpack_with_exception_in_tls entry point.
2203 
2204   __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
2205   __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
2206 
2207   int exception_in_tls_offset = __ pc() - start;
2208 
2209   // new implementation because exception oop is now passed in JavaThread
2210 
2211   // Prolog for exception case
2212   // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
2214   // tos: stack at point of call to method that threw the exception (i.e. only
2215   // args are on the stack, no return address)
2216 
2217   // The return address pushed by save_live_registers will be patched
2218   // later with the throwing pc. The correct value is not available
2219   // now because loading it from memory would destroy registers.
2220 
2221   // NB: The SP at this point must be the SP of the method that is
2222   // being deoptimized.  Deoptimization assumes that the frame created
2223   // here by save_live_registers is immediately below the method's SP.
2224   // This is a somewhat fragile mechanism.
2225 
2226   // Save everything in sight.
2227   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2228 
2229   // Now it is safe to overwrite any register
2230 
2231   // Deopt during an exception.  Save exec mode for unpack_frames.
2232   __ mv(xcpool, Deoptimization::Unpack_exception); // callee-saved
2233 
2234   // load throwing pc from JavaThread and patch it as the return address
2235   // of the current frame. Then clear the field in JavaThread
2236 
2237   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2238   __ sd(x13, Address(fp, frame::return_addr_offset * wordSize));
2239   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2240 
2241 #ifdef ASSERT
2242   // verify that there is really an exception oop in JavaThread
2243   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2244   __ verify_oop(x10);
2245 
2246   // verify that there is no pending exception
2247   Label no_pending_exception;
2248   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2249   __ beqz(t0, no_pending_exception);
2250   __ stop("must not have pending exception here");
2251   __ bind(no_pending_exception);
2252 #endif
2253 
2254   __ bind(cont);
2255 
2256   // Call C code.  Need thread and this frame, but NOT official VM entry
2257   // crud.  We cannot block on this call, no GC can happen.
2258   //
2259   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2260 
2261   // fetch_unroll_info needs to call last_java_frame().
2262 
2263   Label retaddr;
2264   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2265 #ifdef ASSERT
2266   {
2267     Label L;
    __ ld(t0, Address(xthread, JavaThread::last_Java_fp_offset()));
2270     __ beqz(t0, L);
2271     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2272     __ bind(L);
2273   }
2274 #endif // ASSERT
2275   __ mv(c_rarg0, xthread);
2276   __ mv(c_rarg1, xcpool);
2277   __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
2278   __ bind(retaddr);
2279 
2280   // Need to have an oopmap that tells fetch_unroll_info where to
2281   // find any register it might need.
2282   oop_maps->add_gc_map(__ pc() - start, map);
2283 
2284   __ reset_last_Java_frame(false);
2285 
2286 #if INCLUDE_JVMCI
2287   if (EnableJVMCI) {
2288     __ bind(after_fetch_unroll_info_call);
2289   }
2290 #endif
2291 
2292   // Load UnrollBlock* into x15
2293   __ mv(x15, x10);
2294 
2295   __ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset()));
2296   Label noException;
2297   __ mv(t0, Deoptimization::Unpack_exception);
2298   __ bne(xcpool, t0, noException); // Was exception pending?
2299   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2300   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2301   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
2302   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2303 
2304   __ verify_oop(x10);
2305 
2306   // Overwrite the result registers with the exception results.
2307   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2308 
2309   __ bind(noException);
2310 
2311   // Only register save data is on the stack.
2312   // Now restore the result registers.  Everything else is either dead
2313   // or captured in the vframeArray.
2314 
2315   // Restore fp result register
2316   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2317   // Restore integer result register
2318   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2319 
2320   // Pop all of the register save area off the stack
2321   __ add(sp, sp, frame_size_in_words * wordSize);
2322 
  // All of the register save area has been popped off the stack. Only the
2324   // return address remains.
2325 
2326   // Pop all the frames we must move/replace.
2327   //
2328   // Frame picture (youngest to oldest)
2329   // 1: self-frame (no frame link)
2330   // 2: deopting frame  (no frame link)
2331   // 3: caller of deopting frame (could be compiled/interpreted).
2332   //
2333   // Note: by leaving the return address of self-frame on the stack
2334   // and using the size of frame 2 to adjust the stack
2335   // when we are done the return to frame 3 will still be on the stack.
2336 
2337   // Pop deoptimized frame
2338   __ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2339   __ subi(x12, x12, 2 * wordSize);
2340   __ add(sp, sp, x12);
2341   __ ld(fp, Address(sp, 0));
2342   __ ld(ra, Address(sp, wordSize));
2343   __ addi(sp, sp, 2 * wordSize);
2344   // RA should now be the return address to the caller (3)
2345 
2346 #ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
2348   // interpreter would need. So this stack banging should never
2349   // trigger a fault. Verify that it does not on non product builds.
2350   __ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2351   __ bang_stack_size(x9, x12);
2352 #endif
2353   // Load address of array of frame pcs into x12
2354   __ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset()));
2355 
2356   // Load address of array of frame sizes into x14
2357   __ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset()));
2358 
2359   // Load counter into x13
2360   __ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset()));
2361 
  // Now adjust the caller's stack to make up for the extra locals, but
  // record the original sp so that we can save it in the skeletal interpreter
  // frame; the stack walking of interpreter_sender will then get the
  // unextended sp value and not the "real" sp value.
2366 
2367   const Register sender_sp = x16;
2368 
2369   __ mv(sender_sp, sp);
2370   __ lwu(x9, Address(x15,
2371                      Deoptimization::UnrollBlock::
2372                      caller_adjustment_offset()));
2373   __ sub(sp, sp, x9);
2374 
2375   // Push interpreter frames in a loop
2376   __ mv(t0, 0xDEADDEAD);               // Make a recognizable pattern
2377   __ mv(t1, t0);
2378   Label loop;
2379   __ bind(loop);
2380   __ ld(x9, Address(x14, 0));          // Load frame size
2381   __ addi(x14, x14, wordSize);
2382   __ subi(x9, x9, 2 * wordSize);       // We'll push pc and fp by hand
2383   __ ld(ra, Address(x12, 0));          // Load pc
2384   __ addi(x12, x12, wordSize);
2385   __ enter();                          // Save old & set new fp
2386   __ sub(sp, sp, x9);                  // Prolog
2387   // This value is corrected by layout_activation_impl
2388   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
2389   __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2390   __ mv(sender_sp, sp);                // Pass sender_sp to next frame
2391   __ subi(x13, x13, 1);                // Decrement counter
2392   __ bnez(x13, loop);
2393 
  // Re-push self-frame
2395   __ ld(ra, Address(x12));
2396   __ enter();
2397 
2398   // Allocate a full sized register save area.  We subtract 2 because
2399   // enter() just pushed 2 words
2400   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2401 
2402   // Restore frame locals after moving the frame
2403   __ fsd(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2404   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2405 
2406   // Call C code.  Need thread but NOT official VM entry
2407   // crud.  We cannot block on this call, no GC can happen.  Call should
2408   // restore return values to their stack-slots with the new SP.
2409   //
2410   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2411 
2412   // Use fp because the frames look interpreted now
2413   // Don't need the precise return PC here, just precise enough to point into this code blob.
2414   address the_pc = __ pc();
2415   __ set_last_Java_frame(sp, fp, the_pc, t0);
2416 
2417   __ mv(c_rarg0, xthread);
2418   __ mv(c_rarg1, xcpool); // second arg: exec_mode
2419   __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
2420 
2421   // Set an oopmap for the call site
2422   // Use the same PC we used for the last java frame
2423   oop_maps->add_gc_map(the_pc - start,
2424                        new OopMap(frame_size_in_words, 0));
2425 
2426   // Clear fp AND pc
2427   __ reset_last_Java_frame(true);
2428 
2429   // Collect return values
2430   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2431   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2432 
2433   // Pop self-frame.
2434   __ leave();                           // Epilog
2435 
2436   // Jump to interpreter
2437   __ ret();
2438 
2439   // Make sure all code is generated
2440   masm->flush();
2441 
2442   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2443   assert(_deopt_blob != nullptr, "create deoptimization blob fail!");
2444   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2445 #if INCLUDE_JVMCI
2446   if (EnableJVMCI) {
2447     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2448     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2449   }
2450 #endif
2451 }
2452 
2453 // Number of stack slots between incoming argument block and the start of
2454 // a new frame. The PROLOG must add this many slots to the stack. The
2455 // EPILOG must remove this many slots.
2456 // RISCV needs two words for RA (return address) and FP (frame pointer).
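// For example, assuming 8-byte words and 4-byte VMReg slots, this is
// 2 * 2 == 4 slots, i.e. 16 bytes for the RA/FP pair.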
2457 uint SharedRuntime::in_preserve_stack_slots() {
2458   return 2 * VMRegImpl::slots_per_word;
2459 }
2460 
2461 uint SharedRuntime::out_preserve_stack_slots() {
2462   return 0;
2463 }
2464 
2465 VMReg SharedRuntime::thread_register() {
2466   return xthread->as_VMReg();
2467 }
2468 
2469 //------------------------------generate_handler_blob------
2470 //
2471 // Generate a special Compile2Runtime blob that saves all registers,
2472 // and setup oopmap.
2473 //
2474 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2475   assert(is_polling_page_id(id), "expected a polling page stub id");
2476 
2477   ResourceMark rm;
2478   OopMapSet *oop_maps = new OopMapSet();
2479   assert_cond(oop_maps != nullptr);
2480   OopMap* map = nullptr;
2481 
2482   // Allocate space for the code.  Setup code generation tools.
2483   const char* name = SharedRuntime::stub_name(id);
2484   CodeBuffer buffer(name, 2048, 1024);
2485   MacroAssembler* masm = new MacroAssembler(&buffer);
2486   assert_cond(masm != nullptr);
2487 
2488   address start   = __ pc();
2489   address call_pc = nullptr;
2490   int frame_size_in_words = -1;
2491   bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
2492   RegisterSaver reg_saver(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */);
2493 
2494   // Save Integer and Float registers.
2495   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2496 
2497   // The following is basically a call_VM.  However, we need the precise
2498   // address of the call in order to generate an oopmap. Hence, we do all the
2499   // work ourselves.
2500 
2501   Label retaddr;
2502   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2503 
  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.
2506 
2507   if (!cause_return) {
2508     // overwrite the return address pushed by save_live_registers
2509     // Additionally, x18 is a callee-saved register so we can look at
2510     // it later to determine if someone changed the return address for
2511     // us!
2512     __ ld(x18, Address(xthread, JavaThread::saved_exception_pc_offset()));
2513     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2514   }
2515 
2516   // Do the call
2517   __ mv(c_rarg0, xthread);
2518   __ rt_call(call_ptr);
2519   __ bind(retaddr);
2520 
2521   // Set an oopmap for the call site.  This oopmap will map all
2522   // oop-registers and debug-info registers as callee-saved.  This
2523   // will allow deoptimization at this safepoint to find all possible
2524   // debug-info recordings, as well as let GC find all oops.
2525 
2526   oop_maps->add_gc_map( __ pc() - start, map);
2527 
2528   Label noException;
2529 
2530   __ reset_last_Java_frame(false);
2531 
2532   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2533 
2534   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2535   __ beqz(t0, noException);
2536 
2537   // Exception pending
2538 
2539   reg_saver.restore_live_registers(masm);
2540 
2541   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2542 
2543   // No exception case
2544   __ bind(noException);
2545 
2546   Label no_adjust, bail;
2547   if (!cause_return) {
2548     // If our stashed return pc was modified by the runtime we avoid touching it
2549     __ ld(t0, Address(fp, frame::return_addr_offset * wordSize));
2550     __ bne(x18, t0, no_adjust);
2551 
2552 #ifdef ASSERT
2553     // Verify the correct encoding of the poll we're about to skip.
2554     // See NativeInstruction::is_lwu_to_zr()
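    // Decoding sketch (the concrete encoding value is illustrative; the
    // checks below ignore rs1 and the immediate): `lwu zr, 0(t1)` assembles
    // to 0x00036003 -- opcode 0b0000011 in bits 0-6, rd == zr (0b00000) in
    // bits 7-11, funct3 == 0b110 (LWU) in bits 12-14, which is exactly what
    // the three field checks verify.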
2555     __ lwu(t0, Address(x18));
2556     __ andi(t1, t0, 0b1111111);
2557     __ mv(t2, 0b0000011);
2558     __ bne(t1, t2, bail); // 0-6:0b0000011
2559     __ srli(t1, t0, 7);
2560     __ andi(t1, t1, 0b11111);
2561     __ bnez(t1, bail);    // 7-11:0b00000
2562     __ srli(t1, t0, 12);
2563     __ andi(t1, t1, 0b111);
2564     __ mv(t2, 0b110);
2565     __ bne(t1, t2, bail); // 12-14:0b110
2566 #endif
2567 
2568     // Adjust return pc forward to step over the safepoint poll instruction
2569     __ addi(x18, x18, NativeInstruction::instruction_size);
2570     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2571   }
2572 
2573   __ bind(no_adjust);
2574   // Normal exit, restore registers and exit.
2575 
2576   reg_saver.restore_live_registers(masm);
2577   __ ret();
2578 
2579 #ifdef ASSERT
2580   __ bind(bail);
2581   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2582 #endif
2583 
2584   // Make sure all code is generated
2585   masm->flush();
2586 
2587   // Fill-out other meta info
2588   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2589 }
2590 
2591 //
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2593 //
2594 // Generate a stub that calls into vm to find out the proper destination
2595 // of a java call. All the argument registers are live at this point
2596 // but since this is generic code we don't know what they are and the caller
2597 // must do any gc of the args.
2598 //
2599 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
2600   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2601   assert(is_resolve_id(id), "expected a resolve stub id");
2602 
2603   // allocate space for the code
2604   ResourceMark rm;
2605 
2606   const char* name = SharedRuntime::stub_name(id);
2607   CodeBuffer buffer(name, 1000, 512);
2608   MacroAssembler* masm = new MacroAssembler(&buffer);
2609   assert_cond(masm != nullptr);
2610 
2611   int frame_size_in_words = -1;
2612   RegisterSaver reg_saver(false /* save_vectors */);
2613 
2614   OopMapSet *oop_maps = new OopMapSet();
2615   assert_cond(oop_maps != nullptr);
2616   OopMap* map = nullptr;
2617 
2618   int start = __ offset();
2619 
2620   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2621 
2622   int frame_complete = __ offset();
2623 
2624   {
2625     Label retaddr;
2626     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2627 
2628     __ mv(c_rarg0, xthread);
2629     __ rt_call(destination);
2630     __ bind(retaddr);
2631   }
2632 
2633   // Set an oopmap for the call site.
2634   // We need this not only for callee-saved registers, but also for volatile
2635   // registers that the compiler might be keeping live across a safepoint.
2636 
2637   oop_maps->add_gc_map( __ offset() - start, map);
2638 
2639   // x10 contains the address we are going to jump to assuming no exception got installed
2640 
2641   // clear last_Java_sp
2642   __ reset_last_Java_frame(false);
2643   // check for pending exceptions
2644   Label pending;
2645   __ ld(t1, Address(xthread, Thread::pending_exception_offset()));
2646   __ bnez(t1, pending);
2647 
2648   // get the returned Method*
2649   __ get_vm_result_2(xmethod, xthread);
2650   __ sd(xmethod, Address(sp, reg_saver.reg_offset_in_bytes(xmethod)));
2651 
  // x10 is where we want to jump; overwrite t1's save slot (t1 is saved and temporary)
2653   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t1)));
2654   reg_saver.restore_live_registers(masm);
2655 
2656   // We are back to the original state on entry and ready to go.
2657   __ jr(t1);
2658 
2659   // Pending exception after the safepoint
2660 
2661   __ bind(pending);
2662 
2663   reg_saver.restore_live_registers(masm);
2664 
2665   // exception pending => remove activation and forward to exception handler
2666 
2667   __ sd(zr, Address(xthread, JavaThread::vm_result_offset()));
2668 
2669   __ ld(x10, Address(xthread, Thread::pending_exception_offset()));
2670   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2671 
2672   // -------------
2673   // make sure all code is generated
2674   masm->flush();
2675 
  // return the blob
2677   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2678 }
2679 
2680 // Continuation point for throwing of implicit exceptions that are
2681 // not handled in the current activation. Fabricates an exception
2682 // oop and initiates normal exception dispatching in this
2683 // frame. Since we need to preserve callee-saved values (currently
2684 // only for C2, but done for C1 as well) we need a callee-saved oop
2685 // map and therefore have to make these stubs into RuntimeStubs
2686 // rather than BufferBlobs.  If the compiler needs all registers to
2687 // be preserved between the fault point and the exception handler
2688 // then it must assume responsibility for that in
2689 // AbstractCompiler::continuation_for_implicit_null_exception or
2690 // continuation_for_implicit_division_by_zero_exception. All other
2691 // implicit exceptions (e.g., NullPointerException or
2692 // AbstractMethodError on entry) are either at call sites or
2693 // otherwise assume that stack unwinding will be initiated, so
2694 // caller saved registers were assumed volatile in the compiler.
2695 
2696 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
2697   assert(is_throw_id(id), "expected a throw stub id");
2698 
2699   const char* name = SharedRuntime::stub_name(id);
2700 
2701   // Information about frame layout at time of blocking runtime call.
2702   // Note that we only have to preserve callee-saved registers since
2703   // the compilers are responsible for supplying a continuation point
2704   // if they expect all registers to be preserved.
2705   // n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0
2706   assert_cond(runtime_entry != nullptr);
2707   enum layout {
2708     fp_off = 0,
2709     fp_off2,
2710     return_off,
2711     return_off2,
2712     framesize // inclusive of return address
2713   };

  const int insts_size = 1024;
  const int locs_size  = 64;

  ResourceMark rm;
  const char* timer_msg = "SharedRuntime generate_throw_exception";
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));

  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps  = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);
  assert_cond(oop_maps != nullptr && masm != nullptr);

  address start = __ pc();

  // This is an inlined and slightly modified version of call_VM
  // which has the ability to fetch the return PC out of
  // thread-local storage and also sets up last_Java_sp slightly
  // differently than the real call_VM.
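  // In particular, the_pc below is captured explicitly so that the same
  // pc can serve both as the recorded last_Java_pc and as the offset at
  // which the oop map is registered.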

  __ enter(); // Save FP and RA before call

  assert(is_even(framesize / 2), "sp not 16-byte aligned");

  // ra and fp are already in place
  __ subi(sp, fp, (unsigned)framesize << LogBytesPerInt); // prolog
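  // With framesize == 4 and LogBytesPerInt == 2 this is sp = fp - 16,
  // i.e. exactly the fp/ra save area that enter() just created; no
  // additional stack space is allocated.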

  int frame_complete = __ pc() - start;

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, fp, the_pc, t0);

  // Call runtime
  __ mv(c_rarg0, xthread);
  BLOCK_COMMENT("call runtime_entry");
  __ rt_call(runtime_entry);

  // Generate oop map
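  // The map records no registers: as noted above, the compilers take
  // responsibility for any values they need preserved across the fault
  // point, and this frame holds no oops of its own.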
  OopMap* map = new OopMap(framesize, 0);
  assert_cond(map != nullptr);

  oop_maps->add_gc_map(the_pc - start, map);

  __ reset_last_Java_frame(true);

  __ leave();

  // check for pending exceptions
#ifdef ASSERT
  Label L;
  __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
  __ bnez(t0, L);
  __ should_not_reach_here();
  __ bind(L);
#endif // ASSERT
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
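  // Here that is framesize >> 1: four 32-bit slots become two 64-bit words.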
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name,
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  assert(stub != nullptr, "failed to create runtime stub");
  return stub;
}

#if INCLUDE_JFR

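// Record the current Java frame and pass the thread as the first C
// argument; shared by both JFR stubs below.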
static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
  __ set_last_Java_frame(sp, fp, the_pc, t0);
  __ mv(c_rarg0, thread);
}

static void jfr_epilogue(MacroAssembler* masm) {
  __ reset_last_Java_frame(true);
}

// For c2: c_rarg0 is junk; call into the runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
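// On return from the leaf call the handle sits in x10 (the RISC-V
// return register); resolve_global_jobject below replaces it with the
// event writer oop before we return to the caller.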
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
  enum layout {
    fp_off,
    fp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;
  const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, xthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);

  jfr_epilogue(masm);
  __ resolve_global_jobject(x10, t0, t1);
  __ leave();
  __ ret();

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

// For c2: call to return a leased buffer.
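// Unlike the checkpoint stub, nothing is returned through a handle,
// so no jobject resolution is needed after the leaf call.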
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
  enum layout {
    fp_off,
    fp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;
  const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, xthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);

  jfr_epilogue(masm);
  __ leave();
  __ ret();

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

#endif // INCLUDE_JFR