/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_riscv.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(UseRVV && save_vectors) {}
  ~RegisterSaver() {}
  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own
  // gregs: 28, float registers: 32; except x1(ra) & x2(sp) & x3(gp) & x4(tp)
  // |---v0---|<---SP
  // |---v1---| save vectors only in generate_handler_blob
  // |-- .. --|
  // |---v31--|-----
  // |---f0---|
  // |---f1---|
  // |   ..   |
  // |---f31--|
  // |---reserved slot for stack alignment---|
  // |---x5---|
  // |   x6   |
  // |---.. --|
  // |---x31--|
  // |---fp---|
  // |---ra---|
  int v0_offset_in_bytes(void) { return 0; }
  int f0_offset_in_bytes(void) {
    int f0_offset = 0;
#ifdef COMPILER2
    if (_save_vectors) {
      f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegister::number_of_registers *
                   BytesPerInt;
    }
#endif
    return f0_offset;
  }
  int reserved_slot_offset_in_bytes(void) {
    return f0_offset_in_bytes() +
           FloatRegister::max_slots_per_register *
           FloatRegister::number_of_registers *
           BytesPerInt;
  }

  int reg_offset_in_bytes(Register r) {
    assert (r->encoding() > 4, "ra, sp, gp and tp not saved");
    return reserved_slot_offset_in_bytes() + (r->encoding() - 4 /* x1, x2, x3, x4 */) * wordSize;
  }

  int freg_offset_in_bytes(FloatRegister f) {
    return f0_offset_in_bytes() + f->encoding() * wordSize;
  }

  int ra_offset_in_bytes(void) {
    return reserved_slot_offset_in_bytes() +
           (Register::number_of_registers - 3) *
           Register::max_slots_per_register *
           BytesPerInt;
  }
};
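
// A worked example of the offsets above (a sketch, not normative): assuming
// no vectors are saved (_save_vectors == false), 32 float registers of
// FloatRegister::max_slots_per_register == 2 slots each, BytesPerInt == 4
// and wordSize == 8, the accessors compute to:
//
//   f0_offset_in_bytes()            = 0
//   reserved_slot_offset_in_bytes() = 0 + 2 * 32 * 4         = 256
//   reg_offset_in_bytes(x5)         = 256 + (5 - 4) * 8      = 264
//   ra_offset_in_bytes()            = 256 + (32 - 3) * 2 * 4 = 488
//
// i.e. ra sits 488 bytes above SP: 256 bytes of float state, then the
// alignment slot, x5..x31 (27 registers) and fp, each one word.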

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  int vector_size_in_bytes = 0;
  int vector_size_in_slots = 0;
#ifdef COMPILER2
  if (_save_vectors) {
    vector_size_in_bytes += Matcher::scalable_vector_reg_size(T_BYTE);
    vector_size_in_slots += Matcher::scalable_vector_reg_size(T_INT);
  }
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer, Float and Vector registers.
  __ enter();
  __ push_CPU_state(_save_vectors, vector_size_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_maps != nullptr && oop_map != nullptr);

  int sp_offset_in_slots = 0;
  int step_in_slots = 0;
  if (_save_vectors) {
    step_in_slots = vector_size_in_slots;
    for (int i = 0; i < VectorRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
      VectorRegister r = as_VectorRegister(i);
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
    }
  }

  step_in_slots = FloatRegister::max_slots_per_register;
  for (int i = 0; i < FloatRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    FloatRegister r = as_FloatRegister(i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
  }

  step_in_slots = Register::max_slots_per_register;
  // skip the slot reserved for alignment, see MacroAssembler::push_reg;
  // also skip x5 ~ x6 on the stack because they are caller-saved registers.
  sp_offset_in_slots += Register::max_slots_per_register * 3;
  // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack.
  for (int i = 7; i < Register::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    Register r = as_Register(i);
    if (r != xthread) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg());
    }
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ leave();
}
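
// A minimal usage sketch (hypothetical; this mirrors how the safepoint and
// handler blobs elsewhere in HotSpot drive this class), showing the intended
// save/call/restore bracket:
//
//   RegisterSaver reg_saver(false /* save_vectors */);
//   int frame_size_in_words;
//   OopMap* map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
//   // ... emit the VM call; register 'map' as the oopmap for that call site ...
//   reg_saver.restore_live_registers(masm);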

// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not overlay the floating-point registers on vector registers as aarch64 does.
bool SharedRuntime::is_wide_vector(int size) {
  return UseRVV;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as frame sizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static JNI
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the Java ABI we ought to at
// least get some advantage out of it.
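
// For illustration, a worked example (a hypothetical signature, derived by
// hand from the switch below): for a method taking (int, long, float, Object),
// java_calling_convention() produces
//
//   i  sig_bt[i]  assignment
//   0  T_INT      j_rarg0 (set1: one 32-bit half used)
//   1  T_LONG     j_rarg1 (set2: full 64-bit register)
//   2  T_VOID     BAD     (second half of the long)
//   3  T_FLOAT    j_farg0 (set1)
//   4  T_OBJECT   j_rarg2 (set2)
//
// and returns 0, since all five entries fit in registers.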

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3,
    j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN: // fall through
      case T_CHAR:    // fall through
      case T_BYTE:    // fall through
      case T_SHORT:   // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_VOID:
        // halves of T_LONG or T_DOUBLE
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
  __ beqz(t0, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mv(c_rarg0, xmethod);
  __ mv(c_rarg1, ra);
  __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mv(x19_sender_sp, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2 * wordSize);

  if (extraspace) {
    __ sub(sp, sp, extraspace);
  }

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java long/double in
    // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
    // leaves one slot empty and only stores to a single slot. In this case the
    // slot that is occupied is the T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use t0
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        __ lwu(t0, Address(sp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
      } else {
        __ ld(t0, Address(sp, ld_off), /*temp register*/esp);

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ sd(t0, Address(sp, next_off), /*temp register*/esp);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaaaul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        } else {
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller), so move only 32 bits to the slot
        __ sd(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaabul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
          __ sd(r, Address(sp, next_off));
        } else {
          __ sd(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ fsw(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mv(t0, 0xdeadffffdeadaaacul);
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        __ fsd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mv(esp, sp); // Interp expects args on caller's expression stack

  __ ld(t1, Address(xmethod, in_bytes(Method::interpreter_entry_offset())));
  __ jr(t1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  // Note: x19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
  if (comp_args_on_stack != 0) {
    __ sub(t0, sp, comp_words_on_stack * wordSize);
    __ andi(sp, t0, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld(t1, Address(xmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ld(t0, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ beqz(t0, no_alternative_target);
    __ mv(t1, t0);
    __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for the return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lw(t0, Address(esp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed with negative offsets, so the LSW is at the LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;
        __ ld(t0, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ld(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ lw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ flw(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ fld(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ push_cont_fastpath(xthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ sd(xmethod, Address(xthread, JavaThread::callee_target_offset()));

  __ jr(t1);
}

// ---------------------------------------------------------------

void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                            int total_args_passed,
                                            int comp_args_on_stack,
                                            const BasicType *sig_bt,
                                            const VMRegPair *regs,
                                            AdapterHandlerEntry* handler) {
  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  const Register receiver = j_rarg0;
  const Register data = t0;

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know xmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");

    __ ic_check();
    __ ld(xmethod, Address(data, CompiledICData::speculated_method_offset()));

    __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
    __ beqz(t0, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
      __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
      __ beqz(t1, L_skip_barrier); // non-static
    }

    __ load_method_holder(t1, xmethod);
    __ clinit_barrier(t1, t0, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  handler->set_entry_points(i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
  return;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  assert(total_args_passed <= Argument::n_vector_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  // See https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc for more info
  static const VectorRegister VEC_ArgReg[Argument::n_vector_register_parameters_c] = {
    v8, v9, v10, v11, v12, v13, v14, v15,
    v16, v17, v18, v19, v20, v21, v22, v23
  };

  const int next_reg_val = 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}
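
// Informal note (a gloss on the code above, not authoritative): v8..v23 are
// the vector argument registers of the RISC-V vector calling convention, so
// each argument simply occupies one whole vector register here. The
// set_pair(vmreg->next(3), vmreg) call records the value as spanning four
// 32-bit VMReg slots, which appears to be the minimum footprint a scalable
// vector register is modelled with; no stack slots are used, hence return 0.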

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3,
    c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:  // fall through
      case T_CHAR:     // fall through
      case T_BYTE:     // fall through
      case T_SHORT:    // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:   // fall through
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}
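
// A hedged illustration of the fallback chain above (worked by hand, not
// normative): per the RISC-V psABI, a T_FLOAT/T_DOUBLE argument goes to
// c_farg0..c_farg7 first; once those are exhausted it spills into the
// *integer* argument registers, and only onto the stack after those run out
// too. So for a hypothetical C signature with nine doubles, the first eight
// land in c_farg0..c_farg7 and the ninth in c_rarg0 (assuming no prior
// integer arguments), with stk_args still 0.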

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the frame pointer,
  // which by this time is free to use.
  switch (ret_type) {
    case T_FLOAT:
      __ fsw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fsd(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ sd(x10, Address(fp, -3 * wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the frame pointer,
  // which by this time is free to use.
  switch (ret_type) {
    case T_FLOAT:
      __ flw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fld(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ ld(x10, Address(fp, -3 * wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ subi(sp, sp, 2 * wordSize);
      __ fsd(args[i].first()->as_FloatRegister(), Address(sp, 0));
    }
  }
  __ push_reg(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    }
  }
  __ pop_reg(x, sp);
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_FloatRegister()) {
      __ fld(args[i].first()->as_FloatRegister(), Address(sp, 0));
      __ addi(sp, sp, 2 * wordSize);
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  const Register temp_reg = x9;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size() / wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots */);

  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(sp, Address(xthread, JavaThread::cont_entry_offset()));

  return map;
}
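
// After continuation_enter_setup() the enter frame looks roughly like this
// (a sketch; the exact field layout inside the metadata block is owned by
// the ContinuationEntry class, not by this code):
//
//      | saved ra            |
//      | saved fp            | <- fp
//      | ContinuationEntry   |   cookie, cont, flags, chunk, argsize,
//      |   metadata          |   pin_count, parent_* saves, parent
//      |                     | <- sp == JavaThread::_cont_entry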

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ mv(t0, ContinuationEntry::cookie_value());
  __ sw(t0, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ sd(c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ sw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ sd(zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ sw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ sw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ld(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ld(t0, Address(xthread, JavaThread::held_monitor_count_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ sd(zr, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ sd(zr, Address(xthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, fp points to the spilled fp + 2 * wordSize in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ beq(sp, t0, OK);
  __ stop("incorrect sp");
  __ bind(OK);
#endif

  __ ld(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_fastpath_offset()));

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ld(t0, Address(xthread, JavaThread::jni_monitor_count_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // Save return value potentially containing the exception oop in callee-saved x9
    __ mv(x9, x10);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value
    __ mv(x10, x9);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination. The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwu(t0, Address(sp, ContinuationEntry::flags_offset()));
    __ beqz(t0, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
    __ sd(zr, Address(xthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#endif

  __ ld(t0, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ sd(t0, Address(xthread, JavaThread::held_monitor_count_offset()));

  __ ld(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ add(fp, sp, (int)ContinuationEntry::size() + 2 * wordSize /* 2 extra words to match up with leave() */);
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
  // verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used at interp_only_mode only
  interpreted_entry_offset = __ pc() - start;
  {
#ifdef ASSERT
    Label is_interp_only;
    __ lw(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
    __ bnez(t0, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ld(c_rarg1, Address(esp, Interpreter::stackElementSize * 2));
    __ ld(c_rarg2, Address(esp, Interpreter::stackElementSize * 1));
    __ ld(c_rarg3, Address(esp, Interpreter::stackElementSize * 0));
    __ push_cont_fastpath(xthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
    // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ bnez(c_rarg2, call_thaw);

    // Make sure the call is patchable
    __ align(NativeInstruction::instruction_size);

    const address tr_call = __ reloc_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ j(exit);

    address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ bnez(c_rarg2, call_thaw);

  // Make sure the call is patchable
  __ align(NativeInstruction::instruction_size);

  const address tr_call = __ reloc_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ j(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  ContinuationEntry::_cleanup_offset = __ pc() - start;
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret();

  // exception handling
  exception_offset = __ pc() - start;
  {
    __ mv(x9, x10); // save return value containing the exception oop in callee-saved x9

    continuation_enter_cleanup(masm);

    __ ld(c_rarg1, Address(fp, -1 * wordSize)); // return address
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, c_rarg1);

    // see OptoRuntime::generate_exception_blob: x10 -- exception oop, x13 -- exception pc

    __ mv(x11, x10); // the exception handler
    __ mv(x10, x9); // restore return value containing the exception oop
    __ verify_oop(x10);

    __ leave();
    __ mv(x13, ra);
    __ jr(x11); // the exception handler
  }

  address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
  enum layout {
    fp_off,
    fp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };
  // assert(is_even(framesize/2), "sp not 16-byte aligned");

  stack_slots = framesize / VMRegImpl::slots_per_word;
  assert(stack_slots == 2, "recheck layout");

  address start = __ pc();

  compiled_entry_offset = __ pc() - start;
  __ enter();

  __ mv(c_rarg1, sp);

  frame_complete = __ pc() - start;
  address the_pc = __ pc();

  __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup

  __ mv(c_rarg0, xthread);
  __ set_last_Java_frame(sp, fp, the_pc, t0);
  __ call_VM_leaf(Continuation::freeze_entry(), 2);
  __ reset_last_Java_frame(true);

  Label pinned;

  __ bnez(x10, pinned);

  // We've succeeded, set sp to the ContinuationEntry
  __ ld(sp, Address(xthread, JavaThread::cont_entry_offset()));
  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ beqz(t0, ok);
  __ leave();
  __ j(RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ bind(ok);

  __ leave();
  __ ret();

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = x9;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = x9;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = x12;  // known to be free at this point
      __ ld(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc()) SharedRuntime::block_for_jni_critical()
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_continuation_native_intrinsic()) {
    int exception_offset = -1;
    OopMapSet* oop_maps = new OopMapSet();
    int frame_complete = -1;
    int stack_slots = -1;
    int interpreted_entry_offset = -1;
    int vep_offset = -1;
    if (method->is_continuation_enter_intrinsic()) {
      gen_continuation_enter(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             exception_offset,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             interpreted_entry_offset,
                             vep_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      gen_continuation_yield(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             vep_offset);
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }

#ifdef ASSERT
    if (method->is_continuation_enter_intrinsic()) {
      assert(interpreted_entry_offset != -1, "Must be set");
      assert(exception_offset != -1,         "Must be set");
    } else {
      assert(interpreted_entry_offset == -1, "Must be unset");
      assert(exception_offset == -1,         "Must be unset");
    }
    assert(frame_complete != -1,    "Must be set");
    assert(stack_slots != -1,       "Must be set");
    assert(vep_offset != -1,        "Must be set");
#endif

    __ flush();
    nmethod* nm = nmethod::new_native_nmethod(method,
                                              compile_id,
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots,
                                              in_ByteSize(-1),
                                              in_ByteSize(-1),
                                              oop_maps,
                                              exception_offset);
    if (nm == nullptr) return nm;
    if (method->is_continuation_enter_intrinsic()) {
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      _cont_doYield_stub = nm;
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }
    return nm;
  }

  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    {
      Assembler::IncompressibleScope scope(masm); // keep the nop as 4 bytes for patching.
      MacroAssembler::assert_alignment(__ pc());
      __ nop();  // 4 bytes
    }
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)nullptr);
  }
  address native_func = method->native_function();
  assert(native_func != nullptr, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  intptr_t start = (intptr_t)__ pc();

1357   // We have received a description of where all the Java args are located
1358   // on entry to the wrapper. We need to convert these args to where
1359   // the jni function will expect them. To figure out where they go
1360   // we convert the java signature to a C signature by inserting
1361   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1362 
1363   const int total_in_args = method->size_of_parameters();
1364   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1365 
1366   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1367   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1368 
1369   int argc = 0;
1370   out_sig_bt[argc++] = T_ADDRESS;
1371   if (method->is_static()) {
1372     out_sig_bt[argc++] = T_OBJECT;
1373   }
1374 
1375   for (int i = 0; i < total_in_args ; i++) {
1376     out_sig_bt[argc++] = in_sig_bt[i];
1377   }
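       // For example (illustrative): a static native method with signature (IJ)V
       // has in_sig_bt = { T_INT, T_LONG, T_VOID } and converts to
       //   out_sig_bt = { T_ADDRESS, T_OBJECT, T_INT, T_LONG, T_VOID }
       // i.e. (JNIEnv*, jclass, jint, jlong); the T_VOID is the long's second half.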
1378 
1379   // Now figure out where the args must be stored and how much stack space
1380   // they require.
1381   int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1382 
1383   // Compute framesize for the wrapper.  We need to handlize all oops in
1384   // incoming registers
1385 
1386   // Calculate the total number of stack slots we will need.
1387 
1388   // First count the abi requirement plus all of the outgoing args
1389   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1390 
1391   // Now the space for the inbound oop handle area
1392   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1393 
1394   int oop_handle_offset = stack_slots;
1395   stack_slots += total_save_slots;
1396 
1397   // Now any space we need for handlizing a klass if static method
1398 
1399   int klass_slot_offset = 0;
1400   int klass_offset = -1;
1401   int lock_slot_offset = 0;
1402   bool is_static = false;
1403 
1404   if (method->is_static()) {
1405     klass_slot_offset = stack_slots;
1406     stack_slots += VMRegImpl::slots_per_word;
1407     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1408     is_static = true;
1409   }
1410 
1411   // Plus a lock if needed
1412 
1413   if (method->is_synchronized()) {
1414     lock_slot_offset = stack_slots;
1415     stack_slots += VMRegImpl::slots_per_word;
1416   }
1417 
1418   // Now a place (+2) to save return values or temp during shuffling
1419   // + 4 for return address (which we own) and saved fp
1420   stack_slots += 6;
1421 
1422   // Ok The space we have allocated will look like:
1423   //
1424   //
1425   // FP-> |                     |
1426   //      | 2 slots (ra)        |
1427   //      | 2 slots (fp)        |
1428   //      |---------------------|
1429   //      | 2 slots for moves   |
1430   //      |---------------------|
1431   //      | lock box (if sync)  |
1432   //      |---------------------| <- lock_slot_offset
1433   //      | klass (if static)   |
1434   //      |---------------------| <- klass_slot_offset
1435   //      | oopHandle area      |
1436   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1437   //      | outbound memory     |
1438   //      | based arguments     |
1439   //      |                     |
1440   //      |---------------------|
1441   //      |                     |
1442   // SP-> | out_preserved_slots |
1443   //
1444   //
1445 
1446 
1447   // Now compute the actual number of stack slots we need, rounding to keep
1448   // the stack properly aligned.
1449   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
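       // E.g. with 16-byte stack alignment and 4-byte stack slots,
       // StackAlignmentInSlots is 4, so stack_slots is rounded up to a
       // multiple of 4 slots (one 16-byte unit).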
1450 
1451   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1452 
1453   // First thing make an ic check to see if we should even be here
1454 
1455   // We are free to use all registers as temps without saving them and
1456   // restoring them except fp. fp is the only callee save register
1457   // as far as the interpreter and the compiler(s) are concerned.
1458 
1459   const Register receiver = j_rarg0;
1460 
1461   __ verify_oop(receiver);
1462   assert_different_registers(receiver, t0, t1);
1463 
1464   __ ic_check();
1465 
1466   int vep_offset = ((intptr_t)__ pc()) - start;
1467 
1468   // If we have to make this method not-entrant we'll overwrite its
1469   // first instruction with a jump.
1470   {
1471     Assembler::IncompressibleScope scope(masm); // keep the nop as 4 bytes for patching.
1472     MacroAssembler::assert_alignment(__ pc());
1473     __ nop();  // 4 bytes
1474   }
1475 
1476   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1477     Label L_skip_barrier;
1478     __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
1479     __ clinit_barrier(t1, t0, &L_skip_barrier);
1480     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1481 
1482     __ bind(L_skip_barrier);
1483   }
1484 
1485   // Generate stack overflow check
1486   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1487 
1488   // Generate a new frame for the wrapper.
1489   __ enter();
1490   // -2 because return address is already present and so is saved fp
1491   __ sub(sp, sp, stack_size - 2 * wordSize);
1492 
1493   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1494   assert_cond(bs != nullptr);
1495   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1496 
1497   // Frame is now completed as far as size and linkage.
1498   int frame_complete = ((intptr_t)__ pc()) - start;
1499 
1500   // We use x18 as the oop handle for the receiver/klass
1501   // It is callee save so it survives the call to native
1502 
1503   const Register oop_handle_reg = x18;
1504 
1505   //
1506   // We immediately shuffle the arguments so that any vm call we have to
1507   // make from here on out (sync slow path, jvmti, etc.) we will have
1508   // captured the oops from our caller and have a valid oopMap for
1509   // them.
1510 
1511   // -----------------
1512   // The Grand Shuffle
1513 
1514   // The Java calling convention is either equal (linux) or denser (win64) than the
1515   // c calling convention. However, because of the jni_env argument the c calling
1516   // convention always has at least one more (and two for static) arguments than Java.
1517   // Therefore if we move the args from java -> c backwards then we will never have
1518   // a register->register conflict and we don't have to build a dependency graph
1519   // and figure out how to break any cycles.
1520   //
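       // Concretely (a sketch, not tied to exact register numbers): each outgoing
       // C slot sits one position (two for static methods) above its incoming Java
       // slot, so moving the last argument first guarantees every destination has
       // already been read as a source before it is overwritten.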
1521 
1522   // Record esp-based slot for receiver on stack for non-static methods
1523   int receiver_offset = -1;
1524 
1525   // This is a trick. We double the stack slots so we can claim
1526   // the oops in the caller's frame. Since we are sure to have
1527   // more args than the caller, doubling is enough to make
1528   // sure we can capture all the incoming oop args from the
1529   // caller.
1530   //
1531   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1532   assert_cond(map != nullptr);
1533 
1534   int float_args = 0;
1535   int int_args = 0;
1536 
1537 #ifdef ASSERT
1538   bool reg_destroyed[Register::number_of_registers];
1539   bool freg_destroyed[FloatRegister::number_of_registers];
1540   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1541     reg_destroyed[r] = false;
1542   }
1543   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1544     freg_destroyed[f] = false;
1545   }
1546 
1547 #endif /* ASSERT */
1548 
1549   // For JNI natives the incoming and outgoing registers are offset upwards.
1550   GrowableArray<int> arg_order(2 * total_in_args);
1551 
1552   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1553     arg_order.push(i);
1554     arg_order.push(c_arg);
1555   }
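       // E.g. an instance method with three incoming args yields the pairs
       // (2,3), (1,2), (0,1): last Java arg first, each with its C slot.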
1556 
1557   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1558     int i = arg_order.at(ai);
1559     int c_arg = arg_order.at(ai + 1);
1560     __ block_comment(err_msg("mv %d -> %d", i, c_arg));
1561     assert(c_arg != -1 && i != -1, "wrong order");
1562 #ifdef ASSERT
1563     if (in_regs[i].first()->is_Register()) {
1564       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1565     } else if (in_regs[i].first()->is_FloatRegister()) {
1566       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1567     }
1568     if (out_regs[c_arg].first()->is_Register()) {
1569       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1570     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1571       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1572     }
1573 #endif /* ASSERT */
1574     switch (in_sig_bt[i]) {
1575       case T_ARRAY:
1576       case T_OBJECT:
1577         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1578                        ((i == 0) && (!is_static)),
1579                        &receiver_offset);
1580         int_args++;
1581         break;
1582       case T_VOID:
1583         break;
1584 
1585       case T_FLOAT:
1586         __ float_move(in_regs[i], out_regs[c_arg]);
1587         float_args++;
1588         break;
1589 
1590       case T_DOUBLE:
1591         assert( i + 1 < total_in_args &&
1592                 in_sig_bt[i + 1] == T_VOID &&
1593                 out_sig_bt[c_arg + 1] == T_VOID, "bad arg list");
1594         __ double_move(in_regs[i], out_regs[c_arg]);
1595         float_args++;
1596         break;
1597 
1598       case T_LONG :
1599         __ long_move(in_regs[i], out_regs[c_arg]);
1600         int_args++;
1601         break;
1602 
1603       case T_ADDRESS:
1604         assert(false, "found T_ADDRESS in java args");
1605         break;
1606 
1607       default:
1608         __ move32_64(in_regs[i], out_regs[c_arg]);
1609         int_args++;
1610     }
1611   }
1612 
1613   // point c_arg at the first arg that is already loaded in case we
1614   // need to spill before we call out
1615   int c_arg = total_c_args - total_in_args;
1616 
1617   // Pre-load a static method's oop into c_rarg1.
1618   if (method->is_static()) {
1619 
1620     //  load oop into a register
1621     __ movoop(c_rarg1,
1622               JNIHandles::make_local(method->method_holder()->java_mirror()));
1623 
1624     // Now handlize the static class mirror; it's known not-null.
1625     __ sd(c_rarg1, Address(sp, klass_offset));
1626     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1627 
1628     // Now get the handle
1629     __ la(c_rarg1, Address(sp, klass_offset));
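         // (A JNI handle is the address of a slot containing the oop; the GC can
         // update the oop in the slot while native code holds the slot address.)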
1630     // and protect the arg if we must spill
1631     c_arg--;
1632   }
1633 
1634   // Change state to native (we save the return address in the thread, since it might not
1635   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1636   // points into the right code segment. It does not have to be the correct return pc.
1637   // We use the same pc/oopMap repeatedly when we call out.
1638 
1639   Label native_return;
1640   if (method->is_object_wait0()) {
1641     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1642     __ set_last_Java_frame(sp, noreg, native_return, t0);
1643   } else {
1644     intptr_t the_pc = (intptr_t) __ pc();
1645     oop_maps->add_gc_map(the_pc - start, map);
1646 
1647     __ set_last_Java_frame(sp, noreg, __ pc(), t0);
1648   }
1649 
1650   Label dtrace_method_entry, dtrace_method_entry_done;
1651   if (DTraceMethodProbes) {
1652     __ j(dtrace_method_entry);
1653     __ bind(dtrace_method_entry_done);
1654   }
1655 
1656   // RedefineClasses() tracing support for obsolete method entry
1657   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1658     // protect the args we've loaded
1659     save_args(masm, total_c_args, c_arg, out_regs);
1660     __ mov_metadata(c_rarg1, method());
1661     __ call_VM_leaf(
1662       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1663       xthread, c_rarg1);
1664     restore_args(masm, total_c_args, c_arg, out_regs);
1665   }
1666 
1667   // Lock a synchronized method
1668 
1669   // Register definitions used by locking and unlocking
1670 
1671   const Register swap_reg = x10;
1672   const Register obj_reg  = x9;  // Will contain the oop
1673   const Register lock_reg = x30;  // Address of compiler lock object (BasicLock)
1674   const Register old_hdr  = x30;  // value of old header at unlock time
1675   const Register lock_tmp = x31;  // Temporary used by lightweight_lock/unlock
1676   const Register tmp      = ra;
1677 
1678   Label slow_path_lock;
1679   Label lock_done;
1680 
1681   if (method->is_synchronized()) {
1682     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1683 
1684     // Get the handle (the 2nd argument)
1685     __ mv(oop_handle_reg, c_rarg1);
1686 
1687     // Get address of the box
1688 
1689     __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1690 
1691     // Load the oop from the handle
1692     __ ld(obj_reg, Address(oop_handle_reg, 0));
1693 
1694     __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1695 
1696     // Slow path will re-enter here
1697     __ bind(lock_done);
1698   }
1699 
1700 
1701   // Finally just about ready to make the JNI call
1702 
1703   // get JNIEnv* which is first argument to native
1704   __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1705 
1706   // Now set thread in native
1707   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1708   __ mv(t0, _thread_in_native);
1709   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1710   __ sw(t0, Address(t1));
1711 
1712   // Clobbers t1
1713   __ rt_call(native_func);
1714 
1715   // Verify or restore cpu control state after JNI call
1716   __ restore_cpu_control_state_after_jni(t0);
1717 
1718   // Unpack native results.
1719   if (ret_type != T_OBJECT && ret_type != T_ARRAY) {
1720     __ cast_primitive_type(ret_type, x10);
1721   }
1722 
1723   Label safepoint_in_progress, safepoint_in_progress_done;
1724 
1725   // Switch thread to "native transition" state before reading the synchronization state.
1726   // This additional state is necessary because reading and testing the synchronization
1727   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1728   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1729   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1730   //     Thread A is resumed to finish this native method, but doesn't block here since it
1731   //     didn't see any synchronization in progress, and escapes.
1732   __ mv(t0, _thread_in_native_trans);
1733 
1734   __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
1735 
1736   // Force this write out before the read below
1737   if (!UseSystemMemoryBarrier) {
1738     __ membar(MacroAssembler::AnyAny);
1739   }
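       // (The store of _thread_in_native_trans above must be ordered before the
       // safepoint poll load below; the full fence supplies that StoreLoad
       // ordering, unless system-wide memory barriers are used instead.)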
1740 
1741   // check for safepoint operation in progress and/or pending suspend requests
1742   {
1743     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
1744     __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
1745     __ bnez(t0, safepoint_in_progress);
1746     __ bind(safepoint_in_progress_done);
1747   }
1748 
1749   // change thread state
1750   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1751   __ mv(t0, _thread_in_Java);
1752   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1753   __ sw(t0, Address(t1));
1754 
1755   if (method->is_object_wait0()) {
1756     // Check preemption for Object.wait()
1757     __ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1758     __ beqz(t1, native_return);
1759     __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1760     __ jr(t1);
1761     __ bind(native_return);
1762 
1763     intptr_t the_pc = (intptr_t) __ pc();
1764     oop_maps->add_gc_map(the_pc - start, map);
1765   }
1766 
1767   Label reguard;
1768   Label reguard_done;
1769   __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1770   __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1771   __ beq(t0, t1, reguard);
1772   __ bind(reguard_done);
1773 
1774   // native result if any is live
1775 
1776   // Unlock
1777   Label unlock_done;
1778   Label slow_path_unlock;
1779   if (method->is_synchronized()) {
1780 
1781     // Get locked oop from the handle we passed to jni
1782     __ ld(obj_reg, Address(oop_handle_reg, 0));
1783 
1784     // Must save x10 if it is live now because cmpxchg must use it
1785     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1786       save_native_result(masm, ret_type, stack_slots);
1787     }
1788 
1789     __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1790 
1791     // slow path re-enters here
1792     __ bind(unlock_done);
1793     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1794       restore_native_result(masm, ret_type, stack_slots);
1795     }
1796   }
1797 
1798   Label dtrace_method_exit, dtrace_method_exit_done;
1799   if (DTraceMethodProbes) {
1800     __ j(dtrace_method_exit);
1801     __ bind(dtrace_method_exit_done);
1802   }
1803 
1804   __ reset_last_Java_frame(false);
1805 
1806   // Unbox oop result, e.g. JNIHandles::resolve result.
1807   if (is_reference_type(ret_type)) {
1808     __ resolve_jobject(x10, x11, x12);
1809   }
1810 
1811   if (CheckJNICalls) {
1812     // clear_pending_jni_exception_check
1813     __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
1814   }
1815 
1816   // reset handle block
1817   __ ld(x12, Address(xthread, JavaThread::active_handles_offset()));
1818   __ sd(zr, Address(x12, JNIHandleBlock::top_offset()));
1819 
1820   __ leave();
1821 
1822 #if INCLUDE_JFR
1823   // We need to do a poll test after unwind in case the sampler
1824   // managed to sample the native frame after returning to Java.
1825   Label L_return;
1826   __ ld(t0, Address(xthread, JavaThread::polling_word_offset()));
1827   address poll_test_pc = __ pc();
1828   __ relocate(relocInfo::poll_return_type);
1829   __ test_bit(t0, t0, log2i_exact(SafepointMechanism::poll_bit()));
1830   __ beqz(t0, L_return);
1831   assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
1832          "polling page return stub not created yet");
1833   address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
1834   __ la(t0, InternalAddress(poll_test_pc));
1835   __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
1836   __ far_jump(RuntimeAddress(stub));
1837   __ bind(L_return);
1838 #endif // INCLUDE_JFR
1839 
1840   // Any exception pending?
1841   Label exception_pending;
1842   __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1843   __ bnez(t0, exception_pending);
1844 
1845   // We're done
1846   __ ret();
1847 
1848   // Unexpected paths are out of line and go here
1849 
1851   __ bind(exception_pending);
1852 
1853   // forward the exception
1854   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1855 
1856   // Slow path locking & unlocking
1857   if (method->is_synchronized()) {
1858 
1859     __ block_comment("Slow path lock {");
1860     __ bind(slow_path_lock);
1861 
1862     // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
1863     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1864 
1865     // protect the args we've loaded
1866     save_args(masm, total_c_args, c_arg, out_regs);
1867 
1868     __ mv(c_rarg0, obj_reg);
1869     __ mv(c_rarg1, lock_reg);
1870     __ mv(c_rarg2, xthread);
1871 
1872     // Not a leaf but we have last_Java_frame setup as we want.
1873     // We don't want to unmount in case of contention since that would complicate preserving
1874     // the arguments that had already been marshalled into the native convention. So we force
1875     // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
1876     // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
1877     __ push_cont_fastpath();
1878     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1879     __ pop_cont_fastpath();
1880     restore_args(masm, total_c_args, c_arg, out_regs);
1881 
1882 #ifdef ASSERT
1883     { Label L;
1884       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1885       __ beqz(t0, L);
1886       __ stop("no pending exception allowed on exit from monitorenter");
1887       __ bind(L);
1888     }
1889 #endif
1890     __ j(lock_done);
1891 
1892     __ block_comment("} Slow path lock");
1893 
1894     __ block_comment("Slow path unlock {");
1895     __ bind(slow_path_unlock);
1896 
1897     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1898       save_native_result(masm, ret_type, stack_slots);
1899     }
1900 
1901     __ mv(c_rarg2, xthread);
1902     __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1903     __ mv(c_rarg0, obj_reg);
1904 
1905     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1906     // NOTE that obj_reg == x9 currently
1907     __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1908     __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1909 
1910     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1911 
1912 #ifdef ASSERT
1913     {
1914       Label L;
1915       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1916       __ beqz(t0, L);
1917       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
1918       __ bind(L);
1919     }
1920 #endif /* ASSERT */
1921 
1922     __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1923 
1924     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1925       restore_native_result(masm, ret_type, stack_slots);
1926     }
1927     __ j(unlock_done);
1928 
1929     __ block_comment("} Slow path unlock");
1930 
1931   } // synchronized
1932 
1933   // SLOW PATH Reguard the stack if needed
1934 
1935   __ bind(reguard);
1936   save_native_result(masm, ret_type, stack_slots);
1937   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1938   restore_native_result(masm, ret_type, stack_slots);
1939   // and continue
1940   __ j(reguard_done);
1941 
1942   // SLOW PATH safepoint
1943   {
1944     __ block_comment("safepoint {");
1945     __ bind(safepoint_in_progress);
1946 
1947     // Don't use call_VM as it will see a possible pending exception and forward it
1948     // and never return here, preventing us from clearing _last_native_pc down below.
1949     //
1950     save_native_result(masm, ret_type, stack_slots);
1951     __ mv(c_rarg0, xthread);
1952 #ifndef PRODUCT
1953     assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
1954 #endif
1955     __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1956 
1957     // Restore any method result value
1958     restore_native_result(masm, ret_type, stack_slots);
1959 
1960     __ j(safepoint_in_progress_done);
1961     __ block_comment("} safepoint");
1962   }
1963 
1964   // SLOW PATH dtrace support
1965   if (DTraceMethodProbes) {
1966     {
1967       __ block_comment("dtrace entry {");
1968       __ bind(dtrace_method_entry);
1969 
1970       // We have all of the arguments set up at this point. We must not clobber
1971       // any of the argument registers, so save and restore them around the call.
1972 
1973       save_args(masm, total_c_args, c_arg, out_regs);
1974       __ mov_metadata(c_rarg1, method());
1975       __ call_VM_leaf(
1976         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1977         xthread, c_rarg1);
1978       restore_args(masm, total_c_args, c_arg, out_regs);
1979       __ j(dtrace_method_entry_done);
1980       __ block_comment("} dtrace entry");
1981     }
1982 
1983     {
1984       __ block_comment("dtrace exit {");
1985       __ bind(dtrace_method_exit);
1986       save_native_result(masm, ret_type, stack_slots);
1987       __ mov_metadata(c_rarg1, method());
1988       __ call_VM_leaf(
1989            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1990            xthread, c_rarg1);
1991       restore_native_result(masm, ret_type, stack_slots);
1992       __ j(dtrace_method_exit_done);
1993       __ block_comment("} dtrace exit");
1994     }
1995   }
1996 
1997   __ flush();
1998 
1999   nmethod *nm = nmethod::new_native_nmethod(method,
2000                                             compile_id,
2001                                             masm->code(),
2002                                             vep_offset,
2003                                             frame_complete,
2004                                             stack_slots / VMRegImpl::slots_per_word,
2005                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2006                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2007                                             oop_maps);
2008   assert(nm != nullptr, "create native nmethod fail!");
2009   return nm;
2010 }
2011 
2012 // This function returns the adjustment size (in number of words) to a c2i adapter
2013 // activation for use during deoptimization
2014 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2015   assert(callee_locals >= callee_parameters,
2016          "test and remove; got more parms than locals");
2017   if (callee_locals < callee_parameters) {
2018     return 0;                   // No adjustment for negative locals
2019   }
2020   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2021   // diff is counted in stack words
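       // E.g. 5 callee locals, 3 parameters, 1 word per stack element:
       // diff = 2 words, already even, so the adjustment is 2.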
2022   return align_up(diff, 2);
2023 }
2024 
2025 //------------------------------generate_deopt_blob----------------------------
2026 void SharedRuntime::generate_deopt_blob() {
2027   // Allocate space for the code
2028   ResourceMark rm;
2029   // Setup code generation tools
2030   int pad = 0;
2031 #if INCLUDE_JVMCI
2032   if (EnableJVMCI) {
2033     pad += 512; // Increase the buffer size when compiling for JVMCI
2034   }
2035 #endif
2036   const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
2037   CodeBuffer buffer(name, 2048 + pad, 1024);
2038   MacroAssembler* masm = new MacroAssembler(&buffer);
2039   int frame_size_in_words = -1;
2040   OopMap* map = nullptr;
2041   OopMapSet *oop_maps = new OopMapSet();
2042   assert_cond(masm != nullptr && oop_maps != nullptr);
2043   RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0);
2044 
2045   // -------------
2046   // This code enters when returning to a de-optimized nmethod.  A return
2047   // address has been pushed on the stack, and return values are in
2048   // registers.
2049   // If we are doing a normal deopt then we were called from the patched
2050   // nmethod from the point we returned to the nmethod. So the return
2051   // address on the stack is wrong by NativeCall::instruction_size
2052   // We will adjust the value so it looks like we have the original return
2053   // address on the stack (like when we eagerly deoptimized).
2054   // In the case of an exception pending when deoptimizing, we enter
2055   // with a return address on the stack that points after the call we patched
2056   // into the exception handler. We have the following register state from,
2057   // e.g., the forward exception stub (see stubGenerator_riscv.cpp).
2058   //    x10: exception oop
2059   //    x9: exception handler
2060   //    x13: throwing pc
2061   // So in this case we simply jam x13 into the useless return address and
2062   // the stack looks just like we want.
2063   //
2064   // At this point we need to de-opt.  We save the argument return
2065   // registers.  We call the first C routine, fetch_unroll_info().  This
2066   // routine captures the return values and returns a structure which
2067   // describes the current frame size and the sizes of all replacement frames.
2068   // The current frame is compiled code and may contain many inlined
2069   // functions, each with their own JVM state.  We pop the current frame, then
2070   // push all the new frames.  Then we call the C routine unpack_frames() to
2071   // populate these frames.  Finally unpack_frames() returns us the new target
2072   // address.  Notice that callee-save registers are BLOWN here; they have
2073   // already been captured in the vframeArray at the time the return PC was
2074   // patched.
2075   address start = __ pc();
2076   Label cont;
2077 
2078   // Prolog for non exception case!
2079 
2080   // Save everything in sight.
2081   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2082 
2083   // Normal deoptimization.  Save exec mode for unpack_frames.
2084   __ mv(xcpool, Deoptimization::Unpack_deopt); // callee-saved
2085   __ j(cont);
2086 
2087   int reexecute_offset = __ pc() - start;
2088 #if INCLUDE_JVMCI && !defined(COMPILER1)
2089   if (UseJVMCICompiler) {
2090     // JVMCI does not use this kind of deoptimization
2091     __ should_not_reach_here();
2092   }
2093 #endif
2094 
2095   // Reexecute case
2096   // return address is the pc that describes what bci to re-execute at
2097 
2098   // No need to update map as each call to save_live_registers will produce identical oopmap
2099   (void) reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2100 
2101   __ mv(xcpool, Deoptimization::Unpack_reexecute); // callee-saved
2102   __ j(cont);
2103 
2104 #if INCLUDE_JVMCI
2105   Label after_fetch_unroll_info_call;
2106   int implicit_exception_uncommon_trap_offset = 0;
2107   int uncommon_trap_offset = 0;
2108 
2109   if (EnableJVMCI) {
2110     implicit_exception_uncommon_trap_offset = __ pc() - start;
2111 
2112     __ ld(ra, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2113     __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2114 
2115     uncommon_trap_offset = __ pc() - start;
2116 
2117     // Save everything in sight.
2118     reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2119     // fetch_unroll_info needs to call last_java_frame()
2120     Label retaddr;
2121     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2122 
2123     __ lw(c_rarg1, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2124     __ mv(t0, -1);
2125     __ sw(t0, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2126 
2127     __ mv(xcpool, Deoptimization::Unpack_reexecute);
2128     __ mv(c_rarg0, xthread);
2129     __ orrw(c_rarg2, zr, xcpool); // exec mode
2130     __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
2131     __ bind(retaddr);
2132     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2133 
2134     __ reset_last_Java_frame(false);
2135 
2136     __ j(after_fetch_unroll_info_call);
2137   } // EnableJVMCI
2138 #endif // INCLUDE_JVMCI
2139 
2140   int exception_offset = __ pc() - start;
2141 
2142   // Prolog for exception case
2143 
2144   // all registers are dead at this entry point, except for x10 and
2145   // x13, which contain the exception oop and exception pc
2146   // respectively.  Set them in TLS and fall thru to the
2147   // unpack_with_exception_in_tls entry point.
2148 
2149   __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
2150   __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
2151 
2152   int exception_in_tls_offset = __ pc() - start;
2153 
2154   // new implementation because exception oop is now passed in JavaThread
2155 
2156   // Prolog for exception case
2157   // All registers must be preserved because they might be used by LinearScan
2158   // Exception oop and throwing PC are passed in JavaThread
2159   // tos: stack at point of call to method that threw the exception (i.e. only
2160   // args are on the stack, no return address)
2161 
2162   // The return address pushed by save_live_registers will be patched
2163   // later with the throwing pc. The correct value is not available
2164   // now because loading it from memory would destroy registers.
2165 
2166   // NB: The SP at this point must be the SP of the method that is
2167   // being deoptimized.  Deoptimization assumes that the frame created
2168   // here by save_live_registers is immediately below the method's SP.
2169   // This is a somewhat fragile mechanism.
2170 
2171   // Save everything in sight.
2172   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2173 
2174   // Now it is safe to overwrite any register
2175 
2176   // Deopt during an exception.  Save exec mode for unpack_frames.
2177   __ mv(xcpool, Deoptimization::Unpack_exception); // callee-saved
2178 
2179   // load throwing pc from JavaThread and patch it as the return address
2180   // of the current frame. Then clear the field in JavaThread
2181 
2182   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2183   __ sd(x13, Address(fp, frame::return_addr_offset * wordSize));
2184   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2185 
2186 #ifdef ASSERT
2187   // verify that there is really an exception oop in JavaThread
2188   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2189   __ verify_oop(x10);
2190 
2191   // verify that there is no pending exception
2192   Label no_pending_exception;
2193   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2194   __ beqz(t0, no_pending_exception);
2195   __ stop("must not have pending exception here");
2196   __ bind(no_pending_exception);
2197 #endif
2198 
2199   __ bind(cont);
2200 
2201   // Call C code.  Need thread and this frame, but NOT official VM entry
2202   // crud.  We cannot block on this call, no GC can happen.
2203   //
2204   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2205 
2206   // fetch_unroll_info needs to call last_java_frame().
2207 
2208   Label retaddr;
2209   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2210 #ifdef ASSERT
2211   {
2212     Label L;
2213     __ ld(t0, Address(xthread, JavaThread::last_Java_fp_offset()));
2215     __ beqz(t0, L);
2216     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2217     __ bind(L);
2218   }
2219 #endif // ASSERT
2220   __ mv(c_rarg0, xthread);
2221   __ mv(c_rarg1, xcpool);
2222   __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
2223   __ bind(retaddr);
2224 
2225   // Need to have an oopmap that tells fetch_unroll_info where to
2226   // find any register it might need.
2227   oop_maps->add_gc_map(__ pc() - start, map);
2228 
2229   __ reset_last_Java_frame(false);
2230 
2231 #if INCLUDE_JVMCI
2232   if (EnableJVMCI) {
2233     __ bind(after_fetch_unroll_info_call);
2234   }
2235 #endif
2236 
2237   // Load UnrollBlock* into x15
2238   __ mv(x15, x10);
2239 
2240   __ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset()));
2241   Label noException;
2242   __ mv(t0, Deoptimization::Unpack_exception);
2243   __ bne(xcpool, t0, noException); // Was exception pending?
2244   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2245   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2246   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
2247   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2248 
2249   __ verify_oop(x10);
2250 
2251   // Overwrite the result registers with the exception results.
2252   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2253 
2254   __ bind(noException);
2255 
2256   // Only register save data is on the stack.
2257   // Now restore the result registers.  Everything else is either dead
2258   // or captured in the vframeArray.
2259 
2260   // Restore fp result register
2261   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2262   // Restore integer result register
2263   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2264 
2265   // Pop all of the register save area off the stack
2266   __ add(sp, sp, frame_size_in_words * wordSize);
2267 
2268   // All of the register save area has been popped off the stack. Only the
2269   // return address remains.
2270 
2271   // Pop all the frames we must move/replace.
2272   //
2273   // Frame picture (youngest to oldest)
2274   // 1: self-frame (no frame link)
2275   // 2: deopting frame  (no frame link)
2276   // 3: caller of deopting frame (could be compiled/interpreted).
2277   //
2278   // Note: by leaving the return address of self-frame on the stack
2279   // and using the size of frame 2 to adjust the stack
2280   // when we are done the return to frame 3 will still be on the stack.
2281 
2282   // Pop deoptimized frame
2283   __ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
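       // The recorded frame size includes the saved fp and return address,
       // which we pop by hand just below, hence the 2-word adjustment.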
2284   __ subi(x12, x12, 2 * wordSize);
2285   __ add(sp, sp, x12);
2286   __ ld(fp, Address(sp, 0));
2287   __ ld(ra, Address(sp, wordSize));
2288   __ addi(sp, sp, 2 * wordSize);
2289   // RA should now be the return address to the caller (3)
2290 
2291 #ifdef ASSERT
2292   // Compilers generate code that bangs the stack by as much as the
2293   // interpreter would need. So this stack banging should never
2294   // trigger a fault. Verify that it does not on non product builds.
2295   __ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2296   __ bang_stack_size(x9, x12);
2297 #endif
2298   // Load address of array of frame pcs into x12
2299   __ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset()));
2300 
2301   // Load address of array of frame sizes into x14
2302   __ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset()));
2303 
2304   // Load counter into x13
2305   __ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset()));
2306 
2307   // Now adjust the caller's stack to make up for the extra locals
2308   // but record the original sp so that we can save it in the skeletal interpreter
2309   // frame and the stack walking of interpreter_sender will get the unextended sp
2310   // value and not the "real" sp value.
2311 
2312   const Register sender_sp = x16;
2313 
2314   __ mv(sender_sp, sp);
2315   __ lwu(x9, Address(x15,
2316                      Deoptimization::UnrollBlock::
2317                      caller_adjustment_offset()));
2318   __ sub(sp, sp, x9);
2319 
2320   // Push interpreter frames in a loop
2321   __ mv(t0, 0xDEADDEAD);               // Make a recognizable pattern
2322   __ mv(t1, t0);
2323   Label loop;
2324   __ bind(loop);
2325   __ ld(x9, Address(x14, 0));          // Load frame size
2326   __ addi(x14, x14, wordSize);
2327   __ subi(x9, x9, 2 * wordSize);       // We'll push pc and fp by hand
2328   __ ld(ra, Address(x12, 0));          // Load pc
2329   __ addi(x12, x12, wordSize);
2330   __ enter();                          // Save old & set new fp
2331   __ sub(sp, sp, x9);                  // Prolog
2332   // This value is corrected by layout_activation_impl
2333   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
2334   __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2335   __ mv(sender_sp, sp);                // Pass sender_sp to next frame
2336   __ subi(x13, x13, 1);                // Decrement counter
2337   __ bnez(x13, loop);
2338 
2339   // Re-push self-frame
2340   __ ld(ra, Address(x12));
2341   __ enter();
2342 
2343   // Allocate a full sized register save area.  We subtract 2 because
2344   // enter() just pushed 2 words
2345   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2346 
2347   // Restore frame locals after moving the frame
2348   __ fsd(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2349   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2350 
2351   // Call C code.  Need thread but NOT official VM entry
2352   // crud.  We cannot block on this call, no GC can happen.  Call should
2353   // restore return values to their stack-slots with the new SP.
2354   //
2355   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2356 
2357   // Use fp because the frames look interpreted now
2358   // Don't need the precise return PC here, just precise enough to point into this code blob.
2359   address the_pc = __ pc();
2360   __ set_last_Java_frame(sp, fp, the_pc, t0);
2361 
2362   __ mv(c_rarg0, xthread);
2363   __ mv(c_rarg1, xcpool); // second arg: exec_mode
2364   __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
2365 
2366   // Set an oopmap for the call site
2367   // Use the same PC we used for the last java frame
2368   oop_maps->add_gc_map(the_pc - start,
2369                        new OopMap(frame_size_in_words, 0));
2370 
2371   // Clear fp AND pc
2372   __ reset_last_Java_frame(true);
2373 
2374   // Collect return values
2375   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2376   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2377 
2378   // Pop self-frame.
2379   __ leave();                           // Epilog
2380 
2381   // Jump to interpreter
2382   __ ret();
2383 
2384   // Make sure all code is generated
2385   masm->flush();
2386 
2387   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2388   assert(_deopt_blob != nullptr, "create deoptimization blob fail!");
2389   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2390 #if INCLUDE_JVMCI
2391   if (EnableJVMCI) {
2392     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2393     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2394   }
2395 #endif
2396 }
2397 
2398 // Number of stack slots between incoming argument block and the start of
2399 // a new frame. The PROLOG must add this many slots to the stack. The
2400 // EPILOG must remove this many slots.
2401 // RISCV needs two words for RA (return address) and FP (frame pointer).
2402 uint SharedRuntime::in_preserve_stack_slots() {
2403   return 2 * VMRegImpl::slots_per_word;
2404 }
2405 
2406 uint SharedRuntime::out_preserve_stack_slots() {
2407   return 0;
2408 }
2409 
2410 VMReg SharedRuntime::thread_register() {
2411   return xthread->as_VMReg();
2412 }
2413 
2414 //------------------------------generate_handler_blob------
2415 //
2416 // Generate a special Compile2Runtime blob that saves all registers,
2417 // and sets up an oopmap.
2418 //
2419 SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
2420   assert(is_polling_page_id(id), "expected a polling page stub id");
2421 
2422   ResourceMark rm;
2423   OopMapSet *oop_maps = new OopMapSet();
2424   assert_cond(oop_maps != nullptr);
2425   OopMap* map = nullptr;
2426 
2427   // Allocate space for the code.  Setup code generation tools.
2428   const char* name = SharedRuntime::stub_name(id);
2429   CodeBuffer buffer(name, 2048, 1024);
2430   MacroAssembler* masm = new MacroAssembler(&buffer);
2431   assert_cond(masm != nullptr);
2432 
2433   address start   = __ pc();
2434   address call_pc = nullptr;
2435   int frame_size_in_words = -1;
2436   bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
2437   RegisterSaver reg_saver(id == StubId::shared_polling_page_vectors_safepoint_handler_id /* save_vectors */);
2438 
2439   // Save Integer and Float registers.
2440   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2441 
2442   // The following is basically a call_VM.  However, we need the precise
2443   // address of the call in order to generate an oopmap. Hence, we do all the
2444   // work ourselves.
2445 
2446   Label retaddr;
2447   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2448 
2449   // The return address must always be correct so that frame constructor never
2450   // sees an invalid pc.
2451 
2452   if (!cause_return) {
2453     // overwrite the return address pushed by save_live_registers
2454     // Additionally, x18 is a callee-saved register so we can look at
2455     // it later to determine if someone changed the return address for
2456     // us!
2457     __ ld(x18, Address(xthread, JavaThread::saved_exception_pc_offset()));
2458     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2459   }
2460 
2461   // Do the call
2462   __ mv(c_rarg0, xthread);
2463   __ rt_call(call_ptr);
2464   __ bind(retaddr);
2465 
2466   // Set an oopmap for the call site.  This oopmap will map all
2467   // oop-registers and debug-info registers as callee-saved.  This
2468   // will allow deoptimization at this safepoint to find all possible
2469   // debug-info recordings, as well as let GC find all oops.
2470 
2471   oop_maps->add_gc_map( __ pc() - start, map);
2472 
2473   Label noException;
2474 
2475   __ reset_last_Java_frame(false);
2476 
2477   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2478 
2479   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2480   __ beqz(t0, noException);
2481 
2482   // Exception pending
2483 
2484   reg_saver.restore_live_registers(masm);
2485 
2486   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2487 
2488   // No exception case
2489   __ bind(noException);
2490 
2491   Label no_adjust, bail;
2492   if (!cause_return) {
2493     // If our stashed return pc was modified by the runtime we avoid touching it
2494     __ ld(t0, Address(fp, frame::return_addr_offset * wordSize));
2495     __ bne(x18, t0, no_adjust);
2496 
2497 #ifdef ASSERT
2498     // Verify the correct encoding of the poll we're about to skip.
2499     // See NativeInstruction::is_lwu_to_zr()
2500     __ lwu(t0, Address(x18));
2501     __ andi(t1, t0, 0b1111111);
2502     __ mv(t2, 0b0000011);
2503     __ bne(t1, t2, bail); // 0-6:0b0000011
2504     __ srli(t1, t0, 7);
2505     __ andi(t1, t1, 0b11111);
2506     __ bnez(t1, bail);    // 7-11:0b00000
2507     __ srli(t1, t0, 12);
2508     __ andi(t1, t1, 0b111);
2509     __ mv(t2, 0b110);
2510     __ bne(t1, t2, bail); // 12-14:0b110
2511 #endif
2512 
2513     // Adjust return pc forward to step over the safepoint poll instruction
2514     __ addi(x18, x18, NativeInstruction::instruction_size);
2515     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2516   }
2517 
2518   __ bind(no_adjust);
2519   // Normal exit, restore registers and exit.
2520 
2521   reg_saver.restore_live_registers(masm);
2522   __ ret();
2523 
2524 #ifdef ASSERT
2525   __ bind(bail);
2526   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2527 #endif
2528 
2529   // Make sure all code is generated
2530   masm->flush();
2531 
2532   // Fill-out other meta info
2533   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2534 }
2535 
2536 //
2537 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2538 //
2539 // Generate a stub that calls into vm to find out the proper destination
2540 // of a java call. All the argument registers are live at this point
2541 // but since this is generic code we don't know what they are, and the caller
2542 // must do any gc of the args.
2543 //
2544 RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
2545   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2546   assert(is_resolve_id(id), "expected a resolve stub id");
2547 
2548   // allocate space for the code
2549   ResourceMark rm;
2550 
2551   const char* name = SharedRuntime::stub_name(id);
2552   CodeBuffer buffer(name, 1000, 512);
2553   MacroAssembler* masm = new MacroAssembler(&buffer);
2554   assert_cond(masm != nullptr);
2555 
2556   int frame_size_in_words = -1;
2557   RegisterSaver reg_saver(false /* save_vectors */);
2558 
2559   OopMapSet *oop_maps = new OopMapSet();
2560   assert_cond(oop_maps != nullptr);
2561   OopMap* map = nullptr;
2562 
2563   int start = __ offset();
2564 
2565   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2566 
2567   int frame_complete = __ offset();
2568 
2569   {
2570     Label retaddr;
2571     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2572 
2573     __ mv(c_rarg0, xthread);
2574     __ rt_call(destination);
2575     __ bind(retaddr);
2576   }
2577 
2578   // Set an oopmap for the call site.
2579   // We need this not only for callee-saved registers, but also for volatile
2580   // registers that the compiler might be keeping live across a safepoint.
2581 
2582   oop_maps->add_gc_map( __ offset() - start, map);
2583 
2584   // x10 contains the address we are going to jump to assuming no exception got installed
2585 
2586   // clear last_Java_sp
2587   __ reset_last_Java_frame(false);
2588   // check for pending exceptions
2589   Label pending;
2590   __ ld(t1, Address(xthread, Thread::pending_exception_offset()));
2591   __ bnez(t1, pending);
2592 
2593   // get the returned Method*
2594   __ get_vm_result_metadata(xmethod, xthread);
2595   __ sd(xmethod, Address(sp, reg_saver.reg_offset_in_bytes(xmethod)));
2596 
2597   // x10 is where we want to jump, overwrite t1 which is saved and temporary
2598   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t1)));
2599   reg_saver.restore_live_registers(masm);
2600 
2601   // We are back to the original state on entry and ready to go.
2602   __ jr(t1);
2603 
2604   // Pending exception after the safepoint
2605 
2606   __ bind(pending);
2607 
2608   reg_saver.restore_live_registers(masm);
2609 
2610   // exception pending => remove activation and forward to exception handler
2611 
2612   __ sd(zr, Address(xthread, JavaThread::vm_result_oop_offset()));
2613 
2614   __ ld(x10, Address(xthread, Thread::pending_exception_offset()));
2615   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2616 
2617   // -------------
2618   // make sure all code is generated
2619   masm->flush();
2620 
2621   // return the blob
2622   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2623 }
2624 
2625 // Continuation point for throwing of implicit exceptions that are
2626 // not handled in the current activation. Fabricates an exception
2627 // oop and initiates normal exception dispatching in this
2628 // frame. Since we need to preserve callee-saved values (currently
2629 // only for C2, but done for C1 as well) we need a callee-saved oop
2630 // map and therefore have to make these stubs into RuntimeStubs
2631 // rather than BufferBlobs.  If the compiler needs all registers to
2632 // be preserved between the fault point and the exception handler
2633 // then it must assume responsibility for that in
2634 // AbstractCompiler::continuation_for_implicit_null_exception or
2635 // continuation_for_implicit_division_by_zero_exception. All other
2636 // implicit exceptions (e.g., NullPointerException or
2637 // AbstractMethodError on entry) are either at call sites or
2638 // otherwise assume that stack unwinding will be initiated, so
2639 // caller saved registers were assumed volatile in the compiler.
2640 
2641 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
2642   assert(is_throw_id(id), "expected a throw stub id");
2643 
2644   const char* name = SharedRuntime::stub_name(id);
2645 
2646   // Information about frame layout at time of blocking runtime call.
2647   // Note that we only have to preserve callee-saved registers since
2648   // the compilers are responsible for supplying a continuation point
2649   // if they expect all registers to be preserved.
2650   // n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0
2651   assert_cond(runtime_entry != nullptr);
2652   enum layout {
2653     fp_off = 0,
2654     fp_off2,
2655     return_off,
2656     return_off2,
2657     framesize // inclusive of return address
2658   };
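       // With two 32-bit VMReg slots each for fp and ra, framesize is 4 slots
       // (16 bytes), keeping sp 16-byte aligned (see the is_even assert below).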
2659 
2660   const int insts_size = 1024;
2661   const int locs_size  = 64;
2662 
2663   ResourceMark rm;
2664   const char* timer_msg = "SharedRuntime generate_throw_exception";
2665   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
2666 
2667   CodeBuffer code(name, insts_size, locs_size);
2668   OopMapSet* oop_maps  = new OopMapSet();
2669   MacroAssembler* masm = new MacroAssembler(&code);
2670   assert_cond(oop_maps != nullptr && masm != nullptr);
2671 
2672   address start = __ pc();
2673 
2674   // This is an inlined and slightly modified version of call_VM
2675   // which has the ability to fetch the return PC out of
2676   // thread-local storage and also sets up last_Java_sp slightly
2677   // differently than the real call_VM
2678 
2679   __ enter(); // Save FP and RA before call
2680 
2681   assert(is_even(framesize / 2), "sp not 16-byte aligned");
2682 
2683   // ra and fp are already in place
2684   __ subi(sp, fp, (unsigned)framesize << LogBytesPerInt); // prolog
2685 
2686   int frame_complete = __ pc() - start;
2687 
2688   // Set up last_Java_sp and last_Java_fp
2689   address the_pc = __ pc();
2690   __ set_last_Java_frame(sp, fp, the_pc, t0);
2691 
2692   // Call runtime
2693   __ mv(c_rarg0, xthread);
2694   BLOCK_COMMENT("call runtime_entry");
2695   __ rt_call(runtime_entry);
2696 
2697   // Generate oop map
2698   OopMap* map = new OopMap(framesize, 0);
2699   assert_cond(map != nullptr);
2700 
2701   oop_maps->add_gc_map(the_pc - start, map);
2702 
2703   __ reset_last_Java_frame(true);
2704 
2705   __ leave();
2706 
2707   // check for pending exceptions
2708 #ifdef ASSERT
2709   Label L;
2710   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2711   __ bnez(t0, L);
2712   __ should_not_reach_here();
2713   __ bind(L);
2714 #endif // ASSERT
2715   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2716 
2717   // codeBlob framesize is in words (not VMRegImpl::slot_size)
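       // (framesize counts 32-bit VMReg slots; shifting right by
       // LogBytesPerWord - LogBytesPerInt, i.e. by 1, converts slots to words.)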
2718   RuntimeStub* stub =
2719     RuntimeStub::new_runtime_stub(name,
2720                                   &code,
2721                                   frame_complete,
2722                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2723                                   oop_maps, false);
2724   assert(stub != nullptr, "create runtime stub fail!");
2725   return stub;
2726 }
2727 
2728 #if INCLUDE_JFR
2729 
2730 static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
2731   __ set_last_Java_frame(sp, fp, the_pc, t0);
2732   __ mv(c_rarg0, thread);
2733 }
2734 
2735 static void jfr_epilogue(MacroAssembler* masm) {
2736   __ reset_last_Java_frame(true);
2737 }
2738 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
2739 // It returns a jobject handle to the event writer.
2740 // The handle is dereferenced and the return value is the event writer oop.
2741 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
2742   enum layout {
2743     fp_off,
2744     fp_off2,
2745     return_off,
2746     return_off2,
2747     framesize // inclusive of return address
2748   };
2749 
2750   int insts_size = 1024;
2751   int locs_size = 64;
2752   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id);
2753   CodeBuffer code(name, insts_size, locs_size);
2754   OopMapSet* oop_maps = new OopMapSet();
2755   MacroAssembler* masm = new MacroAssembler(&code);
2756 
2757   address start = __ pc();
2758   __ enter();
2759   int frame_complete = __ pc() - start;
2760   address the_pc = __ pc();
2761   jfr_prologue(the_pc, masm, xthread);
2762   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
2763 
2764   jfr_epilogue(masm);
2765   __ resolve_global_jobject(x10, t0, t1);
2766   __ leave();
2767   __ ret();
2768 
2769   OopMap* map = new OopMap(framesize, 1);
2770   oop_maps->add_gc_map(the_pc - start, map);
2771 
2772   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
2773     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2774                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2775                                   oop_maps, false);
2776   return stub;
2777 }
2778 
2779 // For c2: call to return a leased buffer.
2780 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
2781   enum layout {
2782     fp_off,
2783     fp_off2,
2784     return_off,
2785     return_off2,
2786     framesize // inclusive of return address
2787   };
2788 
2789   int insts_size = 1024;
2790   int locs_size = 64;
2791   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id);
2792   CodeBuffer code(name, insts_size, locs_size);
2793   OopMapSet* oop_maps = new OopMapSet();
2794   MacroAssembler* masm = new MacroAssembler(&code);
2795 
2796   address start = __ pc();
2797   __ enter();
2798   int frame_complete = __ pc() - start;
2799   address the_pc = __ pc();
2800   jfr_prologue(the_pc, masm, xthread);
2801   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
2802 
2803   jfr_epilogue(masm);
2804   __ leave();
2805   __ ret();
2806 
2807   OopMap* map = new OopMap(framesize, 1);
2808   oop_maps->add_gc_map(the_pc - start, map);
2809 
2810   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
2811     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2812                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2813                                   oop_maps, false);
2814   return stub;
2815 }
2816 
2817 #endif // INCLUDE_JFR