/*
 * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_riscv.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
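// e.g. with the 16-byte stack alignment RISC-V uses and 4-byte VMReg stack
// slots, this works out to 4 slots.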

class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(UseRVV && save_vectors) {}
  ~RegisterSaver() {}
  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own
  // gregs: 28, float_registers: 32; except: x1(ra), x2(sp), x3(gp) and x4(tp)
  // |---v0---|<---SP
  // |---v1---|save vectors only in generate_handler_blob
  // |-- .. --|
  // |---v31--|-----
  // |---f0---|
  // |---f1---|
  // |   ..   |
  // |---f31--|
  // |---reserved slot for stack alignment---|
  // |---x5---|
  // |   x6   |
  // |---.. --|
  // |---x31--|
  // |---fp---|
  // |---ra---|
  int v0_offset_in_bytes(void) { return 0; }
  int f0_offset_in_bytes(void) {
    int f0_offset = 0;
#ifdef COMPILER2
    if (_save_vectors) {
      f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegister::number_of_registers *
                   BytesPerInt;
    }
#endif
    return f0_offset;
  }
  int reserved_slot_offset_in_bytes(void) {
    return f0_offset_in_bytes() +
           FloatRegister::max_slots_per_register *
           FloatRegister::number_of_registers *
           BytesPerInt;
  }

  int reg_offset_in_bytes(Register r) {
    assert(r->encoding() > 4, "ra, sp, gp and tp not saved");
    return reserved_slot_offset_in_bytes() + (r->encoding() - 4 /* x1, x2, x3, x4 */) * wordSize;
  }

  int freg_offset_in_bytes(FloatRegister f) {
    return f0_offset_in_bytes() + f->encoding() * wordSize;
  }

  int ra_offset_in_bytes(void) {
    return reserved_slot_offset_in_bytes() +
           (Register::number_of_registers - 3) *
           Register::max_slots_per_register *
           BytesPerInt;
  }
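
  // A worked example (a sketch, assuming no vectors are saved, 8-byte words
  // and 4-byte VMReg slots): f0..f31 occupy 32 * 8 = 256 bytes starting at
  // offset 0, the reserved alignment slot sits at offset 256, x5..x31 and fp
  // follow, and ra lands at 256 + (32 - 3) * 2 * 4 = 488 bytes from the base.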
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  int vector_size_in_bytes = 0;
  int vector_size_in_slots = 0;
#ifdef COMPILER2
  if (_save_vectors) {
    vector_size_in_bytes += Matcher::scalable_vector_reg_size(T_BYTE);
    vector_size_in_slots += Matcher::scalable_vector_reg_size(T_INT);
  }
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer, Float and Vector registers.
  __ enter();
  __ push_CPU_state(_save_vectors, vector_size_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_maps != nullptr && oop_map != nullptr);

  int sp_offset_in_slots = 0;
  int step_in_slots = 0;
  if (_save_vectors) {
    step_in_slots = vector_size_in_slots;
    for (int i = 0; i < VectorRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
      VectorRegister r = as_VectorRegister(i);
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
    }
  }

  step_in_slots = FloatRegister::max_slots_per_register;
  for (int i = 0; i < FloatRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    FloatRegister r = as_FloatRegister(i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
  }

  step_in_slots = Register::max_slots_per_register;
  // skip the slot reserved for alignment, see MacroAssembler::push_reg;
  // also skip x5 ~ x6 on the stack because they are caller-saved registers.
  sp_offset_in_slots += Register::max_slots_per_register * 3;
  // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack.
  for (int i = 7; i < Register::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    Register r = as_Register(i);
    if (r != xthread) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg());
    }
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ leave();
}

// Is vector's size (in bytes) bigger than a size saved by default?
// RISC-V does not overlay the floating-point registers on vector registers like AArch64 does.
bool SharedRuntime::is_wide_vector(int size) {
  return UseRVV && size > 0;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.  Registers up to
// Register::number_of_registers are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static JNI
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the Java ABI we ought to at
// least get some advantage out of it.

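// For example (a sketch, not an exhaustive mapping): for a signature of
// (int, long, Object, double), this routine assigns the int to j_rarg0
// (one 32-bit slot), the long to j_rarg1 (two slots, with its trailing
// T_VOID half marked bad), the Object to j_rarg2 and the double to j_farg0.
// Only once the eight integer or eight float argument registers are
// exhausted do values spill to stack slots, aligned to 2-slot (8-byte)
// boundaries.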
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3,
    j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN: // fall through
      case T_CHAR:    // fall through
      case T_BYTE:    // fall through
      case T_SHORT:   // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_VOID:
        // halves of T_LONG or T_DOUBLE
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 1;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          stk_args = align_up(stk_args, 2);
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
  __ beqz(t0, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mv(c_rarg0, xmethod);
  __ mv(c_rarg1, ra);
  __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int comp_args_on_stack,
                            const GrowableArray<SigEntry>* sig,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int total_args_passed = sig->length();
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mv(x19_sender_sp, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2 * wordSize);

  if (extraspace) {
    __ sub(sp, sp, extraspace);
  }

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    BasicType bt = sig->at(i)._bt;
    if (bt == T_VOID) {
      assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   24 T_LONG
    // 1   16 T_VOID
    // 2    8 T_OBJECT
    // 3    0 T_BOOL
    // (no return address slot here: on RISC-V, ra stays in a register)
    //
    // However, to make things extra confusing: because we can fit a Java long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up, the
    // interpreter leaves one slot empty and only stores to a single slot. In this case
    // the slot that is occupied is the T_VOID slot. See, I said it was confusing.
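    //
    // For the 4-arg example above, the T_LONG value is therefore stored once,
    // at next_off == 16 (the T_VOID slot), while its own slot at st_off == 24
    // is left unused (filled with known junk in debug builds).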

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use t0
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        __ lwu(t0, Address(sp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
      } else {
        __ ld(t0, Address(sp, ld_off), /*temp register*/esp);

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (bt == T_LONG || bt == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ sd(t0, Address(sp, next_off), /*temp register*/esp);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaaaul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        } else {
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less), so move only 32 bits to the slot
        __ sd(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (bt == T_LONG || bt == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mv(t0, 0xdeadffffdeadaaabul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
          __ sd(r, Address(sp, next_off));
        } else {
          __ sd(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ fsw(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mv(t0, 0xdeadffffdeadaaacul);
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        __ fsd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mv(esp, sp); // Interp expects args on caller's expression stack

  __ ld(t1, Address(xmethod, in_bytes(Method::interpreter_entry_offset())));
  __ jr(t1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int comp_args_on_stack,
                                    const GrowableArray<SigEntry>* sig,
                                    const VMRegPair *regs) {
  // Note: x19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
  if (comp_args_on_stack != 0) {
    __ sub(t0, sp, comp_words_on_stack * wordSize);
    __ andi(sp, t0, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld(t1, Address(xmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ld(t0, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ beqz(t0, no_alternative_target);
    __ mv(t1, t0);
    __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  int total_args_passed = sig->length();
  for (int i = 0; i < total_args_passed; i++) {
    BasicType bt = sig->at(i)._bt;
    if (bt == T_VOID) {
      assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lw(t0, Address(esp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed with negative offsets, so the LSW is at the lower address

        // ld_off is MSW so get LSW
        const int offset = (bt == T_LONG || bt == T_DOUBLE) ?
                           next_off : ld_off;
        __ ld(t0, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (bt == T_LONG || bt == T_DOUBLE) ?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ld(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ lw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ flw(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ fld(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ push_cont_fastpath(xthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately, if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ sd(xmethod, Address(xthread, JavaThread::callee_target_offset()));

  __ jr(t1);
}

// ---------------------------------------------------------------

void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
                                            int comp_args_on_stack,
                                            const GrowableArray<SigEntry>* sig,
                                            const VMRegPair* regs,
                                            const GrowableArray<SigEntry>* sig_cc,
                                            const VMRegPair* regs_cc,
                                            const GrowableArray<SigEntry>* sig_cc_ro,
                                            const VMRegPair* regs_cc_ro,
                                            address entry_address[AdapterBlob::ENTRY_COUNT],
                                            AdapterBlob*& new_adapter,
                                            bool allocate_code_blob) {
  entry_address[AdapterBlob::I2C] = __ pc();
  gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

  entry_address[AdapterBlob::C2I_Unverified] = __ pc();
  Label skip_fixup;

  const Register receiver = j_rarg0;
  const Register data = t0;

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know xmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");

    __ ic_check();
    __ ld(xmethod, Address(data, CompiledICData::speculated_method_offset()));

    __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
    __ beqz(t0, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  entry_address[AdapterBlob::C2I] = __ pc();

  // Class initialization barrier for static methods
  entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  Label L_skip_barrier;

  // Bypass the barrier for non-static methods
  __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
  __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
  __ beqz(t1, L_skip_barrier); // non-static

  __ load_method_holder(t1, xmethod);
  __ clinit_barrier(t1, t0, &L_skip_barrier);
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
  entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, comp_args_on_stack, sig, regs, skip_fixup);
  return;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  assert(total_args_passed <= Argument::n_vector_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  // check more info at https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
  static const VectorRegister VEC_ArgReg[Argument::n_vector_register_parameters_c] = {
    v8, v9, v10, v11, v12, v13, v14, v15,
    v16, v17, v18, v19, v20, v21, v22, v23
  };

  const int next_reg_val = 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3,
    c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
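
  // For example (a sketch): for a native signature taking (JNIEnv*, jclass,
  // jlong, jfloat), this routine maps T_ADDRESS -> c_rarg0, T_OBJECT ->
  // c_rarg1, T_LONG -> c_rarg2 and T_FLOAT -> c_farg0. Note that, following
  // the RISC-V C calling convention, once the eight FP argument registers
  // are exhausted, floats overflow into the integer argument registers
  // before spilling to the stack (see the T_FLOAT/T_DOUBLE cases below).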

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:  // fall through
      case T_CHAR:     // fall through
      case T_BYTE:     // fall through
      case T_SHORT:    // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:   // fall through
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ fsw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fsd(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ sd(x10, Address(fp, -3 * wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ flw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fld(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ ld(x10, Address(fp, -3 * wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ subi(sp, sp, 2 * wordSize);
      __ fsd(args[i].first()->as_FloatRegister(), Address(sp, 0));
    }
  }
  __ push_reg(x, sp);
}

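// restore_args mirrors save_args: the integer register set is popped first,
// then the float registers are reloaded in the reverse of the order in which
// save_args spilled them, so the stack is unwound strictly LIFO.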
static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop_reg(x, sp);
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ fld(args[i].first()->as_FloatRegister(), Address(sp, 0));
      __ addi(sp, sp, 2 * wordSize);
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  const Register temp_reg = x9;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size() / wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(sp, Address(xthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ mv(t0, ContinuationEntry::cookie_value());
  __ sw(t0, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ sd(c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ sw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ sd(zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ sw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ sw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ld(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ sd(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));

  __ sd(zr, Address(xthread, JavaThread::cont_fastpath_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, fp points to the spilled fp + 2 * wordSize in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ beq(sp, t0, OK);
  __ stop("incorrect sp");
  __ bind(OK);
#endif

  __ ld(t0, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_fastpath_offset()));
  __ ld(t0, Address(sp, ContinuationEntry::parent_offset()));
  __ sd(t0, Address(xthread, JavaThread::cont_entry_offset()));
  __ add(fp, sp, (int)ContinuationEntry::size() + 2 * wordSize /* 2 extra words to match up with leave() */);
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
  // verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used at interp_only_mode only
  interpreted_entry_offset = __ pc() - start;
  {
#ifdef ASSERT
    Label is_interp_only;
    __ lw(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
    __ bnez(t0, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ld(c_rarg1, Address(esp, Interpreter::stackElementSize * 2));
    __ ld(c_rarg2, Address(esp, Interpreter::stackElementSize * 1));
    __ ld(c_rarg3, Address(esp, Interpreter::stackElementSize * 0));
    __ push_cont_fastpath(xthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
    // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ bnez(c_rarg2, call_thaw);

    address call_pc;
    {
      Assembler::IncompressibleScope scope(masm);
      // Make sure the call is patchable
      __ align(NativeInstruction::instruction_size);

      call_pc = __ reloc_call(resolve);
      if (call_pc == nullptr) {
        fatal("CodeCache is full at gen_continuation_enter");
      }

      oop_maps->add_gc_map(__ pc() - start, map);
      __ post_call_nop();
    }
    __ j(exit);

    address stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ bnez(c_rarg2, call_thaw);

  address call_pc;
  {
    Assembler::IncompressibleScope scope(masm);
    // Make sure the call is patchable
    __ align(NativeInstruction::instruction_size);

    call_pc = __ reloc_call(resolve);
    if (call_pc == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();
  }

  __ j(exit);

  __ bind(call_thaw);

  // Post call nops must be naturally aligned due to cmodx rules.
  {
    Assembler::IncompressibleScope scope(masm);
    __ align(NativeInstruction::instruction_size);

    ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
    __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
    oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
    ContinuationEntry::_return_pc_offset = __ pc() - start;
    __ post_call_nop();
  }

  __ bind(exit);
  ContinuationEntry::_cleanup_offset = __ pc() - start;
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret();

  // exception handling
  exception_offset = __ pc() - start;
  {
    __ mv(x9, x10); // save return value containing the exception oop in callee-saved x9

    continuation_enter_cleanup(masm);

    __ ld(c_rarg1, Address(fp, -1 * wordSize)); // return address
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), xthread, c_rarg1);

    // see OptoRuntime::generate_exception_blob: x10 -- exception oop, x13 -- exception pc

    __ mv(x11, x10); // the exception handler
    __ mv(x10, x9); // restore return value containing the exception oop
    __ verify_oop(x10);

    __ leave();
    __ mv(x13, ra);
    __ jr(x11); // the exception handler
  }

  address stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
  enum layout {
    fp_off,
    fp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };
  // assert(is_even(framesize/2), "sp not 16-byte aligned");

  stack_slots = framesize / VMRegImpl::slots_per_word;
  assert(stack_slots == 2, "recheck layout");

  address start = __ pc();

  compiled_entry_offset = __ pc() - start;
  __ enter();

  __ mv(c_rarg1, sp);

  // Post call nops must be naturally aligned due to cmodx rules.
  __ align(NativeInstruction::instruction_size);

  frame_complete = __ pc() - start;
  address the_pc = __ pc();

  {
    Assembler::IncompressibleScope scope(masm);
    __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup
  }

  __ mv(c_rarg0, xthread);
  __ set_last_Java_frame(sp, fp, the_pc, t0);
  __ call_VM_leaf(Continuation::freeze_entry(), 2);
  __ reset_last_Java_frame(true);

  Label pinned;

  __ bnez(x10, pinned);

  // We've succeeded, set sp to the ContinuationEntry
  __ ld(sp, Address(xthread, JavaThread::cont_entry_offset()));
  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ beqz(t0, ok);
  __ leave();
  __ j(RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ bind(ok);

  __ leave();
  __ ret();

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Pick out the receiver and the trailing special argument, if any
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = x9;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = x9;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = x12;  // known to be free at this point
      __ ld(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc()) SharedRuntime::block_for_jni_critical()
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_continuation_native_intrinsic()) {
    int exception_offset = -1;
    OopMapSet* oop_maps = new OopMapSet();
    int frame_complete = -1;
    int stack_slots = -1;
    int interpreted_entry_offset = -1;
    int vep_offset = -1;
    if (method->is_continuation_enter_intrinsic()) {
      gen_continuation_enter(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             exception_offset,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             interpreted_entry_offset,
                             vep_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      gen_continuation_yield(masm,
                             method,
                             in_sig_bt,
                             in_regs,
                             oop_maps,
                             frame_complete,
                             stack_slots,
                             vep_offset);
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }

#ifdef ASSERT
    if (method->is_continuation_enter_intrinsic()) {
      assert(interpreted_entry_offset != -1, "Must be set");
      assert(exception_offset != -1,         "Must be set");
    } else {
      assert(interpreted_entry_offset == -1, "Must be unset");
      assert(exception_offset == -1,         "Must be unset");
    }
    assert(frame_complete != -1,    "Must be set");
    assert(stack_slots != -1,       "Must be set");
    assert(vep_offset != -1,        "Must be set");
#endif

    __ flush();
    nmethod* nm = nmethod::new_native_nmethod(method,
                                              compile_id,
                                              masm->code(),
                                              vep_offset,
                                              frame_complete,
                                              stack_slots,
                                              in_ByteSize(-1),
                                              in_ByteSize(-1),
                                              oop_maps,
                                              exception_offset);
    if (nm == nullptr) return nm;
    if (method->is_continuation_enter_intrinsic()) {
      ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
    } else if (method->is_continuation_yield_intrinsic()) {
      _cont_doYield_stub = nm;
    } else {
      guarantee(false, "Unknown Continuation native intrinsic");
    }
    return nm;
  }

  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimization
    {
      Assembler::IncompressibleScope scope(masm); // keep the nop as 4 bytes for patching.
      MacroAssembler::assert_alignment(__ pc());
      __ nop();  // 4 bytes
    }
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)nullptr);
  }
  address native_func = method->native_function();
  assert(native_func != nullptr, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  assert_cond(oop_maps != nullptr);
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the Java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the JNI function will expect them. To figure out where they go
  // we convert the Java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)
1337 
1338   const int total_in_args = method->size_of_parameters();
1339   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1340 
1341   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1342   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1343 
1344   int argc = 0;
1345   out_sig_bt[argc++] = T_ADDRESS;
1346   if (method->is_static()) {
1347     out_sig_bt[argc++] = T_OBJECT;
1348   }
1349 
1350   for (int i = 0; i < total_in_args ; i++) {
1351     out_sig_bt[argc++] = in_sig_bt[i];
1352   }
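       // Illustrative example (not taken from this code): for
       //   static native int m(int a, Object b);
       // in_sig_bt is { T_INT, T_OBJECT } and out_sig_bt becomes
       //   { T_ADDRESS /* JNIEnv* */, T_OBJECT /* jclass */, T_INT, T_OBJECT },
       // i.e. total_c_args == total_in_args + 2 for a static method.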
1353 
1354   // Now figure out where the args must be stored and how much stack space
1355   // they require.
1356   int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1357 
1358   // Compute framesize for the wrapper.  We need to handlize all oops in
1359   // incoming registers
1360 
1361   // Calculate the total number of stack slots we will need.
1362 
1363   // First count the abi requirement plus all of the outgoing args
1364   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1365 
1366   // Now the space for the inbound oop handle area
1367   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
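       // (With 4-byte stack slots and 8-byte words, slots_per_word == 2, so this
       // reserves 16 slots: one word, i.e. two slots, per argument register.)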
1368 
1369   int oop_handle_offset = stack_slots;
1370   stack_slots += total_save_slots;
1371 
1372   // Now any space we need for handlizing a klass if static method
1373 
1374   int klass_slot_offset = 0;
1375   int klass_offset = -1;
1376   int lock_slot_offset = 0;
1377   bool is_static = false;
1378 
1379   if (method->is_static()) {
1380     klass_slot_offset = stack_slots;
1381     stack_slots += VMRegImpl::slots_per_word;
1382     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1383     is_static = true;
1384   }
1385 
1386   // Plus a lock if needed
1387 
1388   if (method->is_synchronized()) {
1389     lock_slot_offset = stack_slots;
1390     stack_slots += VMRegImpl::slots_per_word;
1391   }
1392 
1393   // Now a place (+2 slots) to save return values or temps during shuffling
1394   // + 4 slots for the return address (which we own) and saved fp
1395   stack_slots += 6;
1396 
1397   // Ok The space we have allocated will look like:
1398   //
1399   //
1400   // FP-> |                     |
1401   //      | 2 slots (ra)        |
1402   //      | 2 slots (fp)        |
1403   //      |---------------------|
1404   //      | 2 slots for moves   |
1405   //      |---------------------|
1406   //      | lock box (if sync)  |
1407   //      |---------------------| <- lock_slot_offset
1408   //      | klass (if static)   |
1409   //      |---------------------| <- klass_slot_offset
1410   //      | oopHandle area      |
1411   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1412   //      | outbound memory     |
1413   //      | based arguments     |
1414   //      |                     |
1415   //      |---------------------|
1416   //      |                     |
1417   // SP-> | out_preserved_slots |
1418   //
1419   //
1420 
1421 
1422   // Now compute the actual number of stack words we need, rounding to keep
1423   // the stack properly aligned.
1424   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1425 
1426   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
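       // For example, with StackAlignmentInBytes == 16 and 4-byte stack slots,
       // StackAlignmentInSlots == 4, so stack_slots is rounded up to a multiple
       // of 4 and stack_size stays 16-byte aligned.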
1427 
1428   // First thing make an ic check to see if we should even be here
1429 
1430   // We are free to use all registers as temps without saving them and
1431   // restoring them except fp. fp is the only callee save register
1432   // as far as the interpreter and the compiler(s) are concerned.
1433 
1434   const Register receiver = j_rarg0;
1435 
1436   __ verify_oop(receiver);
1437   assert_different_registers(receiver, t0, t1);
1438 
1439   __ ic_check();
1440 
1441   int vep_offset = ((intptr_t)__ pc()) - start;
1442 
1443   // If we have to make this method not-entrant, we'll overwrite its
1444   // first instruction with a jump.
1445   {
1446     Assembler::IncompressibleScope scope(masm); // keep the nop as 4 bytes for patching.
1447     MacroAssembler::assert_alignment(__ pc());
1448     __ nop();  // 4 bytes
1449   }
1450 
1451   if (method->needs_clinit_barrier()) {
1452     assert(VM_Version::supports_fast_class_init_checks(), "sanity");
1453     Label L_skip_barrier;
1454     __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
1455     __ clinit_barrier(t1, t0, &L_skip_barrier);
1456     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1457 
1458     __ bind(L_skip_barrier);
1459   }
1460 
1461   // Generate stack overflow check
1462   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1463 
1464   // Generate a new frame for the wrapper.
1465   __ enter();
1466   // -2 because return address is already present and so is saved fp
1467   __ sub(sp, sp, stack_size - 2 * wordSize);
1468 
1469   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1470   assert_cond(bs != nullptr);
1471   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1472 
1473   // Frame is now completed as far as size and linkage.
1474   int frame_complete = ((intptr_t)__ pc()) - start;
1475 
1476   // We use x18 as the oop handle for the receiver/klass
1477   // It is callee save so it survives the call to native
1478 
1479   const Register oop_handle_reg = x18;
1480 
1481   //
1482   // We immediately shuffle the arguments so that any vm call we have to
1483   // make from here on out (sync slow path, jvmti, etc.) we will have
1484   // captured the oops from our caller and have a valid oopMap for
1485   // them.
1486 
1487   // -----------------
1488   // The Grand Shuffle
1489 
1490   // The Java calling convention is either equal (linux) or denser (win64) than the
1491   // c calling convention. However, because of the jni_env argument the c calling
1492   // convention always has at least one more (and two for static) arguments than Java.
1493   // Therefore if we move the args from java -> c backwards then we will never have
1494   // a register->register conflict, and we don't have to build a dependency graph
1495   // and figure out how to break any cycles.
1496   //
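       // Illustrative example: with the c args shifted up by one, we move java
       // arg n-1 into its (higher-numbered) c slot first, then n-2, and so on,
       // so no move ever clobbers a source that has not been read yet.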
1497 
1498   // Record esp-based slot for receiver on stack for non-static methods
1499   int receiver_offset = -1;
1500 
1501   // This is a trick. We double the stack slots so we can claim
1502   // the oops in the caller's frame. Since we are sure to have
1503   // more args than the caller, doubling is enough to make
1504   // sure we can capture all the incoming oop args from the
1505   // caller.
1506   //
1507   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1508   assert_cond(map != nullptr);
1509 
1510   int float_args = 0;
1511   int int_args = 0;
1512 
1513 #ifdef ASSERT
1514   bool reg_destroyed[Register::number_of_registers];
1515   bool freg_destroyed[FloatRegister::number_of_registers];
1516   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1517     reg_destroyed[r] = false;
1518   }
1519   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1520     freg_destroyed[f] = false;
1521   }
1522 
1523 #endif /* ASSERT */
1524 
1525   // For JNI natives the incoming and outgoing registers are offset upwards.
1526   GrowableArray<int> arg_order(2 * total_in_args);
1527 
1528   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1529     arg_order.push(i);
1530     arg_order.push(c_arg);
1531   }
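       // E.g. for a non-static method with total_in_args == 3 (total_c_args == 4),
       // arg_order holds the pairs { 2,3, 1,2, 0,1 }: java arg -> c arg, last
       // argument first.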
1532 
1533   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1534     int i = arg_order.at(ai);
1535     int c_arg = arg_order.at(ai + 1);
1536     __ block_comment(err_msg("mv %d -> %d", i, c_arg));
1537     assert(c_arg != -1 && i != -1, "wrong order");
1538 #ifdef ASSERT
1539     if (in_regs[i].first()->is_Register()) {
1540       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1541     } else if (in_regs[i].first()->is_FloatRegister()) {
1542       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1543     }
1544     if (out_regs[c_arg].first()->is_Register()) {
1545       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1546     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1547       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1548     }
1549 #endif /* ASSERT */
1550     switch (in_sig_bt[i]) {
1551       case T_ARRAY:
1552       case T_OBJECT:
1553         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1554                        ((i == 0) && (!is_static)),
1555                        &receiver_offset);
1556         int_args++;
1557         break;
1558       case T_VOID:
1559         break;
1560 
1561       case T_FLOAT:
1562         __ float_move(in_regs[i], out_regs[c_arg]);
1563         float_args++;
1564         break;
1565 
1566       case T_DOUBLE:
1567         assert(i + 1 < total_in_args &&
1568                in_sig_bt[i + 1] == T_VOID &&
1569                out_sig_bt[c_arg + 1] == T_VOID, "bad arg list");
1570         __ double_move(in_regs[i], out_regs[c_arg]);
1571         float_args++;
1572         break;
1573 
1574       case T_LONG :
1575         __ long_move(in_regs[i], out_regs[c_arg]);
1576         int_args++;
1577         break;
1578 
1579       case T_ADDRESS:
1580         assert(false, "found T_ADDRESS in java args");
1581         break;
1582 
1583       default:
1584         __ move32_64(in_regs[i], out_regs[c_arg]);
1585         int_args++;
1586     }
1587   }
1588 
1589   // point c_arg at the first arg that is already loaded in case we
1590   // need to spill before we call out
1591   int c_arg = total_c_args - total_in_args;
1592 
1593   // Pre-load a static method's oop into c_rarg1.
1594   if (method->is_static()) {
1595 
1596     //  load oop into a register
1597     __ movoop(c_rarg1,
1598               JNIHandles::make_local(method->method_holder()->java_mirror()));
1599 
1600     // Now handlize the static class mirror; it's known not-null.
1601     __ sd(c_rarg1, Address(sp, klass_offset));
1602     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1603 
1604     // Now get the handle
1605     __ la(c_rarg1, Address(sp, klass_offset));
1606     // and protect the arg if we must spill
1607     c_arg--;
1608   }
1609 
1610   // Change state to native (we save the return address in the thread, since it might not
1611   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1612   // points into the right code segment. It does not have to be the correct return pc.
1613   // We use the same pc/oopMap repeatedly when we call out.
1614 
1615   Label native_return;
1616   if (method->is_object_wait0()) {
1617     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1618     __ set_last_Java_frame(sp, noreg, native_return, t0);
1619   } else {
1620     intptr_t the_pc = (intptr_t) __ pc();
1621     oop_maps->add_gc_map(the_pc - start, map);
1622 
1623     __ set_last_Java_frame(sp, noreg, __ pc(), t0);
1624   }
1625 
1626   Label dtrace_method_entry, dtrace_method_entry_done;
1627   if (DTraceMethodProbes) {
1628     __ j(dtrace_method_entry);
1629     __ bind(dtrace_method_entry_done);
1630   }
1631 
1632   // RedefineClasses() tracing support for obsolete method entry
1633   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1634     // protect the args we've loaded
1635     save_args(masm, total_c_args, c_arg, out_regs);
1636     __ mov_metadata(c_rarg1, method());
1637     __ call_VM_leaf(
1638       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1639       xthread, c_rarg1);
1640     restore_args(masm, total_c_args, c_arg, out_regs);
1641   }
1642 
1643   // Lock a synchronized method
1644 
1645   // Register definitions used by locking and unlocking
1646 
1647   const Register swap_reg = x10;
1648   const Register obj_reg  = x9;  // Will contain the oop
1649   const Register lock_reg = x30;  // Address of compiler lock object (BasicLock)
1650   const Register old_hdr  = x30;  // value of old header at unlock time
1651   const Register lock_tmp = x31;  // Temporary used by fast_lock/unlock
1652   const Register tmp      = ra;
1653 
1654   Label slow_path_lock;
1655   Label lock_done;
1656 
1657   if (method->is_synchronized()) {
1658     // Get the handle (the 2nd argument)
1659     __ mv(oop_handle_reg, c_rarg1);
1660 
1661     // Get address of the box
1662 
1663     __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1664 
1665     // Load the oop from the handle
1666     __ ld(obj_reg, Address(oop_handle_reg, 0));
1667 
1668     __ fast_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1669 
1670     // Slow path will re-enter here
1671     __ bind(lock_done);
1672   }
1673 
1674 
1675   // Finally just about ready to make the JNI call
1676 
1677   // get JNIEnv* which is first argument to native
1678   __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1679 
1680   // Now set thread in native
1681   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1682   __ mv(t0, _thread_in_native);
1683   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1684   __ sw(t0, Address(t1));
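       // The preceding membar makes this a release store of the thread state:
       // all Java-visible memory effects are published before we are officially
       // "in native".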
1685 
1686   // Clobbers t1
1687   __ rt_call(native_func);
1688 
1689   // Verify or restore cpu control state after JNI call
1690   __ restore_cpu_control_state_after_jni(t0);
1691 
1692   // Unpack native results.
1693   if (ret_type != T_OBJECT && ret_type != T_ARRAY) {
1694     __ cast_primitive_type(ret_type, x10);
1695   }
1696 
1697   Label safepoint_in_progress, safepoint_in_progress_done;
1698 
1699   // Switch thread to "native transition" state before reading the synchronization state.
1700   // This additional state is necessary because reading and testing the synchronization
1701   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1702   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1703   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1704   //     Thread A is resumed to finish this native method, but doesn't block here since it
1705   //     didn't see any synchronization in progress, and escapes.
1706   __ mv(t0, _thread_in_native_trans);
1707 
1708   __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
1709 
1710   // Force this write out before the read below
1711   if (!UseSystemMemoryBarrier) {
1712     __ membar(MacroAssembler::AnyAny);
1713   }
1714 
1715   // check for safepoint operation in progress and/or pending suspend requests
1716   {
1717     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
1718     __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
1719     __ bnez(t0, safepoint_in_progress);
1720     __ bind(safepoint_in_progress_done);
1721   }
1722 
1723   // change thread state
1724   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1725   __ mv(t0, _thread_in_Java);
1726   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1727   __ sw(t0, Address(t1));
1728 
1729   if (method->is_object_wait0()) {
1730     // Check preemption for Object.wait()
1731     __ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1732     __ beqz(t1, native_return);
1733     __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
1734     __ jr(t1);
1735     __ bind(native_return);
1736 
1737     intptr_t the_pc = (intptr_t) __ pc();
1738     oop_maps->add_gc_map(the_pc - start, map);
1739   }
1740 
1741   Label reguard;
1742   Label reguard_done;
1743   __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1744   __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1745   __ beq(t0, t1, reguard);
1746   __ bind(reguard_done);
1747 
1748   // native result if any is live
1749 
1750   // Unlock
1751   Label unlock_done;
1752   Label slow_path_unlock;
1753   if (method->is_synchronized()) {
1754 
1755     // Get locked oop from the handle we passed to jni
1756     __ ld(obj_reg, Address(oop_handle_reg, 0));
1757 
1758     // Must save x10 if it is live now, because cmpxchg must use it
1759     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1760       save_native_result(masm, ret_type, stack_slots);
1761     }
1762 
1763     __ fast_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1764 
1765     // slow path re-enters here
1766     __ bind(unlock_done);
1767     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1768       restore_native_result(masm, ret_type, stack_slots);
1769     }
1770   }
1771 
1772   Label dtrace_method_exit, dtrace_method_exit_done;
1773   if (DTraceMethodProbes) {
1774     __ j(dtrace_method_exit);
1775     __ bind(dtrace_method_exit_done);
1776   }
1777 
1778   __ reset_last_Java_frame(false);
1779 
1780   // Unbox oop result, e.g. JNIHandles::resolve result.
1781   if (is_reference_type(ret_type)) {
1782     __ resolve_jobject(x10, x11, x12);
1783   }
1784 
1785   if (CheckJNICalls) {
1786     // clear_pending_jni_exception_check
1787     __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
1788   }
1789 
1790   // reset handle block
1791   __ ld(x12, Address(xthread, JavaThread::active_handles_offset()));
1792   __ sd(zr, Address(x12, JNIHandleBlock::top_offset()));
1793 
1794   __ leave();
1795 
1796 #if INCLUDE_JFR
1797   // We need to do a poll test after unwind in case the sampler
1798   // managed to sample the native frame after returning to Java.
1799   Label L_return;
1800   __ ld(t0, Address(xthread, JavaThread::polling_word_offset()));
1801   address poll_test_pc = __ pc();
1802   __ relocate(relocInfo::poll_return_type);
1803   __ test_bit(t0, t0, log2i_exact(SafepointMechanism::poll_bit()));
1804   __ beqz(t0, L_return);
1805   assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
1806          "polling page return stub not created yet");
1807   address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
1808   __ la(t0, InternalAddress(poll_test_pc));
1809   __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
1810   __ far_jump(RuntimeAddress(stub));
1811   __ bind(L_return);
1812 #endif // INCLUDE_JFR
1813 
1814   // Any exception pending?
1815   Label exception_pending;
1816   __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1817   __ bnez(t0, exception_pending);
1818 
1819   // We're done
1820   __ ret();
1821 
1822   // Unexpected paths are out of line and go here
1823 
1824   // Forward the exception
1825   __ bind(exception_pending);
1826
1828   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1829 
1830   // Slow path locking & unlocking
1831   if (method->is_synchronized()) {
1832 
1833     __ block_comment("Slow path lock {");
1834     __ bind(slow_path_lock);
1835 
1836     // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM.
1837     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1838 
1839     // protect the args we've loaded
1840     save_args(masm, total_c_args, c_arg, out_regs);
1841 
1842     __ mv(c_rarg0, obj_reg);
1843     __ mv(c_rarg1, lock_reg);
1844     __ mv(c_rarg2, xthread);
1845 
1846     // Not a leaf but we have last_Java_frame setup as we want.
1847     // We don't want to unmount in case of contention since that would complicate preserving
1848     // the arguments that had already been marshalled into the native convention. So we force
1849     // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
1850     // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
1851     __ push_cont_fastpath();
1852     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1853     __ pop_cont_fastpath();
1854     restore_args(masm, total_c_args, c_arg, out_regs);
1855 
1856 #ifdef ASSERT
1857     { Label L;
1858       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1859       __ beqz(t0, L);
1860       __ stop("no pending exception allowed on exit from monitorenter");
1861       __ bind(L);
1862     }
1863 #endif
1864     __ j(lock_done);
1865 
1866     __ block_comment("} Slow path lock");
1867 
1868     __ block_comment("Slow path unlock {");
1869     __ bind(slow_path_unlock);
1870 
1871     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1872       save_native_result(masm, ret_type, stack_slots);
1873     }
1874 
1875     __ mv(c_rarg2, xthread);
1876     __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1877     __ mv(c_rarg0, obj_reg);
1878 
1879     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1880     // NOTE that obj_reg == x9 currently
1881     __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1882     __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1883 
1884     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1885 
1886 #ifdef ASSERT
1887     {
1888       Label L;
1889       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1890       __ beqz(t0, L);
1891       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
1892       __ bind(L);
1893     }
1894 #endif /* ASSERT */
1895 
1896     __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1897 
1898     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1899       restore_native_result(masm, ret_type, stack_slots);
1900     }
1901     __ j(unlock_done);
1902 
1903     __ block_comment("} Slow path unlock");
1904 
1905   } // synchronized
1906 
1907   // SLOW PATH Reguard the stack if needed
1908 
1909   __ bind(reguard);
1910   save_native_result(masm, ret_type, stack_slots);
1911   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1912   restore_native_result(masm, ret_type, stack_slots);
1913   // and continue
1914   __ j(reguard_done);
1915 
1916   // SLOW PATH safepoint
1917   {
1918     __ block_comment("safepoint {");
1919     __ bind(safepoint_in_progress);
1920 
1921     // Don't use call_VM as it will see a possible pending exception and forward it
1922     // and never return here, preventing us from clearing _last_native_pc down below.
1923     //
1924     save_native_result(masm, ret_type, stack_slots);
1925     __ mv(c_rarg0, xthread);
1926 #ifndef PRODUCT
1927     assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
1928 #endif
1929     __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1930 
1931     // Restore any method result value
1932     restore_native_result(masm, ret_type, stack_slots);
1933 
1934     __ j(safepoint_in_progress_done);
1935     __ block_comment("} safepoint");
1936   }
1937 
1938   // SLOW PATH dtrace support
1939   if (DTraceMethodProbes) {
1940     {
1941       __ block_comment("dtrace entry {");
1942       __ bind(dtrace_method_entry);
1943 
1944       // We have all of the arguments set up at this point. We must not touch any
1945       // argument registers here (if we save/restore them, there are no oops in them).
1946 
1947       save_args(masm, total_c_args, c_arg, out_regs);
1948       __ mov_metadata(c_rarg1, method());
1949       __ call_VM_leaf(
1950         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1951         xthread, c_rarg1);
1952       restore_args(masm, total_c_args, c_arg, out_regs);
1953       __ j(dtrace_method_entry_done);
1954       __ block_comment("} dtrace entry");
1955     }
1956 
1957     {
1958       __ block_comment("dtrace exit {");
1959       __ bind(dtrace_method_exit);
1960       save_native_result(masm, ret_type, stack_slots);
1961       __ mov_metadata(c_rarg1, method());
1962       __ call_VM_leaf(
1963            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1964            xthread, c_rarg1);
1965       restore_native_result(masm, ret_type, stack_slots);
1966       __ j(dtrace_method_exit_done);
1967       __ block_comment("} dtrace exit");
1968     }
1969   }
1970 
1971   __ flush();
1972 
1973   nmethod *nm = nmethod::new_native_nmethod(method,
1974                                             compile_id,
1975                                             masm->code(),
1976                                             vep_offset,
1977                                             frame_complete,
1978                                             stack_slots / VMRegImpl::slots_per_word,
1979                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
1980                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
1981                                             oop_maps);
1982   assert(nm != nullptr, "create native nmethod fail!");
1983   return nm;
1984 }
1985 
1986 // This function returns the adjustment size (in number of words) to a c2i adapter
1987 // activation for use during deoptimization.
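     // Illustrative example: with callee_parameters == 2, callee_locals == 5 and
     // Interpreter::stackElementWords == 1 (one word per stack element on 64-bit),
     // diff == 3 words, which align_up rounds to 4 to keep frames 16-byte aligned.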
1988 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
1989   assert(callee_locals >= callee_parameters,
1990          "test and remove; got more parms than locals");
1991   if (callee_locals < callee_parameters) {
1992     return 0;                   // No adjustment for negative locals
1993   }
1994   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
1995   // diff is counted in stack words
1996   return align_up(diff, 2);
1997 }
1998 
1999 //------------------------------generate_deopt_blob----------------------------
2000 void SharedRuntime::generate_deopt_blob() {
2001   // Allocate space for the code
2002   ResourceMark rm;
2003   // Setup code generation tools
2004   int pad = 0;
2005 #if INCLUDE_JVMCI
2006   if (EnableJVMCI) {
2007     pad += 512; // Increase the buffer size when compiling for JVMCI
2008   }
2009 #endif
2010   const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
2011   CodeBuffer buffer(name, 2048 + pad, 1024);
2012   MacroAssembler* masm = new MacroAssembler(&buffer);
2013   int frame_size_in_words = -1;
2014   OopMap* map = nullptr;
2015   OopMapSet *oop_maps = new OopMapSet();
2016   assert_cond(masm != nullptr && oop_maps != nullptr);
2017   RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0);
2018 
2019   // -------------
2020   // This code enters when returning to a de-optimized nmethod.  A return
2021   // address has been pushed on the stack, and return values are in
2022   // registers.
2023   // If we are doing a normal deopt then we were called from the patched
2024   // nmethod from the point we returned to the nmethod. So the return
2025   // address on the stack is wrong by NativeCall::instruction_size
2026   // We will adjust the value so it looks like we have the original return
2027   // address on the stack (like when we eagerly deoptimized).
2028   // In the case of an exception pending when deoptimizing, we enter
2029   // with a return address on the stack that points after the call we patched
2030   // into the exception handler. We have the following register state from,
2031   // e.g., the forward exception stub (see stubGenerator_riscv.cpp).
2032   //    x10: exception oop
2033   //    x9: exception handler
2034   //    x13: throwing pc
2035   // So in this case we simply jam x13 into the useless return address and
2036   // the stack looks just like we want.
2037   //
2038   // At this point we need to de-opt.  We save the argument return
2039   // registers.  We call the first C routine, fetch_unroll_info().  This
2040   // routine captures the return values and returns a structure which
2041   // describes the current frame size and the sizes of all replacement frames.
2042   // The current frame is compiled code and may contain many inlined
2043   // functions, each with their own JVM state.  We pop the current frame, then
2044   // push all the new frames.  Then we call the C routine unpack_frames() to
2045   // populate these frames.  Finally unpack_frames() returns us the new target
2046   // address.  Notice that callee-save registers are BLOWN here; they have
2047   // already been captured in the vframeArray at the time the return PC was
2048   // patched.
2049   address start = __ pc();
2050   Label cont;
2051 
2052   // Prolog for the non-exception case!
2053 
2054   // Save everything in sight.
2055   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2056 
2057   // Normal deoptimization.  Save exec mode for unpack_frames.
2058   __ mv(xcpool, Deoptimization::Unpack_deopt); // callee-saved
2059   __ j(cont);
2060 
2061   int reexecute_offset = __ pc() - start;
2062 #if INCLUDE_JVMCI && !defined(COMPILER1)
2063   if (UseJVMCICompiler) {
2064     // JVMCI does not use this kind of deoptimization
2065     __ should_not_reach_here();
2066   }
2067 #endif
2068 
2069   // Reexecute case
2070   // The return address is the pc that describes what bci to re-execute at
2071 
2072   // No need to update map as each call to save_live_registers will produce an identical oopmap
2073   (void) reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2074 
2075   __ mv(xcpool, Deoptimization::Unpack_reexecute); // callee-saved
2076   __ j(cont);
2077 
2078 #if INCLUDE_JVMCI
2079   Label after_fetch_unroll_info_call;
2080   int implicit_exception_uncommon_trap_offset = 0;
2081   int uncommon_trap_offset = 0;
2082 
2083   if (EnableJVMCI) {
2084     implicit_exception_uncommon_trap_offset = __ pc() - start;
2085 
2086     __ ld(ra, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2087     __ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2088 
2089     uncommon_trap_offset = __ pc() - start;
2090 
2091     // Save everything in sight.
2092     reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2093     // fetch_unroll_info needs to call last_java_frame()
2094     Label retaddr;
2095     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2096 
2097     __ lw(c_rarg1, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2098     __ mv(t0, -1);
2099     __ sw(t0, Address(xthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2100 
2101     __ mv(xcpool, Deoptimization::Unpack_reexecute);
2102     __ mv(c_rarg0, xthread);
2103     __ orrw(c_rarg2, zr, xcpool); // exec mode
2104     __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
2105     __ bind(retaddr);
2106     oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
2107 
2108     __ reset_last_Java_frame(false);
2109 
2110     __ j(after_fetch_unroll_info_call);
2111   } // EnableJVMCI
2112 #endif // INCLUDE_JVMCI
2113 
2114   int exception_offset = __ pc() - start;
2115 
2116   // Prolog for exception case
2117 
2118   // All registers are dead at this entry point, except for x10 and
2119   // x13, which contain the exception oop and exception pc
2120   // respectively.  Set them in TLS and fall thru to the
2121   // unpack_with_exception_in_tls entry point.
2122 
2123   __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
2124   __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
2125 
2126   int exception_in_tls_offset = __ pc() - start;
2127 
2128   // new implementation because exception oop is now passed in JavaThread
2129 
2130   // Prolog for exception case
2131   // All registers must be preserved because they might be used by LinearScan
2132   // Exception oop and throwing PC are passed in JavaThread
2133   // tos: stack at point of call to method that threw the exception (i.e. only
2134   // args are on the stack, no return address)
2135 
2136   // The return address pushed by save_live_registers will be patched
2137   // later with the throwing pc. The correct value is not available
2138   // now because loading it from memory would destroy registers.
2139 
2140   // NB: The SP at this point must be the SP of the method that is
2141   // being deoptimized.  Deoptimization assumes that the frame created
2142   // here by save_live_registers is immediately below the method's SP.
2143   // This is a somewhat fragile mechanism.
2144 
2145   // Save everything in sight.
2146   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2147 
2148   // Now it is safe to overwrite any register
2149 
2150   // Deopt during an exception.  Save exec mode for unpack_frames.
2151   __ mv(xcpool, Deoptimization::Unpack_exception); // callee-saved
2152 
2153   // load throwing pc from JavaThread and patch it as the return address
2154   // of the current frame. Then clear the field in JavaThread
2155 
2156   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2157   __ sd(x13, Address(fp, frame::return_addr_offset * wordSize));
2158   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2159 
2160 #ifdef ASSERT
2161   // verify that there is really an exception oop in JavaThread
2162   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2163   __ verify_oop(x10);
2164 
2165   // verify that there is no pending exception
2166   Label no_pending_exception;
2167   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2168   __ beqz(t0, no_pending_exception);
2169   __ stop("must not have pending exception here");
2170   __ bind(no_pending_exception);
2171 #endif
2172 
2173   __ bind(cont);
2174 
2175   // Call C code.  Need thread and this frame, but NOT official VM entry
2176   // crud.  We cannot block on this call, no GC can happen.
2177   //
2178   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2179 
2180   // fetch_unroll_info needs to call last_java_frame().
2181 
2182   Label retaddr;
2183   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2184 #ifdef ASSERT
2185   {
2186     Label L;
2187     __ ld(t0, Address(xthread, JavaThread::last_Java_fp_offset()));
2189     __ beqz(t0, L);
2190     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2191     __ bind(L);
2192   }
2193 #endif // ASSERT
2194   __ mv(c_rarg0, xthread);
2195   __ mv(c_rarg1, xcpool);
2196   __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
2197   __ bind(retaddr);
2198 
2199   // Need to have an oopmap that tells fetch_unroll_info where to
2200   // find any register it might need.
2201   oop_maps->add_gc_map(__ pc() - start, map);
2202 
2203   __ reset_last_Java_frame(false);
2204 
2205 #if INCLUDE_JVMCI
2206   if (EnableJVMCI) {
2207     __ bind(after_fetch_unroll_info_call);
2208   }
2209 #endif
2210 
2211   // Load UnrollBlock* into x15
2212   __ mv(x15, x10);
2213 
2214   __ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset()));
2215   Label noException;
2216   __ mv(t0, Deoptimization::Unpack_exception);
2217   __ bne(xcpool, t0, noException); // Was exception pending?
2218   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2219   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2220   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
2221   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2222 
2223   __ verify_oop(x10);
2224 
2225   // Overwrite the result registers with the exception results.
2226   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2227 
2228   __ bind(noException);
2229 
2230   // Only register save data is on the stack.
2231   // Now restore the result registers.  Everything else is either dead
2232   // or captured in the vframeArray.
2233 
2234   // Restore fp result register
2235   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2236   // Restore integer result register
2237   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2238 
2239   // Pop all of the register save area off the stack
2240   __ add(sp, sp, frame_size_in_words * wordSize);
2241 
2242   // All of the register save area has been popped off the stack. Only the
2243   // return address remains.
2244 
2245   // Pop all the frames we must move/replace.
2246   //
2247   // Frame picture (youngest to oldest)
2248   // 1: self-frame (no frame link)
2249   // 2: deopting frame  (no frame link)
2250   // 3: caller of deopting frame (could be compiled/interpreted).
2251   //
2252   // Note: by leaving the return address of self-frame on the stack
2253   // and using the size of frame 2 to adjust the stack
2254   // when we are done, the return address to frame 3 will still be on the stack.
2255 
2256   // Pop deoptimized frame
2257   __ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
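       // The recorded frame size includes the two words for fp and ra, which we
       // pop explicitly below rather than as part of the sp adjustment.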
2258   __ subi(x12, x12, 2 * wordSize);
2259   __ add(sp, sp, x12);
2260   __ ld(fp, Address(sp, 0));
2261   __ ld(ra, Address(sp, wordSize));
2262   __ addi(sp, sp, 2 * wordSize);
2263   // RA should now be the return address to the caller (3)
2264 
2265 #ifdef ASSERT
2266   // Compilers generate code that bangs the stack by as much as the
2267   // interpreter would need. So this stack banging should never
2268   // trigger a fault. Verify that it does not on non product builds.
2269   __ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2270   __ bang_stack_size(x9, x12);
2271 #endif
2272   // Load address of array of frame pcs into x12
2273   __ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset()));
2274 
2275   // Load address of array of frame sizes into x14
2276   __ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset()));
2277 
2278   // Load counter into x13
2279   __ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset()));
2280 
2281   // Now adjust the caller's stack to make up for the extra locals,
2282   // but record the original sp so that we can save it in the skeletal interpreter
2283   // frame and the stack walking of interpreter_sender will get the unextended sp
2284   // value and not the "real" sp value.
2285 
2286   const Register sender_sp = x16;
2287 
2288   __ mv(sender_sp, sp);
2289   __ lwu(x9, Address(x15,
2290                      Deoptimization::UnrollBlock::
2291                      caller_adjustment_offset()));
2292   __ sub(sp, sp, x9);
2293 
2294   // Push interpreter frames in a loop
2295   __ mv(t0, 0xDEADDEAD);               // Make a recognizable pattern
2296   __ mv(t1, t0);
2297   Label loop;
2298   __ bind(loop);
2299   __ ld(x9, Address(x14, 0));          // Load frame size
2300   __ addi(x14, x14, wordSize);
2301   __ subi(x9, x9, 2 * wordSize);       // We'll push pc and fp by hand
2302   __ ld(ra, Address(x12, 0));          // Load pc
2303   __ addi(x12, x12, wordSize);
2304   __ enter();                          // Save old & set new fp
2305   __ sub(sp, sp, x9);                  // Prolog
2306   // This value is corrected by layout_activation_impl
2307   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
2308   __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2309   __ mv(sender_sp, sp);                // Pass sender_sp to next frame
2310   __ subi(x13, x13, 1);                // Decrement counter
2311   __ bnez(x13, loop);
2312 
2313   // Re-push self-frame
2314   __ ld(ra, Address(x12));
2315   __ enter();
2316 
2317   // Allocate a full sized register save area.  We subtract 2 because
2318   // enter() just pushed 2 words
2319   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2320 
2321   // Restore frame locals after moving the frame
2322   __ fsd(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2323   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2324 
2325   // Call C code.  Need thread but NOT official VM entry
2326   // crud.  We cannot block on this call, no GC can happen.  Call should
2327   // restore return values to their stack-slots with the new SP.
2328   //
2329   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2330 
2331   // Use fp because the frames look interpreted now
2332   // Don't need the precise return PC here, just precise enough to point into this code blob.
2333   address the_pc = __ pc();
2334   __ set_last_Java_frame(sp, fp, the_pc, t0);
2335 
2336   __ mv(c_rarg0, xthread);
2337   __ mv(c_rarg1, xcpool); // second arg: exec_mode
2338   __ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
2339 
2340   // Set an oopmap for the call site
2341   // Use the same PC we used for the last java frame
2342   oop_maps->add_gc_map(the_pc - start,
2343                        new OopMap(frame_size_in_words, 0));
2344 
2345   // Clear fp AND pc
2346   __ reset_last_Java_frame(true);
2347 
2348   // Collect return values
2349   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2350   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2351 
2352   // Pop self-frame.
2353   __ leave();                           // Epilog
2354 
2355   // Jump to interpreter
2356   __ ret();
2357 
2358   // Make sure all code is generated
2359   masm->flush();
2360 
2361   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2362   assert(_deopt_blob != nullptr, "create deoptimization blob fail!");
2363   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2364 #if INCLUDE_JVMCI
2365   if (EnableJVMCI) {
2366     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2367     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2368   }
2369 #endif
2370 }
2371 
2372 // Number of stack slots between incoming argument block and the start of
2373 // a new frame. The PROLOG must add this many slots to the stack. The
2374 // EPILOG must remove this many slots.
2375 // RISCV needs two words for RA (return address) and FP (frame pointer).
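     // With 4-byte stack slots and 8-byte words (slots_per_word == 2), that is 4 slots.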
2376 uint SharedRuntime::in_preserve_stack_slots() {
2377   return 2 * VMRegImpl::slots_per_word;
2378 }
2379 
2380 uint SharedRuntime::out_preserve_stack_slots() {
2381   return 0;
2382 }
2383 
2384 VMReg SharedRuntime::thread_register() {
2385   return xthread->as_VMReg();
2386 }
2387 
2388 //------------------------------generate_handler_blob------
2389 //
2390 // Generate a special Compile2Runtime blob that saves all registers,
2391 // and setup oopmap.
2392 //
2393 SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
2394   assert(is_polling_page_id(id), "expected a polling page stub id");
2395 
2396   ResourceMark rm;
2397   OopMapSet *oop_maps = new OopMapSet();
2398   assert_cond(oop_maps != nullptr);
2399   OopMap* map = nullptr;
2400 
2401   // Allocate space for the code.  Setup code generation tools.
2402   const char* name = SharedRuntime::stub_name(id);
2403   CodeBuffer buffer(name, 2048, 1024);
2404   MacroAssembler* masm = new MacroAssembler(&buffer);
2405   assert_cond(masm != nullptr);
2406 
2407   address start   = __ pc();
2408   address call_pc = nullptr;
2409   int frame_size_in_words = -1;
2410   bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
2411   RegisterSaver reg_saver(id == StubId::shared_polling_page_vectors_safepoint_handler_id /* save_vectors */);
2412 
2413   // Save Integer and Float registers.
2414   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2415 
2416   // The following is basically a call_VM.  However, we need the precise
2417   // address of the call in order to generate an oopmap. Hence, we do all the
2418   // work ourselves.
2419 
2420   Label retaddr;
2421   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2422 
2423   // The return address must always be correct so that the frame constructor never
2424   // sees an invalid pc.
2425 
2426   if (!cause_return) {
2427     // overwrite the return address pushed by save_live_registers
2428     // Additionally, x18 is a callee-saved register so we can look at
2429     // it later to determine if someone changed the return address for
2430     // us!
2431     __ ld(x18, Address(xthread, JavaThread::saved_exception_pc_offset()));
2432     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2433   }
2434 
2435   // Do the call
2436   __ mv(c_rarg0, xthread);
2437   __ rt_call(call_ptr);
2438   __ bind(retaddr);
2439 
2440   // Set an oopmap for the call site.  This oopmap will map all
2441   // oop-registers and debug-info registers as callee-saved.  This
2442   // will allow deoptimization at this safepoint to find all possible
2443   // debug-info recordings, as well as let GC find all oops.
2444 
2445   oop_maps->add_gc_map(__ pc() - start, map);
2446 
2447   Label noException;
2448 
2449   __ reset_last_Java_frame(false);
2450 
2451   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2452 
2453   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2454   __ beqz(t0, noException);
2455 
2456   // Exception pending
2457 
2458   reg_saver.restore_live_registers(masm);
2459 
2460   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2461 
2462   // No exception case
2463   __ bind(noException);
2464 
2465   Label no_adjust, bail;
2466   if (!cause_return) {
2467     // If our stashed return pc was modified by the runtime we avoid touching it
2468     __ ld(t0, Address(fp, frame::return_addr_offset * wordSize));
2469     __ bne(x18, t0, no_adjust);
2470 
2471 #ifdef ASSERT
2472     // Verify the correct encoding of the poll we're about to skip.
2473     // See NativeInstruction::is_lwu_to_zr()
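         // A match means the instruction is "lwu zr, off(base)": a LOAD opcode
         // (bits 0-6 == 0b0000011) with rd == x0 (bits 7-11 == 0) and
         // funct3 == 0b110 (LWU), which is what the safepoint poll emits.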
2474     __ lwu(t0, Address(x18));
2475     __ andi(t1, t0, 0b1111111);
2476     __ mv(t2, 0b0000011);
2477     __ bne(t1, t2, bail); // 0-6:0b0000011
2478     __ srli(t1, t0, 7);
2479     __ andi(t1, t1, 0b11111);
2480     __ bnez(t1, bail);    // 7-11:0b00000
2481     __ srli(t1, t0, 12);
2482     __ andi(t1, t1, 0b111);
2483     __ mv(t2, 0b110);
2484     __ bne(t1, t2, bail); // 12-14:0b110
2485 #endif
2486 
2487     // Adjust return pc forward to step over the safepoint poll instruction
2488     __ addi(x18, x18, NativeInstruction::instruction_size);
2489     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2490   }
2491 
2492   __ bind(no_adjust);
2493   // Normal exit, restore registers and exit.
2494 
2495   reg_saver.restore_live_registers(masm);
2496   __ ret();
2497 
2498 #ifdef ASSERT
2499   __ bind(bail);
2500   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2501 #endif
2502 
2503   // Make sure all code is generated
2504   masm->flush();
2505 
2506   // Fill-out other meta info
2507   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2508 }
2509 
2510 //
2511 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2512 //
2513 // Generate a stub that calls into vm to find out the proper destination
2514 // of a java call. All the argument registers are live at this point,
2515 // but since this is generic code we don't know what they are, and the
2516 // caller must do any gc of the args.
2517 //
2518 RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
2519   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2520   assert(is_resolve_id(id), "expected a resolve stub id");
2521 
2522   // allocate space for the code
2523   ResourceMark rm;
2524 
2525   const char* name = SharedRuntime::stub_name(id);
2526   CodeBuffer buffer(name, 1000, 512);
2527   MacroAssembler* masm = new MacroAssembler(&buffer);
2528   assert_cond(masm != nullptr);
2529 
2530   int frame_size_in_words = -1;
2531   RegisterSaver reg_saver(false /* save_vectors */);
2532 
2533   OopMapSet *oop_maps = new OopMapSet();
2534   assert_cond(oop_maps != nullptr);
2535   OopMap* map = nullptr;
2536 
2537   int start = __ offset();
2538 
2539   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2540 
2541   int frame_complete = __ offset();
2542 
2543   {
2544     Label retaddr;
2545     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2546 
2547     __ mv(c_rarg0, xthread);
2548     __ rt_call(destination);
2549     __ bind(retaddr);
2550   }
2551 
2552   // Set an oopmap for the call site.
2553   // We need this not only for callee-saved registers, but also for volatile
2554   // registers that the compiler might be keeping live across a safepoint.
2555 
2556   oop_maps->add_gc_map(__ offset() - start, map);
2557 
2558   // x10 contains the address we are going to jump to, assuming no exception got installed
2559 
2560   // clear last_Java_sp
2561   __ reset_last_Java_frame(false);
2562   // check for pending exceptions
2563   Label pending;
2564   __ ld(t1, Address(xthread, Thread::pending_exception_offset()));
2565   __ bnez(t1, pending);
2566 
2567   // get the returned Method*
2568   __ get_vm_result_metadata(xmethod, xthread);
2569   __ sd(xmethod, Address(sp, reg_saver.reg_offset_in_bytes(xmethod)));
2570 
2571   // x10 is where we want to jump; overwrite t1, which is saved and temporary
2572   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t1)));
2573   reg_saver.restore_live_registers(masm);
2574 
2575   // We are back to the original state on entry and ready to go.
2576   __ jr(t1);
2577 
2578   // Pending exception after the safepoint
2579 
2580   __ bind(pending);
2581 
2582   reg_saver.restore_live_registers(masm);
2583 
2584   // exception pending => remove activation and forward to exception handler
2585 
2586   __ sd(zr, Address(xthread, JavaThread::vm_result_oop_offset()));
2587 
2588   __ ld(x10, Address(xthread, Thread::pending_exception_offset()));
2589   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2590 
2591   // -------------
2592   // make sure all code is generated
2593   masm->flush();
2594 
2595   // return the blob
2596   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2597 }
2598 
2599 // Continuation point for throwing of implicit exceptions that are
2600 // not handled in the current activation. Fabricates an exception
2601 // oop and initiates normal exception dispatching in this
2602 // frame. Since we need to preserve callee-saved values (currently
2603 // only for C2, but done for C1 as well) we need a callee-saved oop
2604 // map and therefore have to make these stubs into RuntimeStubs
2605 // rather than BufferBlobs.  If the compiler needs all registers to
2606 // be preserved between the fault point and the exception handler
2607 // then it must assume responsibility for that in
2608 // AbstractCompiler::continuation_for_implicit_null_exception or
2609 // continuation_for_implicit_division_by_zero_exception. All other
2610 // implicit exceptions (e.g., NullPointerException or
2611 // AbstractMethodError on entry) are either at call sites or
2612 // otherwise assume that stack unwinding will be initiated, so
2613 // caller saved registers were assumed volatile in the compiler.
2614 
2615 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
2616   assert(is_throw_id(id), "expected a throw stub id");
2617 
2618   const char* name = SharedRuntime::stub_name(id);
2619 
2620   // Information about frame layout at time of blocking runtime call.
2621   // Note that we only have to preserve callee-saved registers since
2622   // the compilers are responsible for supplying a continuation point
2623   // if they expect all registers to be preserved.
2624   // n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0
2625   assert_cond(runtime_entry != nullptr);
2626   enum layout {
2627     fp_off = 0,
2628     fp_off2,
2629     return_off,
2630     return_off2,
2631     framesize // inclusive of return address
2632   };
2633 
2634   const int insts_size = 1024;
2635   const int locs_size  = 64;
2636 
2637   ResourceMark rm;
2638   const char* timer_msg = "SharedRuntime generate_throw_exception";
2639   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
2640 
2641   CodeBuffer code(name, insts_size, locs_size);
2642   OopMapSet* oop_maps  = new OopMapSet();
2643   MacroAssembler* masm = new MacroAssembler(&code);
2644   assert_cond(oop_maps != nullptr && masm != nullptr);
2645 
2646   address start = __ pc();
2647 
2648   // This is an inlined and slightly modified version of call_VM
2649   // which has the ability to fetch the return PC out of
2650   // thread-local storage and also sets up last_Java_sp slightly
2651   // differently than the real call_VM.
2652 
2653   __ enter(); // Save FP and RA before call
2654 
2655   assert(is_even(framesize / 2), "sp not 16-byte aligned");
2656 
2657   // ra and fp are already in place
2658   __ subi(sp, fp, (unsigned)framesize << LogBytesPerInt); // prolog
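       // framesize is counted in 4-byte slots (hence << LogBytesPerInt, i.e. << 2),
       // so this sets sp = fp - 16 bytes: the frame holds exactly the ra/fp pair
       // that enter() saved.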
2659 
2660   int frame_complete = __ pc() - start;
2661 
2662   // Set up last_Java_sp and last_Java_fp
2663   address the_pc = __ pc();
2664   __ set_last_Java_frame(sp, fp, the_pc, t0);
2665 
2666   // Call runtime
2667   __ mv(c_rarg0, xthread);
2668   BLOCK_COMMENT("call runtime_entry");
2669   __ rt_call(runtime_entry);
2670 
2671   // Generate oop map
2672   OopMap* map = new OopMap(framesize, 0);
2673   assert_cond(map != nullptr);
2674 
2675   oop_maps->add_gc_map(the_pc - start, map);
2676 
2677   __ reset_last_Java_frame(true);
2678 
2679   __ leave();
2680 
2681   // check for pending exceptions
2682 #ifdef ASSERT
2683   Label L;
2684   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2685   __ bnez(t0, L);
2686   __ should_not_reach_here();
2687   __ bind(L);
2688 #endif // ASSERT
2689   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2690 
2691   // codeBlob framesize is in words (not VMRegImpl::slot_size)
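       // framesize is in 4-byte slots; >> (LogBytesPerWord - LogBytesPerInt),
       // i.e. >> 1, converts it to 8-byte words.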
2692   RuntimeStub* stub =
2693     RuntimeStub::new_runtime_stub(name,
2694                                   &code,
2695                                   frame_complete,
2696                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2697                                   oop_maps, false);
2698   assert(stub != nullptr, "create runtime stub fail!");
2699   return stub;
2700 }
2701 
2702 #if INCLUDE_JFR
2703 
2704 static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
2705   __ set_last_Java_frame(sp, fp, the_pc, t0);
2706   __ mv(c_rarg0, thread);
2707 }
2708 
2709 static void jfr_epilogue(MacroAssembler* masm) {
2710   __ reset_last_Java_frame(true);
2711 }
2712 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
2713 // It returns a jobject handle to the event writer.
2714 // The handle is dereferenced and the return value is the event writer oop.
2715 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
2716   enum layout {
2717     fp_off,
2718     fp_off2,
2719     return_off,
2720     return_off2,
2721     framesize // inclusive of return address
2722   };
2723 
2724   int insts_size = 1024;
2725   int locs_size = 64;
2726   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id);
2727   CodeBuffer code(name, insts_size, locs_size);
2728   OopMapSet* oop_maps = new OopMapSet();
2729   MacroAssembler* masm = new MacroAssembler(&code);
2730 
2731   address start = __ pc();
2732   __ enter();
2733   int frame_complete = __ pc() - start;
2734   address the_pc = __ pc();
2735   jfr_prologue(the_pc, masm, xthread);
2736   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
2737 
2738   jfr_epilogue(masm);
2739   __ resolve_global_jobject(x10, t0, t1);
2740   __ leave();
2741   __ ret();
2742 
2743   OopMap* map = new OopMap(framesize, 1);
2744   oop_maps->add_gc_map(the_pc - start, map);
2745 
2746   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
2747     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2748                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2749                                   oop_maps, false);
2750   return stub;
2751 }
2752 
2753 // For c2: call to return a leased buffer.
2754 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
2755   enum layout {
2756     fp_off,
2757     fp_off2,
2758     return_off,
2759     return_off2,
2760     framesize // inclusive of return address
2761   };
2762 
2763   int insts_size = 1024;
2764   int locs_size = 64;
2765   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id);
2766   CodeBuffer code(name, insts_size, locs_size);
2767   OopMapSet* oop_maps = new OopMapSet();
2768   MacroAssembler* masm = new MacroAssembler(&code);
2769 
2770   address start = __ pc();
2771   __ enter();
2772   int frame_complete = __ pc() - start;
2773   address the_pc = __ pc();
2774   jfr_prologue(the_pc, masm, xthread);
2775   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
2776 
2777   jfr_epilogue(masm);
2778   __ leave();
2779   __ ret();
2780 
2781   OopMap* map = new OopMap(framesize, 1);
2782   oop_maps->add_gc_map(the_pc - start, map);
2783 
2784   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
2785     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2786                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2787                                   oop_maps, false);
2788   return stub;
2789 }
2790 
2791 #endif // INCLUDE_JFR
2792 
2793 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
2794 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
2795 
2796 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
2797   Unimplemented();
2798   return 0;
2799 }
2800 
2801 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
2802   Unimplemented();
2803   return nullptr;
2804 }