/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_riscv.hpp"
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {
public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that fp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // We don't expect any arg reg save area, so riscv asserts that
    // frame::arg_reg_save_area_bytes == 0
    fp_off = 0, fp_off2,
    return_off, return_off2,
    framesize
  };
};

class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(UseRVV && save_vectors) {}
  ~RegisterSaver() {}
  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own
  // gregs: 28, float_registers: 32; not saved: x1(ra), x2(sp), x3(gp), x4(tp)
  // |---v0---|<---SP
  // |---v1---| save vectors only in generate_handler_blob
  // |-- .. --|
  // |---v31--|-----
  // |---f0---|
  // |---f1---|
  // |   ..   |
  // |---f31--|
  // |---reserved slot for stack alignment---|
  // |---x5---|
  // |   x6   |
  // |---.. --|
  // |---x31--|
  // |---fp---|
  // |---ra---|
  int v0_offset_in_bytes(void) { return 0; }
  int f0_offset_in_bytes(void) {
    int f0_offset = 0;
#ifdef COMPILER2
    if (_save_vectors) {
      f0_offset += Matcher::scalable_vector_reg_size(T_INT) * VectorRegisterImpl::number_of_registers *
                   BytesPerInt;
    }
#endif
    return f0_offset;
  }
  int reserved_slot_offset_in_bytes(void) {
    return f0_offset_in_bytes() +
           FloatRegisterImpl::max_slots_per_register *
           FloatRegisterImpl::number_of_registers *
           BytesPerInt;
  }

  int reg_offset_in_bytes(Register r) {
    assert(r->encoding() > 4, "ra, sp, gp and tp not saved");
    return reserved_slot_offset_in_bytes() + (r->encoding() - 4 /* x1, x2, x3, x4 */) * wordSize;
  }

  int freg_offset_in_bytes(FloatRegister f) {
    return f0_offset_in_bytes() + f->encoding() * wordSize;
  }

  int ra_offset_in_bytes(void) {
    return reserved_slot_offset_in_bytes() +
           (RegisterImpl::number_of_registers - 3) *
           RegisterImpl::max_slots_per_register *
           BytesPerInt;
  }
};
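
// Illustrative offset arithmetic for the accessors above, assuming UseRVV is
// off so no vectors are saved (f0_offset_in_bytes() == 0) and the usual
// 2 slots * 4 bytes per 64-bit register:
//   reserved_slot_offset_in_bytes() == 32 fregs * 2 * 4            == 256
//   reg_offset_in_bytes(x5)         == 256 + (5 - 4) * wordSize    == 264
//   ra_offset_in_bytes()            == 256 + (32 - 3) * 2 * 4      == 488
// i.e. ra sits 29 words above the reserved alignment slot, matching the
// layout diagram in the class comment.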

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  int vector_size_in_bytes = 0;
  int vector_size_in_slots = 0;
#ifdef COMPILER2
  if (_save_vectors) {
    vector_size_in_bytes += Matcher::scalable_vector_reg_size(T_BYTE);
    vector_size_in_slots += Matcher::scalable_vector_reg_size(T_INT);
  }
#endif

  assert_cond(masm != NULL && total_frame_words != NULL);
  int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16);
  // OopMap frame size is in compiler stack slots (jints), not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer, Float and Vector registers.
  __ enter();
  __ push_CPU_state(_save_vectors, vector_size_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_maps != NULL && oop_map != NULL);

  int sp_offset_in_slots = 0;
  int step_in_slots = 0;
  if (_save_vectors) {
    step_in_slots = vector_size_in_slots;
    for (int i = 0; i < VectorRegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
      VectorRegister r = as_VectorRegister(i);
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
    }
  }

  step_in_slots = FloatRegisterImpl::max_slots_per_register;
  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    FloatRegister r = as_FloatRegister(i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
  }

  step_in_slots = RegisterImpl::max_slots_per_register;
  // skip the slot reserved for alignment, see MacroAssembler::push_reg;
  // also skip x5 ~ x6 on the stack because they are caller-saved registers.
  sp_offset_in_slots += RegisterImpl::max_slots_per_register * 3;
  // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack.
  for (int i = 7; i < RegisterImpl::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    Register r = as_Register(i);
    if (r != xthread) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg());
    }
  }

  return oop_map;
}
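
// Illustrative sizing (assumption: no vectors saved, additional_frame_words
// == 0): frame_size_in_bytes == align_up(488 + 8, 16) == 496, i.e. 62 words
// or 124 compiler slots, with ra stored in the topmost saved word.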

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
  assert_cond(masm != NULL);
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
#else
  __ pop_CPU_state(_save_vectors);
#endif
  __ leave();
}

// Is the vector's size (in bytes) bigger than a size saved by default?
// riscv does not overlay the floating-point registers on vector registers like aarch64 does.
bool SharedRuntime::is_wide_vector(int size) {
  return UseRVV;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved fp and ra
  // This should really be in_preserve_stack_slots
  return r->reg2stack() * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
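
// Example: an incoming VMReg at stack slot 3 yields reg2offset_in(r)
// == 3 * 4 == 12 bytes off fp, while the same slot in the outgoing area is
// first biased by out_preserve_stack_slots() before scaling to bytes off sp.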

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Registers
// (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3,
    j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN: // fall through
      case T_CHAR:    // fall through
      case T_BYTE:    // fall through
      case T_SHORT:   // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID:
        // halves of T_LONG or T_DOUBLE
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:
        if (int_args < Argument::n_int_register_parameters_j) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_j) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return align_up(stk_args, 2);
}
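
// Worked example (illustrative): for a Java signature (IJDLjava/lang/Object;)V,
// sig_bt is { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID, T_OBJECT } and the loop
// above produces:
//   T_INT    -> j_rarg0 (set1)        T_LONG   -> j_rarg1 (set2)
//   T_DOUBLE -> j_farg0 (set2)        T_OBJECT -> j_rarg2 (set2)
// with each trailing T_VOID half marked set_bad() and a return value of 0
// (no stack slots needed).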

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  assert_cond(masm != NULL);
  Label L;
  __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
  __ beqz(t0, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mv(c_rarg0, xmethod);
  __ mv(c_rarg1, ra);
  int32_t offset = 0;
  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)), offset);
  __ jalr(x1, t0, offset);

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mv(x30, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2 * wordSize);

  if (extraspace) {
    __ sub(sp, sp, extraspace);
  }

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java long/double
    // in a single slot on a 64-bit VM, and it would be silly to break them up, the
    // interpreter leaves one slot empty and only stores to a single slot. In this
    // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use t0
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        __ lwu(t0, Address(sp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
      } else {
        __ ld(t0, Address(sp, ld_off), /*temp register*/esp);

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ sd(t0, Address(sp, next_off), /*temp register*/esp);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ li(t0, 0xdeadffffdeadaaaaul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        } else {
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        __ sd(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ li(t0, 0xdeadffffdeadaaabul);
          __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
          __ sd(r, Address(sp, next_off));
        } else {
          __ sd(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ fsw(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ li(t0, 0xdeadffffdeadaaacul);
        __ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
        __ fsd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mv(esp, sp); // Interp expects args on caller's expression stack

  __ ld(t0, Address(xmethod, in_bytes(Method::interpreter_entry_offset())));
  __ jr(t0);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
  if (comp_args_on_stack != 0) {
    __ sub(t0, sp, comp_words_on_stack * wordSize);
    __ andi(sp, t0, -16);
  }
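  // (Illustrative note: "andi sp, t0, -16" clears the low four bits, rounding
  // sp down to the 16-byte alignment the RISC-V ABI requires once outgoing
  // stack arguments are in play.)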

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld(t1, Address(xmethod, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lw(t0, Address(esp, ld_off));
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;
        __ ld(t0, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ sd(t0, Address(sp, st_off), /*temp register*/t2);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ld(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ lw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ flw(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ fld(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ sd(xmethod, Address(xthread, JavaThread::callee_target_offset()));

  __ jr(t1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  const Register holder = t1;
  const Register receiver = j_rarg0;
  const Register tmp = t2;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know xmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    __ load_klass(t0, receiver, tmp);
    __ ld(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ ld(xmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ beq(t0, tmp, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
    __ beqz(t0, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = NULL;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ lwu(t0, Address(xmethod, Method::access_flags_offset()));
      __ andi(t1, t0, JVM_ACC_STATIC);
      __ beqz(t1, L_skip_barrier); // non-static
    }

    __ load_method_holder(t1, xmethod);
    __ clinit_barrier(t1, t0, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on riscv");

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3,
    c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:  // fall through
      case T_CHAR:     // fall through
      case T_BYTE:     // fall through
      case T_SHORT:    // fall through
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:      // fall through
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      case T_OBJECT:    // fall through
      case T_ARRAY:     // fall through
      case T_ADDRESS:   // fall through
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return stk_args;
}
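
// Worked example (illustrative): for sig_bt = { T_DOUBLE, T_VOID, T_FLOAT },
// the double lands in c_farg0 and the float in c_farg1. Note the calling
// convention encoded in the T_FLOAT/T_DOUBLE cases above: once all eight
// c_farg registers are taken, floating-point arguments spill into the
// remaining c_rarg registers before finally going to the stack.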

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (per the riscv64 ABI) even though Java would only store 32 bits for a
// parameter. On 32-bit it would simply be 32 bits, so this routine does
// 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert_cond(masm != NULL);
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(t0, Address(fp, reg2offset_in(src.first())));
      __ sd(t0, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ lw(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ sd(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      // 32-bit sign extend
      __ addw(dst.first()->as_Register(), src.first()->as_Register(), zr);
    }
  }
}
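
// (Note: "addw rd, rs, zr" performs a 32-bit add of zero and sign-extends the
// result to 64 bits, which is exactly the 32->64 extension wanted above.)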

// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {
  assert_cond(masm != NULL && map != NULL && receiver_offset != NULL);
  // must pass a handle. First figure out the location we use as a handle
  Register rHandle = dst.first()->is_stack() ? t1 : dst.first()->as_Register();

  // See if the oop is NULL; if it is, we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ ld(t0, Address(fp, reg2offset_in(src.first())));
    __ la(rHandle, Address(fp, reg2offset_in(src.first())));
    // conditionally move a NULL
    Label notZero1;
    __ bnez(t0, notZero1);
    __ mv(rHandle, zr);
    __ bind(notZero1);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot = -1;
    if (rOop == j_rarg0) {
      oop_slot = 0;
    } else if (rOop == j_rarg1) {
      oop_slot = 1;
    } else if (rOop == j_rarg2) {
      oop_slot = 2;
    } else if (rOop == j_rarg3) {
      oop_slot = 3;
    } else if (rOop == j_rarg4) {
      oop_slot = 4;
    } else if (rOop == j_rarg5) {
      oop_slot = 5;
    } else if (rOop == j_rarg6) {
      oop_slot = 6;
    } else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ sd(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    // rOop may be the same as rHandle
    if (rOop == rHandle) {
      Label isZero;
      __ beqz(rOop, isZero);
      __ la(rHandle, Address(sp, offset));
      __ bind(isZero);
    } else {
      Label notZero2;
      __ la(rHandle, Address(sp, offset));
      __ bnez(rOop, notZero2);
      __ mv(rHandle, zr);
      __ bind(notZero2);
    }
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ sd(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}
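
// Example (illustrative): for a register oop arriving in j_rarg2, oop_slot
// becomes 2 * VMRegImpl::slots_per_word + oop_handle_offset, the oop is
// spilled at sp + oop_slot * stack_slot_size, and the "handle" handed to the
// native code is that stack address, or NULL when the oop itself is NULL.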

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert((src.first()->is_stack() && dst.first()->is_stack()) ||
         (src.first()->is_reg() && dst.first()->is_reg()) ||
         (src.first()->is_stack() && dst.first()->is_reg()), "Unexpected error");
  assert_cond(masm != NULL);
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ lwu(t0, Address(fp, reg2offset_in(src.first())));
      __ sw(t0, Address(sp, reg2offset_out(dst.first())));
    } else if (dst.first()->is_Register()) {
      __ lwu(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg()) {
      __ fmv_s(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    } else {
      ShouldNotReachHere();
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert_cond(masm != NULL);
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(t0, Address(fp, reg2offset_in(src.first())));
      __ sd(t0, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ sd(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ mv(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert((src.first()->is_stack() && dst.first()->is_stack()) ||
         (src.first()->is_reg() && dst.first()->is_reg()) ||
         (src.first()->is_stack() && dst.first()->is_reg()), "Unexpected error");
  assert_cond(masm != NULL);
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ld(t0, Address(fp, reg2offset_in(src.first())));
      __ sd(t0, Address(sp, reg2offset_out(dst.first())));
    } else if (dst.first()->is_Register()) {
      __ ld(dst.first()->as_Register(), Address(fp, reg2offset_in(src.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg()) {
      __ fmv_d(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    } else {
      ShouldNotReachHere();
    }
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  assert_cond(masm != NULL);
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
    case T_FLOAT:
      __ fsw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fsd(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ sd(x10, Address(fp, -3 * wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  assert_cond(masm != NULL);
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
    case T_FLOAT:
      __ flw(f10, Address(fp, -3 * wordSize));
      break;
    case T_DOUBLE:
      __ fld(f10, Address(fp, -3 * wordSize));
      break;
    case T_VOID:  break;
    default: {
      __ ld(x10, Address(fp, -3 * wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  assert_cond(masm != NULL && args != NULL);
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ addi(sp, sp, -2 * wordSize);
      __ fsd(args[i].first()->as_FloatRegister(), Address(sp, 0));
    }
  }
  __ push_reg(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  assert_cond(masm != NULL && args != NULL);
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    }
  }
  __ pop_reg(x, sp);
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_Register()) {
      // integer registers were restored by pop_reg above
    } else if (args[i].first()->is_FloatRegister()) {
      __ fld(args[i].first()->as_FloatRegister(), Address(sp, 0));
      __ add(sp, sp, 2 * wordSize);
    }
  }
}
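
// Note the LIFO symmetry between the two helpers above: save_args spills each
// float argument into its own 16-byte slot as it walks forward and then
// pushes the integer RegSet in one go, so restore_args must pop the integer
// set first and then walk backwards reloading the float registers.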

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }

class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMRegPair        _src;
    VMRegPair        _dst;
    int              _src_index;
    int              _dst_index;
    bool             _processed;
    MoveOperation*   _next;
    MoveOperation*   _prev;

    static int get_id(VMRegPair r) { Unimplemented(); return 0; }

   public:
    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
      _src(src)
    , _dst(dst)
    , _src_index(src_index)
    , _dst_index(dst_index)
    , _processed(false)
    , _next(NULL)
    , _prev(NULL) { Unimplemented(); }

    VMRegPair src() const              { Unimplemented(); return _src; }
    int src_id() const                 { Unimplemented(); return 0; }
    int src_index() const              { Unimplemented(); return 0; }
    VMRegPair dst() const              { Unimplemented(); return _dst; }
    void set_dst(int i, VMRegPair dst) { Unimplemented(); }
    int dst_index() const              { Unimplemented(); return 0; }
    int dst_id() const                 { Unimplemented(); return 0; }
    MoveOperation* next() const        { Unimplemented(); return 0; }
    MoveOperation* prev() const        { Unimplemented(); return 0; }
    void set_processed()               { Unimplemented(); }
    bool is_processed() const          { Unimplemented(); return 0; }

    // insert
    void break_cycle(VMRegPair temp_register) { Unimplemented(); }

    void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
  };

 private:
  GrowableArray<MoveOperation*> edges;

 public:
  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
                   BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }

  // Collect all the move operations
  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
};

static void rt_call(MacroAssembler* masm, address dest) {
  assert_cond(masm != NULL);
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    int32_t offset = 0;
    __ la_patchable(t0, RuntimeAddress(dest), offset);
    __ jalr(x1, t0, offset);
  }
}
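
// rt_call: if the destination is a blob already in the code cache we can rely
// on a pc-relative far_call; otherwise the address is materialized with a
// patchable la_patchable/jalr pair (x1, i.e. ra, receives the return address).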
1087 
1088 static void verify_oop_args(MacroAssembler* masm,
1089                             const methodHandle& method,
1090                             const BasicType* sig_bt,
1091                             const VMRegPair* regs) {
1092   const Register temp_reg = x9;  // not part of any compiled calling seq
1093   if (VerifyOops) {
1094     for (int i = 0; i < method->size_of_parameters(); i++) {
1095       if (sig_bt[i] == T_OBJECT ||
1096           sig_bt[i] == T_ARRAY) {
1097         VMReg r = regs[i].first();
1098         assert(r->is_valid(), "bad oop arg");
1099         if (r->is_stack()) {
1100           __ ld(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1101           __ verify_oop(temp_reg);
1102         } else {
1103           __ verify_oop(r->as_Register());
1104         }
1105       }
1106     }
1107   }
1108 }
1109 
1110 static void gen_special_dispatch(MacroAssembler* masm,
1111                                  const methodHandle& method,
1112                                  const BasicType* sig_bt,
1113                                  const VMRegPair* regs) {
1114   verify_oop_args(masm, method, sig_bt, regs);
1115   vmIntrinsics::ID iid = method->intrinsic_id();
1116 
1117   // Now write the args into the outgoing interpreter space
1118   bool     has_receiver   = false;
1119   Register receiver_reg   = noreg;
1120   int      member_arg_pos = -1;
1121   Register member_reg     = noreg;
1122   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1123   if (ref_kind != 0) {
1124     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1125     member_reg = x9;  // known to be free at this point
1126     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1127   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1128     has_receiver = true;
1129   } else {
1130     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1131   }
1132 
1133   if (member_reg != noreg) {
1134     // Load the member_arg into register, if necessary.
1135     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1136     VMReg r = regs[member_arg_pos].first();
1137     if (r->is_stack()) {
1138       __ ld(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1139     } else {
1140       // no data motion is needed
1141       member_reg = r->as_Register();
1142     }
1143   }
1144 
1145   if (has_receiver) {
1146     // Make sure the receiver is loaded into a register.
1147     assert(method->size_of_parameters() > 0, "oob");
1148     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1149     VMReg r = regs[0].first();
1150     assert(r->is_valid(), "bad receiver arg");
1151     if (r->is_stack()) {
1152       // Porting note:  This assumes that compiled calling conventions always
1153       // pass the receiver oop in a register.  If this is not true on some
1154       // platform, pick a temp and load the receiver from stack.
1155       fatal("receiver always in a register");
1156       receiver_reg = x12;  // known to be free at this point
1157       __ ld(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1158     } else {
1159       // no data motion is needed
1160       receiver_reg = r->as_Register();
1161     }
1162   }
1163 
1164   // Figure out which address we are really jumping to:
1165   MethodHandles::generate_method_handle_dispatch(masm, iid,
1166                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1167 }
1168 
1169 // ---------------------------------------------------------------------------
1170 // Generate a native wrapper for a given method.  The method takes arguments
1171 // in the Java compiled code convention, marshals them to the native
1172 // convention (handlizes oops, etc), transitions to native, makes the call,
1173 // returns to java state (possibly blocking), unhandlizes any result and
1174 // returns.
1175 //
1176 // Critical native functions are a shorthand for the use of
1177 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1178 // functions.  The wrapper is expected to unpack the arguments before
1179 // passing them to the callee and perform checks before and after the
1180 // native call to ensure that they GCLocker
1181 // lock_critical/unlock_critical semantics are followed.  Some other
1182 // parts of JNI setup are skipped like the tear down of the JNI handle
1183 // block and the check for pending exceptions it's impossible for them
1184 // to be thrown.
1185 //
1186 // They are roughly structured like this:
1187 //    if (GCLocker::needs_gc()) SharedRuntime::block_for_jni_critical()
1188 //    tranistion to thread_in_native
1189 //    unpack arrray arguments and call native entry point
1190 //    check for safepoint in progress
1191 //    check if any thread suspend flags are set
1192 //      call into JVM and possible unlock the JNI critical
1193 //      if a GC was suppressed while in the critical native.
1194 //    transition back to thread_in_Java
1195 //    return to caller
1196 //
1197 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1198                                                 const methodHandle& method,
1199                                                 int compile_id,
1200                                                 BasicType* in_sig_bt,
1201                                                 VMRegPair* in_regs,
1202                                                 BasicType ret_type,
1203                                                 address critical_entry) {
1204   if (method->is_method_handle_intrinsic()) {
1205     vmIntrinsics::ID iid = method->intrinsic_id();
1206     intptr_t start = (intptr_t)__ pc();
1207     int vep_offset = ((intptr_t)__ pc()) - start;
1208 
1209     // First instruction must be a nop as it may need to be patched on deoptimisation
1210     MacroAssembler::assert_alignment(__ pc());
1211     __ nop();
1212     gen_special_dispatch(masm,
1213                          method,
1214                          in_sig_bt,
1215                          in_regs);
1216     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1217     __ flush();
1218     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1219     return nmethod::new_native_nmethod(method,
1220                                        compile_id,
1221                                        masm->code(),
1222                                        vep_offset,
1223                                        frame_complete,
1224                                        stack_slots / VMRegImpl::slots_per_word,
1225                                        in_ByteSize(-1),
1226                                        in_ByteSize(-1),
1227                                        (OopMapSet*)NULL);
1228   }
1229   bool is_critical_native = true;
1230   address native_func = critical_entry;
1231   if (native_func == NULL) {
1232     native_func = method->native_function();
1233     is_critical_native = false;
1234   }
1235   assert(native_func != NULL, "must have function");
1236 
1237   // An OopMap for lock (and class if static)
1238   OopMapSet *oop_maps = new OopMapSet();
1239   assert_cond(oop_maps != NULL);
1240   intptr_t start = (intptr_t)__ pc();
1241 
1242   // We have received a description of where all the java arg are located
1243   // on entry to the wrapper. We need to convert these args to where
1244   // the jni function will expect them. To figure out where they go
1245   // we convert the java signature to a C signature by inserting
1246   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1247 
1248   const int total_in_args = method->size_of_parameters();
1249   int total_c_args = total_in_args;
1250   if (!is_critical_native) {
1251     total_c_args += 1;
1252     if (method->is_static()) {
1253       total_c_args++;
1254     }
1255   } else {
1256     for (int i = 0; i < total_in_args; i++) {
1257       if (in_sig_bt[i] == T_ARRAY) {
1258         total_c_args++;
1259       }
1260     }
1261   }
1262 
1263   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1264   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1265   BasicType* in_elem_bt = NULL;
1266 
1267   int argc = 0;
1268   if (!is_critical_native) {
1269     out_sig_bt[argc++] = T_ADDRESS;
1270     if (method->is_static()) {
1271       out_sig_bt[argc++] = T_OBJECT;
1272     }
1273 
1274     for (int i = 0; i < total_in_args ; i++ ) {
1275       out_sig_bt[argc++] = in_sig_bt[i];
1276     }
1277   } else {
1278     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1279     SignatureStream ss(method->signature());
1280     for (int i = 0; i < total_in_args ; i++ ) {
1281       if (in_sig_bt[i] == T_ARRAY) {
1282         // Arrays are passed as int, elem* pair
1283         out_sig_bt[argc++] = T_INT;
1284         out_sig_bt[argc++] = T_ADDRESS;
1285         ss.skip_array_prefix(1);  // skip one '['
1286         assert(ss.is_primitive(), "primitive type expected");
1287         in_elem_bt[i] = ss.type();
1288       } else {
1289         out_sig_bt[argc++] = in_sig_bt[i];
1290         in_elem_bt[i] = T_VOID;
1291       }
1292       if (in_sig_bt[i] != T_VOID) {
1293         assert(in_sig_bt[i] == ss.type() ||
1294                in_sig_bt[i] == T_ARRAY, "must match");
1295         ss.next();
1296       }
1297     }
1298   }
1299 
1300   // Now figure out where the args must be stored and how much stack space
1301   // they require.
1302   int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1303 
1304   // Compute framesize for the wrapper.  We need to handlize all oops in
1305   // incoming registers
1306 
1307   // Calculate the total number of stack slots we will need.
1308 
1309   // First count the abi requirement plus all of the outgoing args
1310   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1311 
1312   // Now the space for the inbound oop handle area
1313   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1314   if (is_critical_native) {
1315     // Critical natives may have to call out so they need a save area
1316     // for register arguments.
1317     int double_slots = 0;
1318     int single_slots = 0;
1319     for ( int i = 0; i < total_in_args; i++) {
1320       if (in_regs[i].first()->is_Register()) {
1321         const Register reg = in_regs[i].first()->as_Register();
1322         switch (in_sig_bt[i]) {
1323           case T_BOOLEAN:
1324           case T_BYTE:
1325           case T_SHORT:
1326           case T_CHAR:
1327           case T_INT:  single_slots++; break;
1328           case T_ARRAY:  // specific to LP64 (7145024)
1329           case T_LONG: double_slots++; break;
1330           default:  ShouldNotReachHere();
1331         }
1332       } else if (in_regs[i].first()->is_FloatRegister()) {
1333         ShouldNotReachHere();
1334       }
1335     }
1336     total_save_slots = double_slots * 2 + single_slots;
1337     // align the save area
1338     if (double_slots != 0) {
1339       stack_slots = align_up(stack_slots, 2);
1340     }
1341   }
1342 
1343   int oop_handle_offset = stack_slots;
1344   stack_slots += total_save_slots;
1345 
1346   // Now any space we need for handlizing a klass if static method
1347 
1348   int klass_slot_offset = 0;
1349   int klass_offset = -1;
1350   int lock_slot_offset = 0;
1351   bool is_static = false;
1352 
1353   if (method->is_static()) {
1354     klass_slot_offset = stack_slots;
1355     stack_slots += VMRegImpl::slots_per_word;
1356     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1357     is_static = true;
1358   }
1359 
1360   // Plus a lock if needed
1361 
1362   if (method->is_synchronized()) {
1363     lock_slot_offset = stack_slots;
1364     stack_slots += VMRegImpl::slots_per_word;
1365   }
1366 
1367   // Now a place (+2 slots) to save return values or temps during shuffling
1368   // + 4 slots for the return address and saved fp (2 slots each), which we own
1369   stack_slots += 6;
1370 
1371   // OK, the space we have allocated will look like:
1372   //
1373   //
1374   // FP-> |                     |
1375   //      | 2 slots (ra)        |
1376   //      | 2 slots (fp)        |
1377   //      |---------------------|
1378   //      | 2 slots for moves   |
1379   //      |---------------------|
1380   //      | lock box (if sync)  |
1381   //      |---------------------| <- lock_slot_offset
1382   //      | klass (if static)   |
1383   //      |---------------------| <- klass_slot_offset
1384   //      | oopHandle area      |
1385   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1386   //      | outbound memory     |
1387   //      | based arguments     |
1388   //      |                     |
1389   //      |---------------------|
1390   //      |                     |
1391   // SP-> | out_preserved_slots |
1392   //
1393   //
1394 
1395 
1396   // Now compute the actual number of stack words we need, rounding to keep
1397   // the stack properly aligned.
1398   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1399 
1400   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1401 
1402   // First thing: make an ic check to see if we should even be here
1403 
1404   // We are free to use all registers as temps without saving them and
1405   // restoring them except fp. fp is the only callee save register
1406   // as far as the interpreter and the compiler(s) are concerned.
1407 
1408 
1409   const Register ic_reg = t1;
1410   const Register receiver = j_rarg0;
1411 
1412   Label hit;
1413   Label exception_pending;
1414 
1415   __ verify_oop(receiver);
1416   assert_different_registers(ic_reg, receiver, t0, t2);
1417   __ cmp_klass(receiver, ic_reg, t0, t2 /* call-clobbered t2 as a tmp */, hit);
1418 
1419   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1420 
1421   // Verified entry point must be aligned
1422   __ align(8);
1423 
1424   __ bind(hit);
1425 
1426   int vep_offset = ((intptr_t)__ pc()) - start;
1427 
1428   // If we have to make this method not-entrant we'll overwrite its
1429   // first instruction with a jump.
1430   MacroAssembler::assert_alignment(__ pc());
1431   __ nop();
1432 
1433   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1434     Label L_skip_barrier;
1435     __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
1436     __ clinit_barrier(t1, t0, &L_skip_barrier);
1437     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1438 
1439     __ bind(L_skip_barrier);
1440   }
1441 
1442   // Generate stack overflow check
1443   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1444 
1445   // Generate a new frame for the wrapper.
1446   __ enter();
1447   // -2 because return address is already present and so is saved fp
1448   __ sub(sp, sp, stack_size - 2 * wordSize);
1449 
1450   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1451   assert_cond(bs != NULL);
1452   bs->nmethod_entry_barrier(masm);
1453 
1454   // Frame is now completed as far as size and linkage.
1455   int frame_complete = ((intptr_t)__ pc()) - start;
1456 
1457   // We use x18 as the oop handle for the receiver/klass.
1458   // It is callee-saved, so it survives the call to native code.
1459 
1460   const Register oop_handle_reg = x18;
1461 
1462   //
1463   // We immediately shuffle the arguments so that for any vm call we have
1464   // to make from here on out (sync slow path, jvmti, etc.) we will have
1465   // captured the oops from our caller and have a valid oopMap for
1466   // them.
1467 
1468   // -----------------
1469   // The Grand Shuffle
1470 
1471   // The Java calling convention is either equal (linux) or denser (win64) than the
1472   // C calling convention. However, because of the jni_env argument, the C calling
1473   // convention always has at least one more argument (and two for static) than Java.
1474   // Therefore if we move the args from java -> c backwards then we will never have
1475   // a register->register conflict and we don't have to build a dependency graph
1476   // and figure out how to break any cycles.
1477   //
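       // For example (illustrative): with just the hidden JNIEnv* argument,
       // java arg i maps to C arg i + 1, so the moves run
       //   in[n-1] -> out[n], ..., in[1] -> out[2], in[0] -> out[1];
       // each destination has either never held a java arg or held one that
       // was already moved, so no move clobbers a not-yet-moved source.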
1478 
1479   // Record esp-based slot for receiver on stack for non-static methods
1480   int receiver_offset = -1;
1481 
1482   // This is a trick. We double the stack slots so we can claim
1483   // the oops in the caller's frame. Since we are sure to have
1484   // more args than the caller, doubling is enough to make
1485   // sure we can capture all the incoming oop args from the
1486   // caller.
1487   //
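       // (illustrative: an incoming stack oop at caller slot k ends up at
       //  roughly slot stack_slots + k relative to our new sp; since the java
       //  caller passes no more args than our C call does, k stays below
       //  stack_slots and the doubled map covers it)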
1488   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1489   assert_cond(map != NULL);
1490 
1491   int float_args = 0;
1492   int int_args = 0;
1493 
1494 #ifdef ASSERT
1495   bool reg_destroyed[RegisterImpl::number_of_registers];
1496   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
1497   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1498     reg_destroyed[r] = false;
1499   }
1500   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
1501     freg_destroyed[f] = false;
1502   }
1503 
1504 #endif /* ASSERT */
1505 
1506   // This may iterate in two different directions depending on the
1507   // kind of native it is.  The reason is that for regular JNI natives
1508   // the incoming and outgoing registers are offset upwards and for
1509   // critical natives they are offset down.
1510   GrowableArray<int> arg_order(2 * total_in_args);
1511   VMRegPair tmp_vmreg;
1512   tmp_vmreg.set2(x9->as_VMReg());
1513 
1514   if (!is_critical_native) {
1515     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1516       arg_order.push(i);
1517       arg_order.push(c_arg);
1518     }
1519   } else {
1520     // Compute a valid move order, using tmp_vmreg to break any cycles
1521     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1522   }
1523 
1524   int temploc = -1;
1525   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1526     int i = arg_order.at(ai);
1527     int c_arg = arg_order.at(ai + 1);
1528     __ block_comment(err_msg("mv %d -> %d", i, c_arg));
1529     if (c_arg == -1) {
1530       assert(is_critical_native, "should only be required for critical natives");
1531       // This arg needs to be moved to a temporary
1532       __ mv(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1533       in_regs[i] = tmp_vmreg;
1534       temploc = i;
1535       continue;
1536     } else if (i == -1) {
1537       assert(is_critical_native, "should only be required for critical natives");
1538       // Read from the temporary location
1539       assert(temploc != -1, "must be valid");
1540       i = temploc;
1541       temploc = -1;
1542     }
1543 #ifdef ASSERT
1544     if (in_regs[i].first()->is_Register()) {
1545       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1546     } else if (in_regs[i].first()->is_FloatRegister()) {
1547       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1548     }
1549     if (out_regs[c_arg].first()->is_Register()) {
1550       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1551     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1552       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1553     }
1554 #endif /* ASSERT */
1555     switch (in_sig_bt[i]) {
1556       case T_ARRAY:
1557         if (is_critical_native) {
1558           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1559           c_arg++;
1560 #ifdef ASSERT
1561           if (out_regs[c_arg].first()->is_Register()) {
1562             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1563           } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1564             freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1565           }
1566 #endif
1567           int_args++;
1568           break;
1569         }
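           // fall through: for regular natives an array argument is handled
           // as an ordinary oop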
1570       case T_OBJECT:
1571         assert(!is_critical_native, "no oop arguments");
1572         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1573                     ((i == 0) && (!is_static)),
1574                     &receiver_offset);
1575         int_args++;
1576         break;
1577       case T_VOID:
1578         break;
1579 
1580       case T_FLOAT:
1581         float_move(masm, in_regs[i], out_regs[c_arg]);
1582         float_args++;
1583         break;
1584 
1585       case T_DOUBLE:
1586         assert( i + 1 < total_in_args &&
1587                 in_sig_bt[i + 1] == T_VOID &&
1588                 out_sig_bt[c_arg + 1] == T_VOID, "bad arg list");
1589         double_move(masm, in_regs[i], out_regs[c_arg]);
1590         float_args++;
1591         break;
1592 
1593       case T_LONG :
1594         long_move(masm, in_regs[i], out_regs[c_arg]);
1595         int_args++;
1596         break;
1597 
1598       case T_ADDRESS:
1599         assert(false, "found T_ADDRESS in java args");
1600         break;
1601 
1602       default:
1603         move32_64(masm, in_regs[i], out_regs[c_arg]);
1604         int_args++;
1605     }
1606   }
1607 
1608   // point c_arg at the first arg that is already loaded in case we
1609   // need to spill before we call out
1610   int c_arg = total_c_args - total_in_args;
1611 
1612   // Pre-load a static method's oop into c_rarg1.
1613   if (method->is_static() && !is_critical_native) {
1614 
1615     //  load oop into a register
1616     __ movoop(c_rarg1,
1617               JNIHandles::make_local(method->method_holder()->java_mirror()),
1618               /*immediate*/true);
1619 
1620     // Now handlize the static class mirror; it's known to be non-null.
1621     __ sd(c_rarg1, Address(sp, klass_offset));
1622     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1623 
1624     // Now get the handle
1625     __ la(c_rarg1, Address(sp, klass_offset));
1626     // and protect the arg if we must spill
1627     c_arg--;
1628   }
1629 
1630   // Change state to native (we save the return address in the thread, since it might not
1631   // be pushed on the stack when we do a stack traversal).
1632   // We use the same pc/oopMap repeatedly when we call out
1633 
1634   Label native_return;
1635   __ set_last_Java_frame(sp, noreg, native_return, t0);
1636 
1637   Label dtrace_method_entry, dtrace_method_entry_done;
1638   {
1639     int32_t offset = 0;
1640     __ la_patchable(t0, ExternalAddress((address)&DTraceMethodProbes), offset);
1641     __ lbu(t0, Address(t0, offset));
1642     __ addw(t0, t0, zr);
1643     __ bnez(t0, dtrace_method_entry);
1644     __ bind(dtrace_method_entry_done);
1645   }
1646 
1647   // RedefineClasses() tracing support for obsolete method entry
1648   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1649     // protect the args we've loaded
1650     save_args(masm, total_c_args, c_arg, out_regs);
1651     __ mov_metadata(c_rarg1, method());
1652     __ call_VM_leaf(
1653       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1654       xthread, c_rarg1);
1655     restore_args(masm, total_c_args, c_arg, out_regs);
1656   }
1657 
1658   // Lock a synchronized method
1659 
1660   // Register definitions used by locking and unlocking
1661 
1662   const Register swap_reg = x10;
1663   const Register obj_reg  = x9;  // Will contain the oop
1664   const Register lock_reg = x30;  // Address of compiler lock object (BasicLock)
1665   const Register old_hdr  = x30;  // value of old header at unlock time
1666   const Register tmp      = ra;
1667 
1668   Label slow_path_lock;
1669   Label lock_done;
1670 
1671   if (method->is_synchronized()) {
1672 
1673     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1674 
1675     // Get the handle (the 2nd argument)
1676     __ mv(oop_handle_reg, c_rarg1);
1677 
1678     // Get address of the box
1679     __ la(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1680 
1681     // Load the oop from the handle
1682     __ ld(obj_reg, Address(oop_handle_reg, 0));
1683 
1684     if (UseBiasedLocking) {
1685       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1686     }
1687 
1688     // Load (object->mark() | 1) into swap_reg (x10)
1689     __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1690     __ ori(swap_reg, t0, 1);
1691 
1692     // Save (object->mark() | 1) into BasicLock's displaced header
1693     __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1694 
1695     // src -> dest if dest == x10 else x10 <- dest
1696     {
1697       Label here;
1698       __ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, lock_done, /*fallthrough*/NULL);
1699     }
1700 
1701     // Test if the oopMark is an obvious stack pointer, i.e.,
1702     //  1) (mark & 3) == 0, and
1703     //  2) sp <= mark < mark + os::pagesize()
1704     // These 3 tests can be done by evaluating the following
1705     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1706     // assuming both stack pointer and pagesize have their
1707     // least significant 2 bits clear.
1708     // NOTE: the oopMark is in swap_reg (x10) as the result of cmpxchg
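         // For example (illustrative), with a 4K page:
         //   3 - os::vm_page_size() == 3 - 0x1000 == 0x...fffff003
         // so the mask keeps the two tag bits plus every bit at or above the
         // page-offset field; the result is zero exactly when the mark is
         // 4-byte aligned and sp <= mark < sp + page_size (a recursive lock).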
1709 
1710     __ sub(swap_reg, swap_reg, sp);
1711     __ andi(swap_reg, swap_reg, 3 - os::vm_page_size());
1712 
1713     // Save the test result; for the recursive case, the result is zero
1714     __ sd(swap_reg, Address(lock_reg, mark_word_offset));
1715     __ bnez(swap_reg, slow_path_lock);
1716 
1717     // Slow path will re-enter here
1718     __ bind(lock_done);
1719   }
1720 
1721 
1722   // Finally just about ready to make the JNI call
1723 
1724   // get JNIEnv* which is first argument to native
1725   if (!is_critical_native) {
1726     __ la(c_rarg0, Address(xthread, in_bytes(JavaThread::jni_environment_offset())));
1727 
1728     // Now set thread in native
1729     __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1730     __ mv(t0, _thread_in_native);
1731     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1732     __ sw(t0, Address(t1));
1733   }
1734 
1735   rt_call(masm, native_func);
1736 
1737   __ bind(native_return);
1738 
1739   intptr_t return_pc = (intptr_t) __ pc();
1740   oop_maps->add_gc_map(return_pc - start, map);
1741 
1742   // Unpack native results.
1743   if (ret_type != T_OBJECT && ret_type != T_ARRAY) {
1744     __ cast_primitive_type(ret_type, x10);
1745   }
1746 
1747   Label safepoint_in_progress, safepoint_in_progress_done;
1748   Label after_transition;
1749 
1750   // If this is a critical native, check for a safepoint or suspend request after the call.
1751   // If a safepoint is needed, transition to native, then to native_trans to handle
1752   // safepoint like the native methods that are not critical natives.
1753   if (is_critical_native) {
1754     Label needs_safepoint;
1755     __ safepoint_poll(needs_safepoint, false /* as_return */, true /* acquire */, false /* in_nmethod */);
1756     __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
1757     __ bnez(t0, needs_safepoint);
1758     __ j(after_transition);
1759     __ bind(needs_safepoint);
1760   }
1761 
1762   // Switch thread to "native transition" state before reading the synchronization state.
1763   // This additional state is necessary because reading and testing the synchronization
1764   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1765   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1766   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1767   //     Thread A is resumed to finish this native method, but doesn't block here since it
1768   //     didn't see any synchronization in progress, and escapes.
1769   __ mv(t0, _thread_in_native_trans);
1770 
1771   __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
1772 
1773   // Force this write out before the read below
1774   __ membar(MacroAssembler::AnyAny);
1775 
1776   // check for safepoint operation in progress and/or pending suspend requests
1777   {
1778     // We need an acquire here to ensure that any subsequent load of the
1779     // global SafepointSynchronize::_state flag is ordered after this load
1780     // of the thread-local polling word. We don't want this poll to
1781     // return false (i.e. not safepointing) and a later poll of the global
1782     // SafepointSynchronize::_state spuriously to return true.
1783     // This is to avoid a race when we're in a native->Java transition
1784     // racing the code which wakes up from a safepoint.
1785 
1786     __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1787     __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
1788     __ bnez(t0, safepoint_in_progress);
1789     __ bind(safepoint_in_progress_done);
1790   }
1791 
1792   // change thread state
1793   __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
1794   __ mv(t0, _thread_in_Java);
1795   __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
1796   __ sw(t0, Address(t1));
1797   __ bind(after_transition);
1798 
1799   Label reguard;
1800   Label reguard_done;
1801   __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
1802   __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
1803   __ beq(t0, t1, reguard);
1804   __ bind(reguard_done);
1805 
1806   // The native result, if any, is live now
1807 
1808   // Unlock
1809   Label unlock_done;
1810   Label slow_path_unlock;
1811   if (method->is_synchronized()) {
1812 
1813     // Get locked oop from the handle we passed to jni
1814     __ ld(obj_reg, Address(oop_handle_reg, 0));
1815 
1816     Label done;
1817 
1818     if (UseBiasedLocking) {
1819       __ biased_locking_exit(obj_reg, old_hdr, done);
1820     }
1821 
1822     // Simple recursive lock?
1823     __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1824     __ beqz(t0, done);
1825 
1826     // Must save x10 if it is live now because cmpxchg must use it
1827     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1828       save_native_result(masm, ret_type, stack_slots);
1829     }
1830 
1831     // get address of the stack lock
1832     __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1833     //  get old displaced header
1834     __ ld(old_hdr, Address(x10, 0));
1835 
1836     // Atomic swap old header if oop still contains the stack lock
1837     Label succeed;
1838     __ cmpxchg_obj_header(x10, old_hdr, obj_reg, t0, succeed, &slow_path_unlock);
1839     __ bind(succeed);
1840 
1841     // slow path re-enters here
1842     __ bind(unlock_done);
1843     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1844       restore_native_result(masm, ret_type, stack_slots);
1845     }
1846 
1847     __ bind(done);
1848   }
1849 
1850   Label dtrace_method_exit, dtrace_method_exit_done;
1851   {
1852     int32_t offset = 0;
1853     __ la_patchable(t0, ExternalAddress((address)&DTraceMethodProbes), offset);
1854     __ lbu(t0, Address(t0, offset));
1855     __ bnez(t0, dtrace_method_exit);
1856     __ bind(dtrace_method_exit_done);
1857   }
1858 
1859   __ reset_last_Java_frame(false);
1860 
1861   // Unbox oop result, e.g. JNIHandles::resolve result.
1862   if (is_reference_type(ret_type)) {
1863     __ resolve_jobject(x10, xthread, t1);
1864   }
1865 
1866   if (CheckJNICalls) {
1867     // clear_pending_jni_exception_check
1868     __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
1869   }
1870 
1871   if (!is_critical_native) {
1872     // reset handle block
1873     __ ld(x12, Address(xthread, JavaThread::active_handles_offset()));
1874     __ sd(zr, Address(x12, JNIHandleBlock::top_offset_in_bytes()));
1875   }
1876 
1877   __ leave();
1878 
1879   if (!is_critical_native) {
1880     // Any exception pending?
1881     __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1882     __ bnez(t0, exception_pending);
1883   }
1884 
1885   // We're done
1886   __ ret();
1887 
1888   // Unexpected paths are out of line and go here
1889 
1890   if (!is_critical_native) {
1891     // forward the exception
1892     __ bind(exception_pending);
1893 
1894     // and forward the exception
1895     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1896   }
1897 
1898   // Slow path locking & unlocking
1899   if (method->is_synchronized()) {
1900 
1901     __ block_comment("Slow path lock {");
1902     __ bind(slow_path_lock);
1903 
1904     // last_Java_frame is already set up. No exceptions, so do a vanilla call, not call_VM.
1905     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1906 
1907     // protect the args we've loaded
1908     save_args(masm, total_c_args, c_arg, out_regs);
1909 
1910     __ mv(c_rarg0, obj_reg);
1911     __ mv(c_rarg1, lock_reg);
1912     __ mv(c_rarg2, xthread);
1913 
1914     // Not a leaf but we have last_Java_frame setup as we want
1915     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
1916     restore_args(masm, total_c_args, c_arg, out_regs);
1917 
1918 #ifdef ASSERT
1919     { Label L;
1920       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1921       __ beqz(t0, L);
1922       __ stop("no pending exception allowed on exit from monitorenter");
1923       __ bind(L);
1924     }
1925 #endif
1926     __ j(lock_done);
1927 
1928     __ block_comment("} Slow path lock");
1929 
1930     __ block_comment("Slow path unlock {");
1931     __ bind(slow_path_unlock);
1932 
1933     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1934       save_native_result(masm, ret_type, stack_slots);
1935     }
1936 
1937     __ mv(c_rarg2, xthread);
1938     __ la(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1939     __ mv(c_rarg0, obj_reg);
1940 
1941     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1942     // NOTE that obj_reg == x9 currently
1943     __ ld(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1944     __ sd(zr, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1945 
1946     rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1947 
1948 #ifdef ASSERT
1949     {
1950       Label L;
1951       __ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1952       __ beqz(t0, L);
1953       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1954       __ bind(L);
1955     }
1956 #endif /* ASSERT */
1957 
1958     __ sd(x9, Address(xthread, in_bytes(Thread::pending_exception_offset())));
1959 
1960     if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
1961       restore_native_result(masm, ret_type, stack_slots);
1962     }
1963     __ j(unlock_done);
1964 
1965     __ block_comment("} Slow path unlock");
1966 
1967   } // synchronized
1968 
1969   // SLOW PATH Reguard the stack if needed
1970 
1971   __ bind(reguard);
1972   save_native_result(masm, ret_type, stack_slots);
1973   rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1974   restore_native_result(masm, ret_type, stack_slots);
1975   // and continue
1976   __ j(reguard_done);
1977 
1978   // SLOW PATH safepoint
1979   {
1980     __ block_comment("safepoint {");
1981     __ bind(safepoint_in_progress);
1982 
1983     // Don't use call_VM as it will see a possible pending exception and forward it
1984     // and never return here preventing us from clearing _last_native_pc down below.
1985     //
1986     save_native_result(masm, ret_type, stack_slots);
1987     __ mv(c_rarg0, xthread);
1988 #ifndef PRODUCT
1989     assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
1990 #endif
1991     int32_t offset = 0;
1992     __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)), offset);
1993     __ jalr(x1, t0, offset);
1994 
1995     // Restore any method result value
1996     restore_native_result(masm, ret_type, stack_slots);
1997 
1998     __ j(safepoint_in_progress_done);
1999     __ block_comment("} safepoint");
2000   }
2001 
2002   // SLOW PATH dtrace support
2003   {
2004     __ block_comment("dtrace entry {");
2005     __ bind(dtrace_method_entry);
2006 
2007     // We have all of the arguments set up at this point. We must not touch any of the
2008     // register argument registers here (what if we save/restore them and there are no oops?)
2009 
2010     save_args(masm, total_c_args, c_arg, out_regs);
2011     __ mov_metadata(c_rarg1, method());
2012     __ call_VM_leaf(
2013       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2014       xthread, c_rarg1);
2015     restore_args(masm, total_c_args, c_arg, out_regs);
2016     __ j(dtrace_method_entry_done);
2017     __ block_comment("} dtrace entry");
2018   }
2019 
2020   {
2021     __ block_comment("dtrace exit {");
2022     __ bind(dtrace_method_exit);
2023     save_native_result(masm, ret_type, stack_slots);
2024     __ mov_metadata(c_rarg1, method());
2025     __ call_VM_leaf(
2026          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2027          xthread, c_rarg1);
2028     restore_native_result(masm, ret_type, stack_slots);
2029     __ j(dtrace_method_exit_done);
2030     __ block_comment("} dtrace exit");
2031   }
2032 
2033   __ flush();
2034 
2035   nmethod *nm = nmethod::new_native_nmethod(method,
2036                                             compile_id,
2037                                             masm->code(),
2038                                             vep_offset,
2039                                             frame_complete,
2040                                             stack_slots / VMRegImpl::slots_per_word,
2041                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2042                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2043                                             oop_maps);
2044   assert(nm != NULL, "create native nmethod fail!");
2045   return nm;
2046 }
2047 
2048 // This function returns the adjusted size (in number of words) of a c2i adapter
2049 // activation, for use during deoptimization.
2050 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2051   assert(callee_locals >= callee_parameters,
2052          "test and remove; got more parms than locals");
2053   if (callee_locals < callee_parameters) {
2054     return 0;                   // No adjustment for negative locals
2055   }
2056   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2057   // diff is counted in stack words
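       // e.g. (illustrative): callee_parameters == 2, callee_locals == 5 and
       // one word per stack element give diff == 3, rounded up to 4 below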
2058   return align_up(diff, 2);
2059 }
2060 
2061 //------------------------------generate_deopt_blob----------------------------
2062 void SharedRuntime::generate_deopt_blob() {
2063   // Allocate space for the code
2064   ResourceMark rm;
2065   // Setup code generation tools
2066   int pad = 0;
2067   CodeBuffer buffer("deopt_blob", 2048 + pad, 1024);
2068   MacroAssembler* masm = new MacroAssembler(&buffer);
2069   int frame_size_in_words = -1;
2070   OopMap* map = NULL;
2071   OopMapSet *oop_maps = new OopMapSet();
2072   assert_cond(masm != NULL && oop_maps != NULL);
2073   RegisterSaver reg_saver(COMPILER2_OR_JVMCI != 0);
2074 
2075   // -------------
2076   // This code enters when returning to a de-optimized nmethod.  A return
2077   // address has been pushed on the stack, and return values are in
2078   // registers.
2079   // If we are doing a normal deopt then we were called from the patched
2080   // nmethod from the point we returned to the nmethod. So the return
2081   // address on the stack is wrong by NativeCall::instruction_size.
2082   // We will adjust the value so it looks like we have the original return
2083   // address on the stack (like when we eagerly deoptimized).
2084   // In the case of an exception pending when deoptimizing, we enter
2085   // with a return address on the stack that points after the call we patched
2086   // into the exception handler. We have the following register state from,
2087   // e.g., the forward exception stub (see stubGenerator_riscv.cpp).
2088   //    x10: exception oop
2089   //    x9: exception handler
2090   //    x13: throwing pc
2091   // So in this case we simply jam x13 into the useless return address and
2092   // the stack looks just like we want.
2093   //
2094   // At this point we need to de-opt.  We save the argument return
2095   // registers.  We call the first C routine, fetch_unroll_info().  This
2096   // routine captures the return values and returns a structure which
2097   // describes the current frame size and the sizes of all replacement frames.
2098   // The current frame is compiled code and may contain many inlined
2099   // functions, each with their own JVM state.  We pop the current frame, then
2100   // push all the new frames.  Then we call the C routine unpack_frames() to
2101   // populate these frames.  Finally unpack_frames() returns us the new target
2102   // address.  Notice that callee-save registers are BLOWN here; they have
2103   // already been captured in the vframeArray at the time the return PC was
2104   // patched.
2105   address start = __ pc();
2106   Label cont;
2107 
2108   // Prolog for non exception case!
2109 
2110   // Save everything in sight.
2111   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2112 
2113   // Normal deoptimization.  Save exec mode for unpack_frames.
2114   __ mvw(xcpool, Deoptimization::Unpack_deopt); // callee-saved
2115   __ j(cont);
2116 
2117   int reexecute_offset = __ pc() - start;
2118 
2119   // Reexecute case
2120   // The return address is the pc that describes what bci to re-execute at.
2121 
2122   // No need to update map as each call to save_live_registers will produce identical oopmap
2123   (void) reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2124 
2125   __ mvw(xcpool, Deoptimization::Unpack_reexecute); // callee-saved
2126   __ j(cont);
2127 
2128   int exception_offset = __ pc() - start;
2129 
2130   // Prolog for exception case
2131 
2132   // all registers are dead at this entry point, except for x10, and
2133   // x13 which contain the exception oop and exception pc
2134   // respectively.  Set them in TLS and fall thru to the
2135   // unpack_with_exception_in_tls entry point.
2136 
2137   __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
2138   __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
2139 
2140   int exception_in_tls_offset = __ pc() - start;
2141 
2142   // new implementation because exception oop is now passed in JavaThread
2143 
2144   // Prolog for exception case
2145   // All registers must be preserved because they might be used by LinearScan
2146   // Exception oop and throwing PC are passed in JavaThread
2147   // tos: stack at point of call to method that threw the exception (i.e. only
2148   // args are on the stack, no return address)
2149 
2150   // The return address pushed by save_live_registers will be patched
2151   // later with the throwing pc. The correct value is not available
2152   // now because loading it from memory would destroy registers.
2153 
2154   // NB: The SP at this point must be the SP of the method that is
2155   // being deoptimized.  Deoptimization assumes that the frame created
2156   // here by save_live_registers is immediately below the method's SP.
2157   // This is a somewhat fragile mechanism.
2158 
2159   // Save everything in sight.
2160   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2161 
2162   // Now it is safe to overwrite any register
2163 
2164   // Deopt during an exception.  Save exec mode for unpack_frames.
2165   __ li(xcpool, Deoptimization::Unpack_exception); // callee-saved
2166 
2167   // load throwing pc from JavaThread and patch it as the return address
2168   // of the current frame. Then clear the field in JavaThread
2169 
2170   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2171   __ sd(x13, Address(fp, frame::return_addr_offset * wordSize));
2172   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2173 
2174 #ifdef ASSERT
2175   // verify that there is really an exception oop in JavaThread
2176   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2177   __ verify_oop(x10);
2178 
2179   // verify that there is no pending exception
2180   Label no_pending_exception;
2181   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2182   __ beqz(t0, no_pending_exception);
2183   __ stop("must not have pending exception here");
2184   __ bind(no_pending_exception);
2185 #endif
2186 
2187   __ bind(cont);
2188 
2189   // Call C code.  Need thread and this frame, but NOT official VM entry
2190   // crud.  We cannot block on this call, no GC can happen.
2191   //
2192   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2193 
2194   // fetch_unroll_info needs to call last_java_frame().
2195 
2196   Label retaddr;
2197   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2198 #ifdef ASSERT
2199   {
2200     Label L;
2201     __ ld(t0, Address(xthread,
2202                       JavaThread::last_Java_fp_offset()));
2203     __ beqz(t0, L);
2204     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2205     __ bind(L);
2206   }
2207 #endif // ASSERT
2208   __ mv(c_rarg0, xthread);
2209   __ mv(c_rarg1, xcpool);
2210   int32_t offset = 0;
2211   __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)), offset);
2212   __ jalr(x1, t0, offset);
2213   __ bind(retaddr);
2214 
2215   // Need to have an oopmap that tells fetch_unroll_info where to
2216   // find any register it might need.
2217   oop_maps->add_gc_map(__ pc() - start, map);
2218 
2219   __ reset_last_Java_frame(false);
2220 
2221   // Load UnrollBlock* into x15
2222   __ mv(x15, x10);
2223 
2224   __ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2225   Label noException;
2226   __ li(t0, Deoptimization::Unpack_exception);
2227   __ bne(xcpool, t0, noException); // Was exception pending?
2228   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2229   __ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
2230   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
2231   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2232 
2233   __ verify_oop(x10);
2234 
2235   // Overwrite the result registers with the exception results.
2236   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2237 
2238   __ bind(noException);
2239 
2240   // Only register save data is on the stack.
2241   // Now restore the result registers.  Everything else is either dead
2242   // or captured in the vframeArray.
2243 
2244   // Restore fp result register
2245   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2246   // Restore integer result register
2247   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2248 
2249   // Pop all of the register save area off the stack
2250   __ add(sp, sp, frame_size_in_words * wordSize);
2251 
2252   // All of the register save area has been popped off the stack. Only the
2253   // return address remains.
2254 
2255   // Pop all the frames we must move/replace.
2256   //
2257   // Frame picture (youngest to oldest)
2258   // 1: self-frame (no frame link)
2259   // 2: deopting frame  (no frame link)
2260   // 3: caller of deopting frame (could be compiled/interpreted).
2261   //
2262   // Note: by leaving the return address of self-frame on the stack
2263   // and using the size of frame 2 to adjust the stack
2264   // when we are done the return to frame 3 will still be on the stack.
2265 
2266   // Pop deoptimized frame
2267   __ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
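       // (the recorded frame size includes the two words for fp and ra,
       //  which we pop by hand below)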
2268   __ sub(x12, x12, 2 * wordSize);
2269   __ add(sp, sp, x12);
2270   __ ld(fp, Address(sp, 0));
2271   __ ld(ra, Address(sp, wordSize));
2272   __ addi(sp, sp, 2 * wordSize);
2273   // RA should now be the return address to the caller (3)
2274 
2275 #ifdef ASSERT
2276   // Compilers generate code that bangs the stack by as much as the
2277   // interpreter would need. So this stack banging should never
2278   // trigger a fault. Verify that it does not on non-product builds.
2279   __ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2280   __ bang_stack_size(x9, x12);
2281 #endif
2282   // Load address of array of frame pcs into x12
2283   __ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2284 
2285   // Load address of array of frame sizes into x14
2286   __ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2287 
2288   // Load counter into x13
2289   __ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2290 
2291   // Now adjust the caller's stack to make up for the extra locals
2292   // but record the original sp so that we can save it in the skeletal interpreter
2293   // frame; the stack walking of interpreter_sender will then get the unextended sp
2294   // value and not the "real" sp value.
2295 
2296   const Register sender_sp = x16;
2297 
2298   __ mv(sender_sp, sp);
2299   __ lwu(x9, Address(x15,
2300                      Deoptimization::UnrollBlock::
2301                      caller_adjustment_offset_in_bytes()));
2302   __ sub(sp, sp, x9);
2303 
2304   // Push interpreter frames in a loop
2305   __ li(t0, 0xDEADDEAD);               // Make a recognizable pattern
2306   __ mv(t1, t0);
2307   Label loop;
2308   __ bind(loop);
2309   __ ld(x9, Address(x14, 0));          // Load frame size
2310   __ addi(x14, x14, wordSize);
2311   __ sub(x9, x9, 2 * wordSize);        // We'll push pc and fp by hand
2312   __ ld(ra, Address(x12, 0));          // Load pc
2313   __ addi(x12, x12, wordSize);
2314   __ enter();                          // Save old & set new fp
2315   __ sub(sp, sp, x9);                  // Prolog
2316   // This value is corrected by layout_activation_impl
2317   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
2318   __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2319   __ mv(sender_sp, sp);                // Pass sender_sp to next frame
2320   __ addi(x13, x13, -1);               // Decrement counter
2321   __ bnez(x13, loop);
2322 
2323   // Re-push self-frame
2324   __ ld(ra, Address(x12));
2325   __ enter();
2326 
2327   // Allocate a full sized register save area.  We subtract 2 because
2328   // enter() just pushed 2 words
2329   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2330 
2331   // Restore frame locals after moving the frame
2332   __ fsd(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2333   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2334 
2335   // Call C code.  Need thread but NOT official VM entry
2336   // crud.  We cannot block on this call, no GC can happen.  Call should
2337   // restore return values to their stack-slots with the new SP.
2338   //
2339   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2340 
2341   // Use fp because the frames look interpreted now
2342   // Don't need the precise return PC here, just precise enough to point into this code blob.
2343   address the_pc = __ pc();
2344   __ set_last_Java_frame(sp, fp, the_pc, t0);
2345 
2346   __ mv(c_rarg0, xthread);
2347   __ mv(c_rarg1, xcpool); // second arg: exec_mode
2348   offset = 0;
2349   __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)), offset);
2350   __ jalr(x1, t0, offset);
2351 
2352   // Set an oopmap for the call site
2353   // Use the same PC we used for the last java frame
2354   oop_maps->add_gc_map(the_pc - start,
2355                        new OopMap(frame_size_in_words, 0));
2356 
2357   // Clear fp AND pc
2358   __ reset_last_Java_frame(true);
2359 
2360   // Collect return values
2361   __ fld(f10, Address(sp, reg_saver.freg_offset_in_bytes(f10)));
2362   __ ld(x10, Address(sp, reg_saver.reg_offset_in_bytes(x10)));
2363 
2364   // Pop self-frame.
2365   __ leave();                           // Epilog
2366 
2367   // Jump to interpreter
2368   __ ret();
2369 
2370   // Make sure all code is generated
2371   masm->flush();
2372 
2373   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2374   assert(_deopt_blob != NULL, "create deoptimization blob fail!");
2375   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2376 }
2377 
2378 // Number of stack slots between incoming argument block and the start of
2379 // a new frame. The PROLOG must add this many slots to the stack. The
2380 // EPILOG must remove this many slots.
2381 // RISCV needs two words for RA (return address) and FP (frame pointer).
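     // (i.e. 2 * slots_per_word == 4 VMReg slots on RV64, where each 32-bit
     // stack slot is half a word)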
2382 uint SharedRuntime::in_preserve_stack_slots() {
2383   return 2 * VMRegImpl::slots_per_word;
2384 }
2385 
2386 uint SharedRuntime::out_preserve_stack_slots() {
2387   return 0;
2388 }
2389 
2390 #ifdef COMPILER2
2391 //------------------------------generate_uncommon_trap_blob--------------------
2392 void SharedRuntime::generate_uncommon_trap_blob() {
2393   // Allocate space for the code
2394   ResourceMark rm;
2395   // Setup code generation tools
2396   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2397   MacroAssembler* masm = new MacroAssembler(&buffer);
2398   assert_cond(masm != NULL);
2399 
2400   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2401 
2402   address start = __ pc();
2403 
2404   // Push self-frame.  We get here with a return address in RA
2405   // and sp should be 16-byte aligned
2406   // push fp and retaddr by hand
2407   __ addi(sp, sp, -2 * wordSize);
2408   __ sd(ra, Address(sp, wordSize));
2409   __ sd(fp, Address(sp, 0));
2410   // we don't expect an arg reg save area
2411 #ifndef PRODUCT
2412   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2413 #endif
2414   // The compiler left unloaded_class_index in j_rarg0; move it to where the
2415   // runtime expects it.
2416   __ addiw(c_rarg1, j_rarg0, 0);
2417 
2418   // We need to set the last Java SP to the stack pointer of the stub frame
2419   // and the pc to the address where this runtime call will return
2420   // (although actually any pc in this code blob will do).
2421   Label retaddr;
2422   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2423 
2424   // Call C code.  Need thread but NOT official VM entry
2425   // crud.  We cannot block on this call, no GC can happen.  Call should
2426   // capture callee-saved registers as well as return values.
2427   //
2428   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index, jint exec_mode)
2429   //
2430   // n.b. 3 gp args, 0 fp args, integral return type
2431 
2432   __ mv(c_rarg0, xthread);
2433   __ mvw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2434   int32_t offset = 0;
2435   __ la_patchable(t0,
2436         RuntimeAddress(CAST_FROM_FN_PTR(address,
2437                                         Deoptimization::uncommon_trap)), offset);
2438   __ jalr(x1, t0, offset);
2439   __ bind(retaddr);
2440 
2441   // Set an oopmap for the call site
2442   OopMapSet* oop_maps = new OopMapSet();
2443   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2444   assert_cond(oop_maps != NULL && map != NULL);
2445 
2446   // location of fp is known implicitly by the frame sender code
2447 
2448   oop_maps->add_gc_map(__ pc() - start, map);
2449 
2450   __ reset_last_Java_frame(false);
2451 
2452   // move UnrollBlock* into x14
2453   __ mv(x14, x10);
2454 
2455 #ifdef ASSERT
2456   { Label L;
2457     __ lwu(t0, Address(x14, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2458     __ mvw(t1, Deoptimization::Unpack_uncommon_trap);
2459     __ beq(t0, t1, L);
2460     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2461     __ bind(L);
2462   }
2463 #endif
2464 
2465   // Pop all the frames we must move/replace.
2466   //
2467   // Frame picture (youngest to oldest)
2468   // 1: self-frame (no frame link)
2469   // 2: deopting frame  (no frame link)
2470   // 3: caller of deopting frame (could be compiled/interpreted).
2471 
2472   __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2473 
2474   // Pop deoptimized frame (int)
2475   __ lwu(x12, Address(x14,
2476                       Deoptimization::UnrollBlock::
2477                       size_of_deoptimized_frame_offset_in_bytes()));
2478   __ sub(x12, x12, 2 * wordSize);
2479   __ add(sp, sp, x12);
2480   __ ld(fp, Address(sp, 0));
2481   __ ld(ra, Address(sp, wordSize));
2482   __ addi(sp, sp, 2 * wordSize);
2483   // RA should now be the return address to the caller (3) frame
2484 
2485 #ifdef ASSERT
2486   // Compilers generate code that bangs the stack by as much as the
2487   // interpreter would need. So this stack banging should never
2488   // trigger a fault. Verify that it does not on non-product builds.
2489   __ lwu(x11, Address(x14,
2490                       Deoptimization::UnrollBlock::
2491                       total_frame_sizes_offset_in_bytes()));
2492   __ bang_stack_size(x11, x12);
2493 #endif
2494 
2495   // Load address of array of frame pcs into x12 (address*)
2496   __ ld(x12, Address(x14,
2497                      Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2498 
2499   // Load address of array of frame sizes into x15 (intptr_t*)
2500   __ ld(x15, Address(x14,
2501                      Deoptimization::UnrollBlock::
2502                      frame_sizes_offset_in_bytes()));
2503 
2504   // Counter
2505   __ lwu(x13, Address(x14,
2506                       Deoptimization::UnrollBlock::
2507                       number_of_frames_offset_in_bytes())); // (int)
2508 
2509   // Now adjust the caller's stack to make up for the extra locals but
2510   // record the original sp so that we can save it in the skeletal
2511   // interpreter frame and the stack walking of interpreter_sender
2512   // will get the unextended sp value and not the "real" sp value.
2513 
2514   const Register sender_sp = t1; // temporary register
2515 
2516   __ lwu(x11, Address(x14,
2517                       Deoptimization::UnrollBlock::
2518                       caller_adjustment_offset_in_bytes())); // (int)
2519   __ mv(sender_sp, sp);
2520   __ sub(sp, sp, x11);
2521 
2522   // Push interpreter frames in a loop
2523   Label loop;
2524   __ bind(loop);
2525   __ ld(x11, Address(x15, 0));       // Load frame size
2526   __ sub(x11, x11, 2 * wordSize);    // We'll push pc and fp by hand
2527   __ ld(ra, Address(x12, 0));        // Save return address
2528   __ enter();                        // and old fp & set new fp
2529   __ sub(sp, sp, x11);               // Prolog
2530   __ sd(sender_sp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2531   // This value is corrected by layout_activation_impl
2532   __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
2533   __ mv(sender_sp, sp);              // Pass sender_sp to next frame
2534   __ add(x15, x15, wordSize);        // Bump array pointer (sizes)
2535   __ add(x12, x12, wordSize);        // Bump array pointer (pcs)
2536   __ subw(x13, x13, 1);              // Decrement counter
2537   __ bgtz(x13, loop);
2538   __ ld(ra, Address(x12, 0));        // Load final return address
2539   // Re-push self-frame
2540   __ enter();                        // & old fp & set new fp
2541 
2542   // Use fp because the frames look interpreted now
2543   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2544   // Don't need the precise return PC here, just precise enough to point into this code blob.
2545   address the_pc = __ pc();
2546   __ set_last_Java_frame(sp, fp, the_pc, t0);
2547 
2548   // Call C code.  Need thread but NOT official VM entry
2549   // crud.  We cannot block on this call, no GC can happen.  Call should
2550   // restore return values to their stack-slots with the new SP.
2551   //
2552   // BasicType unpack_frames(JavaThread* thread, int exec_mode)
2553   //
2554 
2555   // n.b. 2 gp args, 0 fp args, integral return type
2556 
2557   // sp should already be aligned
2558   __ mv(c_rarg0, xthread);
2559   __ mvw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2560   offset = 0;
2561   __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)), offset);
2562   __ jalr(x1, t0, offset);
2563 
2564   // Set an oopmap for the call site
2565   // Use the same PC we used for the last java frame
2566   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2567 
2568   // Clear fp AND pc
2569   __ reset_last_Java_frame(true);
2570 
2571   // Pop self-frame.
2572   __ leave();                 // Epilog
2573 
2574   // Jump to interpreter
2575   __ ret();
2576 
2577   // Make sure all code is generated
2578   masm->flush();
2579 
2580   _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
2581                                                   SimpleRuntimeFrame::framesize >> 1);
2582 }
2583 #endif // COMPILER2
2584 
2585 //------------------------------generate_handler_blob------
2586 //
2587 // Generate a special Compile2Runtime blob that saves all registers,
2588 // and sets up an oopmap.
2589 //
2590 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2591   ResourceMark rm;
2592   OopMapSet *oop_maps = new OopMapSet();
2593   assert_cond(oop_maps != NULL);
2594   OopMap* map = NULL;
2595 
2596   // Allocate space for the code.  Setup code generation tools.
2597   CodeBuffer buffer("handler_blob", 2048, 1024);
2598   MacroAssembler* masm = new MacroAssembler(&buffer);
2599   assert_cond(masm != NULL);
2600 
2601   address start   = __ pc();
2602   address call_pc = NULL;
2603   int frame_size_in_words = -1;
2604   bool cause_return = (poll_type == POLL_AT_RETURN);
2605   RegisterSaver reg_saver(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
2606 
2607   // Save Integer and Float registers.
2608   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2609 
2610   // The following is basically a call_VM.  However, we need the precise
2611   // address of the call in order to generate an oopmap. Hence, we do all the
2612   // work ourselves.
2613 
2614   Label retaddr;
2615   __ set_last_Java_frame(sp, noreg, retaddr, t0);
2616 
2617   // The return address must always be correct so that the frame constructor never
2618   // sees an invalid pc.
2619 
2620   if (!cause_return) {
2621     // overwrite the return address pushed by save_live_registers
2622     // Additionally, x18 is a callee-saved register so we can look at
2623     // it later to determine if someone changed the return address for
2624     // us!
2625     __ ld(x18, Address(xthread, JavaThread::saved_exception_pc_offset()));
2626     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2627   }
2628 
2629   // Do the call
2630   __ mv(c_rarg0, xthread);
2631   int32_t offset = 0;
2632   __ la_patchable(t0, RuntimeAddress(call_ptr), offset);
2633   __ jalr(x1, t0, offset);
2634   __ bind(retaddr);
2635 
2636   // Set an oopmap for the call site.  This oopmap will map all
2637   // oop-registers and debug-info registers as callee-saved.  This
2638   // will allow deoptimization at this safepoint to find all possible
2639   // debug-info recordings, as well as let GC find all oops.
2640 
2641   oop_maps->add_gc_map( __ pc() - start, map);
2642 
2643   Label noException;
2644 
2645   __ reset_last_Java_frame(false);
2646 
2647   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
2648 
2649   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2650   __ beqz(t0, noException);
2651 
2652   // Exception pending
2653 
2654   reg_saver.restore_live_registers(masm);
2655 
2656   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2657 
2658   // No exception case
2659   __ bind(noException);
2660 
2661   Label no_adjust, bail;
2662   if (!cause_return) {
2663     // If our stashed return pc was modified by the runtime we avoid touching it
2664     __ ld(t0, Address(fp, frame::return_addr_offset * wordSize));
2665     __ bne(x18, t0, no_adjust);
2666 
2667 #ifdef ASSERT
2668     // Verify the correct encoding of the poll we're about to skip.
2669     // See NativeInstruction::is_lwu_to_zr()
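         // (illustrative: the poll is expected to be 'lwu zr, offset(rs1)',
         //  i.e. opcode LOAD == 0b0000011, rd == x0, funct3 == LWU == 0b110;
         //  the three checks below test exactly those fields)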
2670     __ lwu(t0, Address(x18));
2671     __ andi(t1, t0, 0b0000011);
2672     __ mv(t2, 0b0000011);
2673     __ bne(t1, t2, bail); // 0-6:0b0000011
2674     __ srli(t1, t0, 7);
2675     __ andi(t1, t1, 0b11111);
2676     __ bnez(t1, bail);    // 7-11:0b00000
2677     __ srli(t1, t0, 12);
2678     __ andi(t1, t1, 0b111);
2679     __ mv(t2, 0b110);
2680     __ bne(t1, t2, bail); // 12-14:0b110
2681 #endif
2682     // Adjust return pc forward to step over the safepoint poll instruction
2683     __ add(x18, x18, NativeInstruction::instruction_size);
2684     __ sd(x18, Address(fp, frame::return_addr_offset * wordSize));
2685   }
2686 
2687   __ bind(no_adjust);
2688   // Normal exit, restore registers and exit.
2689 
2690   reg_saver.restore_live_registers(masm);
2691   __ ret();
2692 
2693 #ifdef ASSERT
2694   __ bind(bail);
2695   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2696 #endif
2697 
2698   // Make sure all code is generated
2699   masm->flush();
2700 
2701   // Fill-out other meta info
2702   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2703 }
2704 
2705 //
2706 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2707 //
2708 // Generate a stub that calls into vm to find out the proper destination
2709 // of a java call. All the argument registers are live at this point
2710 // but since this is generic code we don't know what they are and the caller
2711 // must do any gc of the args.
2712 //
2713 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2714   assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2715 
2716   // allocate space for the code
2717   ResourceMark rm;
2718 
2719   CodeBuffer buffer(name, 1000, 512);
2720   MacroAssembler* masm = new MacroAssembler(&buffer);
2721   assert_cond(masm != NULL);
2722 
2723   int frame_size_in_words = -1;
2724   RegisterSaver reg_saver(false /* save_vectors */);
2725 
2726   OopMapSet *oop_maps = new OopMapSet();
2727   assert_cond(oop_maps != NULL);
2728   OopMap* map = NULL;
2729 
2730   int start = __ offset();
2731 
2732   map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
2733 
2734   int frame_complete = __ offset();
2735 
2736   {
2737     Label retaddr;
2738     __ set_last_Java_frame(sp, noreg, retaddr, t0);
2739 
2740     __ mv(c_rarg0, xthread);
2741     int32_t offset = 0;
2742     __ la_patchable(t0, RuntimeAddress(destination), offset);
2743     __ jalr(x1, t0, offset);
2744     __ bind(retaddr);
2745   }
2746 
2747   // Set an oopmap for the call site.
2748   // We need this not only for callee-saved registers, but also for volatile
2749   // registers that the compiler might be keeping live across a safepoint.
2750 
2751   oop_maps->add_gc_map( __ offset() - start, map);
2752 
2753   // x10 contains the address we are going to jump to, assuming no exception was installed
2754 
2755   // clear last_Java_sp
2756   __ reset_last_Java_frame(false);
2757   // check for pending exceptions
2758   Label pending;
2759   __ ld(t0, Address(xthread, Thread::pending_exception_offset()));
2760   __ bnez(t0, pending);
2761 
2762   // get the returned Method*
2763   __ get_vm_result_2(xmethod, xthread);
2764   __ sd(xmethod, Address(sp, reg_saver.reg_offset_in_bytes(xmethod)));
2765 
2766   // x10 is where we want to jump; overwrite t0's save slot, since t0 is saved and temporary
2767   __ sd(x10, Address(sp, reg_saver.reg_offset_in_bytes(t0)));
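       // (The trick: restore_live_registers reloads every register from its
       // save slot, so overwriting t0's slot with x10 leaves the jump target
       // in t0 after the restore; xmethod got the same treatment above.)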
2768   reg_saver.restore_live_registers(masm);
2769 
2770   // We are back to the original state on entry and ready to go.
2771 
2772   __ jr(t0);
2773 
2774   // Pending exception after the safepoint
2775 
2776   __ bind(pending);
2777 
2778   reg_saver.restore_live_registers(masm);
2779 
2780   // exception pending => remove activation and forward to exception handler
2781 
2782   __ sd(zr, Address(xthread, JavaThread::vm_result_offset()));
2783 
2784   __ ld(x10, Address(xthread, Thread::pending_exception_offset()));
2785   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2786 
2787   // -------------
2788   // make sure all code is generated
2789   masm->flush();
2790 
2791   // return the blob
2792   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2793 }
2794 
2795 #ifdef COMPILER2
2796 RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
2797                                                 int shadow_space_bytes,
2798                                                 const GrowableArray<VMReg>& input_registers,
2799                                                 const GrowableArray<VMReg>& output_registers) {
2800   Unimplemented();
2801   return nullptr;
2802 }
2803 
2804 //------------------------------generate_exception_blob---------------------------
2805 // Creates the exception blob.
2806 // Compiled code jumps to this blob when an exception is thrown
2807 // (see emit_exception_handler in the riscv.ad file).
2808 //
2809 // Given an exception pc at a call, we call into the runtime to find the
2810 // handler for this method. That handler might merely restore state
2811 // (i.e. callee-save registers), unwind the frame, and jump to the
2812 // caller's exception handler if there is no Java-level handler
2813 // for the nmethod.
2814 //
2815 // This code is entered with a jump, not a call.
2816 //
2817 // Arguments:
2818 //   x10: exception oop
2819 //   x13: exception pc
2820 //
2821 // Results:
2822 //   x10: exception oop
2823 //   x13: exception pc in caller
2824 //   destination: exception handler of caller
2825 //
2826 // Note: the exception pc MUST be at a call (precise debug information)
2827 //       Registers x10, x13, x12, x14, x15, t0 are not callee saved.
2828 //
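     // A rough sketch of the dispatch performed below (handle_exception_C is
     // the authority on how the handler address is computed):
     //
     //   throwing nmethod --(jump)--> this blob
     //     --> stash exception oop and pc in the JavaThread
     //     --> OptoRuntime::handle_exception_C(thread): find the handler
     //     --> reload exception oop/pc, jump to the returned handler (a Java
     //         handler, the deopt blob, or the caller's unwind continuation)
     //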
2829 
2830 void OptoRuntime::generate_exception_blob() {
2831   assert(!OptoRuntime::is_callee_saved_register(R13_num), "");
2832   assert(!OptoRuntime::is_callee_saved_register(R10_num), "");
2833   assert(!OptoRuntime::is_callee_saved_register(R12_num), "");
2834 
2835   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2836 
2837   // Allocate space for the code
2838   ResourceMark rm;
2839   // Setup code generation tools
2840   CodeBuffer buffer("exception_blob", 2048, 1024);
2841   MacroAssembler* masm = new MacroAssembler(&buffer);
2842   assert_cond(masm != NULL);
2843 
2844   // TODO: check the various assumptions made here,
2845   // and make sure we do so before running this.
2846   //
2847 
2848   address start = __ pc();
2849 
2850   // push fp and retaddr by hand
2851   // Exception pc is 'return address' for stack walker
2852   __ addi(sp, sp, -2 * wordSize);
2853   __ sd(ra, Address(sp, wordSize));
2854   __ sd(fp, Address(sp));
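       // The frame now matches SimpleRuntimeFrame:
       //   sp + 1*wordSize : saved ra (the exception pc)  -- return_off
       //   sp + 0          : saved fp                     -- fp_off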
2855   // there are no callee save registers and we don't expect an
2856   // arg reg save area
2857 #ifndef PRODUCT
2858   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2859 #endif
2860   // Store exception in Thread object. We cannot pass any arguments to the
2861   // handle_exception call, since we do not want to make any assumption
2862   // about the size of the frame where the exception happened in.
2863   __ sd(x10, Address(xthread, JavaThread::exception_oop_offset()));
2864   __ sd(x13, Address(xthread, JavaThread::exception_pc_offset()));
2865 
2866   // This call does all the hard work.  It checks if an exception handler
2867   // exists in the method.
2868   // If so, it returns the handler address.
2869   // If not, it prepares for stack-unwinding, restoring the callee-save
2870   // registers of the frame being removed.
2871   //
2872   // address OptoRuntime::handle_exception_C(JavaThread* thread)
2873   //
2874   // n.b. 1 gp arg, 0 fp args, integral return type
2875 
2876   // the stack should always be aligned
2877   address the_pc = __ pc();
2878   __ set_last_Java_frame(sp, noreg, the_pc, t0);
2879   __ mv(c_rarg0, xthread);
2880   int32_t offset = 0;
2881   __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)), offset);
2882   __ jalr(x1, t0, offset);
2883
2885   // handle_exception_C is a special VM call which does not require an explicit
2886   // instruction sync afterwards.
2887 
2888   // Set an oopmap for the call site.  This oopmap will only be used if we
2889   // are unwinding the stack.  Hence, all locations will be dead.
2890   // Callee-saved registers will be the same as the frame above (i.e.,
2891   // handle_exception_stub), since they were restored when we got the
2892   // exception.
2893 
2894   OopMapSet* oop_maps = new OopMapSet();
2895   assert_cond(oop_maps != NULL);
2896 
2897   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2898 
2899   __ reset_last_Java_frame(false);
2900 
2901   // Restore callee-saved registers
2902 
2903   // fp is an implicitly saved callee-saved register (i.e. the calling
2904   // convention will save/restore it in the prolog/epilog). Other than that,
2905   // there are no callee-save registers now that adapter frames are gone,
2906   // and we don't expect an arg reg save area.
2907   __ ld(fp, Address(sp));
2908   __ ld(x13, Address(sp, wordSize));
2909   __ addi(sp, sp, 2 * wordSize);
2910 
2911   // x10: exception handler
2912 
2913   // We have a handler in x10 (could be deopt blob).
2914   __ mv(t0, x10);
2915 
2916   // Get the exception oop
2917   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
2918   // Get the exception pc in case we are deoptimized
2919   __ ld(x14, Address(xthread, JavaThread::exception_pc_offset()));
2920 #ifdef ASSERT
2921   __ sd(zr, Address(xthread, JavaThread::exception_handler_pc_offset()));
2922   __ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
2923 #endif
2924   // Clear the exception oop so GC no longer processes it as a root.
2925   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
2926 
2927   // x10: exception oop
2928   // t0:  exception handler
2929   // x14: exception pc
2930   // Jump to handler
2931 
2932   __ jr(t0);
2933 
2934   // Make sure all code is generated
2935   masm->flush();
2936 
2937   // Set exception blob
2938   _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
2939 }
2940 #endif // COMPILER2