/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes();
  // Total stack size in bytes for saving SVE predicate registers.
  int total_sve_predicate_in_bytes();

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off + FPUStateSizeInWords,
                rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
                return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
                reg_save_size = return_off + Register::max_slots_per_register};

};
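
// A rough sketch of the save area laid out by save_live_registers(), from the
// lowest address upward (illustrative only; slot counts change when full NEON
// or SVE vectors are saved):
//   [ SVE predicate registers, if any  ]
//   [ floating-point register slots    ]
//   [ integer register slots           ]  <- reg_offset_in_bytes()
//   [ rfp                              ]
//   [ return address (lr)              ]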

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers() so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegister::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegister::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}
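
// A worked example (a sketch of the default, non-vector case): each of the 32
// float registers takes FloatRegister::save_slots_per_register == 2 four-byte
// slots and no predicate area is present, so r0's offset is
// 0 + (2 * 32) * 4 == 256 bytes, with each further integer register one
// wordSize (8 bytes) higher.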

int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate registers if
  // they are present in the stack frame pushed by save_live_registers(). So the
  // offset depends on the saved total predicate vectors in the stack frame.
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
}

int RegisterSaver::total_sve_predicate_in_bytes() {
#ifdef COMPILER2
  if (_save_vectors && Matcher::supports_scalable_vector()) {
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
           PRegister::number_of_registers;
  }
#endif
  return 0;
}
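
// For example (illustrative, assuming a 512-bit SVE implementation): a vector
// holds 64 T_BYTE lanes, a predicate register keeps one bit per byte lane
// (64 >> LogBitsPerByte == 8 bytes), so with the 16 predicate registers
// (p0..p15) the total is 8 * 16 == 128 bytes.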

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;
  int sve_predicate_size_in_slots = 0;
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
    }
    int extra_vector_bytes = extra_save_slots_per_register *
                             VMRegImpl::stack_slot_size *
                             FloatRegister::number_of_registers;
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte stack slots.
      // Integer register save slots are 8 bytes wide and sit above the
      // save slots of the 32 floating-point registers.
      int sp_offset = Register::max_slots_per_register * i +
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
                            (FloatRegister::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegister::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The minimum vector size supported by SVE is 8 bytes, and we need to save
// predicate registers even when the vector size is only 8 bytes.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}
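
// A worked example (illustrative): for Java arguments (int, long, float,
// double) the incoming types are
//   [T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID]
// and the mapping produced above is
//   T_INT    -> j_rarg0  (set1)
//   T_LONG   -> j_rarg1  (set2; its trailing T_VOID half is set_bad())
//   T_FLOAT  -> j_farg0  (set1)
//   T_DOUBLE -> j_farg1  (set2; its trailing T_VOID half is set_bad())
// with stk_args == 0 because everything fits in registers.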

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ authenticate_return_address(c_rarg1);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.
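  // (For example, with 4 Java argument slots and the 8-byte
  // stackElementSize of a 64-bit VM, this asks for 32 bytes, which the
  // align_up below keeps 16-byte aligned.)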

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r19_sender_sp, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java
    // long/double in a single slot on a 64-bit VM and it would be silly to
    // break them up, the interpreter leaves one slot empty and only stores
    // to a single slot. In this case the slot that is occupied is the
    // T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory: use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG.
        // T_DOUBLE and T_LONG use two slots in the interpreter.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller), so move only 32 bits to the slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG.
        // T_DOUBLE and T_LONG use two slots in the interpreter.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // jlong/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}


void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != nullptr) {
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::initial_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::initial_stubs_code()->code_begin(),
                  StubRoutines::initial_stubs_code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::final_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::final_stubs_code()->code_begin(),
                  StubRoutines::final_stubs_code()->code_end(),
                  L_ok);
    }
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }
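  // (For example, 3 compiled 4-byte stack-arg slots round up to
  // comp_words_on_stack == 2 words, and the andr keeps sp 16-byte aligned
  // as AArch64 requires.)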

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        // We are using two OptoRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed with negative offsets, so the LSW is at the low address.

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ mov(rscratch2, rscratch1);
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
  __ mov(rscratch1, rscratch2);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately, if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register data = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ic_check(1 /* end_alignment */);
    __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));

    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {

// We return the number of VMRegImpl stack slots we need to reserve for all
// the arguments, NOT counting out_preserve_stack_slots.

    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
    };
    static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3,
      c_farg4, c_farg5, c_farg6, c_farg7
    };

    uint int_args = 0;
    uint fp_args = 0;
    uint stk_args = 0; // inc by 2 each time

    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
#ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
          return -1;
#endif
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
#ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
          return -1;
#endif
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }

  return stk_args;
}
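
// Note (a sketch of the rationale): stk_args always advances by 2 slots
// (one 8-byte word) per stack argument, matching the standard AAPCS64 used
// on Linux. Apple's AArch64 ABI instead packs sub-word stack arguments
// tightly, which is why the T_INT/T_FLOAT stack cases above bail out with
// -1 on __APPLE__.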

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  // More than 8 argument inputs are not supported now.
  assert(total_args_passed <= Argument::n_float_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  static const FloatRegister VEC_ArgReg[Argument::n_float_register_parameters_c] = {
    v0, v1, v2, v3, v4, v5, v6, v7
  };

  // On SVE, we use the same vector registers as the 128-bit NEON registers.
  int next_reg_val = num_bits == 64 ? 1 : 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}
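
// For example, with num_bits == 128 the pair (vmreg, vmreg->next(3)) spans
// the four 4-byte VMReg slots of a 128-bit NEON register; 64-bit vectors
// span only two slots (next(1)).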

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}
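
// Note: save_args and restore_args must agree on the layout. Integer args
// travel via a single push/pop of the collected RegSet, while each FP arg
// gets its own 16-byte slot (pre-decrement on save, post-increment on
// restore, walked in reverse order so the slots match up).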

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size()/wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
  __ mov(rscratch1, sp); // we can't use sp as the source in str
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));

  return map;
}
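
// (Resulting frame, from sp upward: the ContinuationEntry metadata block,
// then the caller's rfp and return address pushed by the enter() that the
// callers of this helper emit. JavaThread::_cont_entry now points at this
// new entry, whose parent field holds the previous value of _cont_entry.)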

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ movw(rscratch1, ContinuationEntry::cookie_value());
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::EQ, OK);
  __ stop("incorrect sp1");
  __ bind(OK);
#endif
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
    __ cbz(rscratch1, L_skip_vthread_code);

    // Save return value potentially containing the exception oop in callee-saved R19.
    __ mov(r19, r0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value.
    __ mov(r0, r19);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination. The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#endif

  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));

  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
  __ add(rfp, sp, (int)ContinuationEntry::size());
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs,
                                 int& exception_offset,
                                 OopMapSet* oop_maps,
                                 int& frame_complete,
                                 int& stack_slots,
                                 int& interpreted_entry_offset,
                                 int& compiled_entry_offset) {
  //verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used at interp_only_mode only
  interpreted_entry_offset = __ pc() - start;
  {

#ifdef ASSERT
    Label is_interp_only;
    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    __ cbnzw(rscratch1, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
    __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
    __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
    __ push_cont_fastpath(rthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
    // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ cbnz(c_rarg2, call_thaw);

    const address tr_call = __ trampoline_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ b(exit);

    address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ cbnz(c_rarg2, call_thaw);

  const address tr_call = __ trampoline_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ b(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  ContinuationEntry::_cleanup_offset = __ pc() - start;
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret(lr);

  /// exception handling

  exception_offset = __ pc() - start;
  {
      __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19

      continuation_enter_cleanup(masm);

      __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
      __ authenticate_return_address(c_rarg1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);

      // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc

      __ mov(r1, r0); // the exception handler
      __ mov(r0, r19); // restore return value containing the exception oop
      __ verify_oop(r0);

      __ leave();
      __ mov(r3, lr);
      __ br(r1); // the exception handler
  }

  address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
    enum layout {
      rfp_off1,
      rfp_off2,
      lr_off,
      lr_off2,
      framesize // inclusive of return address
    };
    // assert(is_even(framesize/2), "sp not 16-byte aligned");
    stack_slots = framesize / VMRegImpl::slots_per_word;
    assert(stack_slots == 2, "recheck layout");

    address start = __ pc();

    compiled_entry_offset = __ pc() - start;
    __ enter();

    __ mov(c_rarg1, sp);

    frame_complete = __ pc() - start;
    address the_pc = __ pc();

    __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup

    __ mov(c_rarg0, rthread);
    __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
    __ call_VM_leaf(Continuation::freeze_entry(), 2);
    __ reset_last_Java_frame(true);

    Label pinned;

    __ cbnz(r0, pinned);

    // We've succeeded, set sp to the ContinuationEntry
    __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
    __ mov(sp, rscratch1);
    continuation_enter_cleanup(masm);

    __ bind(pinned); // pinned -- return to caller

    // handle pending exception thrown by freeze
    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    __ cbz(rscratch1, ok);
    __ leave();
    __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    __ br(rscratch1);
    __ bind(ok);

    __ leave();
    __ ret(lr);

    OopMap* map = new OopMap(framesize, 1);
    oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = r19;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

1351 // ---------------------------------------------------------------------------
1352 // Generate a native wrapper for a given method.  The method takes arguments
1353 // in the Java compiled code convention, marshals them to the native
1354 // convention (handlizes oops, etc), transitions to native, makes the call,
1355 // returns to java state (possibly blocking), unhandlizes any result and
1356 // returns.
1357 //
1358 // Critical native functions are a shorthand for the use of
1359 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1360 // functions.  The wrapper is expected to unpack the arguments before
1361 // passing them to the callee. Critical native functions leave the state _in_Java,
1362 // since they block out GC.
1363 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1364 // block and the check for pending exceptions, since it's impossible for them
1365 // to be thrown.
1366 //
1367 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1368                                                 const methodHandle& method,
1369                                                 int compile_id,
1370                                                 BasicType* in_sig_bt,
1371                                                 VMRegPair* in_regs,
1372                                                 BasicType ret_type) {
1373   if (method->is_continuation_native_intrinsic()) {
1374     int exception_offset = -1;
1375     OopMapSet* oop_maps = new OopMapSet();
1376     int frame_complete = -1;
1377     int stack_slots = -1;
1378     int interpreted_entry_offset = -1;
1379     int vep_offset = -1;
1380     if (method->is_continuation_enter_intrinsic()) {
1381       gen_continuation_enter(masm,
1382                              method,
1383                              in_sig_bt,
1384                              in_regs,
1385                              exception_offset,
1386                              oop_maps,
1387                              frame_complete,
1388                              stack_slots,
1389                              interpreted_entry_offset,
1390                              vep_offset);
1391     } else if (method->is_continuation_yield_intrinsic()) {
1392       gen_continuation_yield(masm,
1393                              method,
1394                              in_sig_bt,
1395                              in_regs,
1396                              oop_maps,
1397                              frame_complete,
1398                              stack_slots,
1399                              vep_offset);
1400     } else {
1401       guarantee(false, "Unknown Continuation native intrinsic");
1402     }
1403 
1404 #ifdef ASSERT
1405     if (method->is_continuation_enter_intrinsic()) {
1406       assert(interpreted_entry_offset != -1, "Must be set");
1407       assert(exception_offset != -1,         "Must be set");
1408     } else {
1409       assert(interpreted_entry_offset == -1, "Must be unset");
1410       assert(exception_offset == -1,         "Must be unset");
1411     }
1412     assert(frame_complete != -1,    "Must be set");
1413     assert(stack_slots != -1,       "Must be set");
1414     assert(vep_offset != -1,        "Must be set");
1415 #endif
1416 
1417     __ flush();
1418     nmethod* nm = nmethod::new_native_nmethod(method,
1419                                               compile_id,
1420                                               masm->code(),
1421                                               vep_offset,
1422                                               frame_complete,
1423                                               stack_slots,
1424                                               in_ByteSize(-1),
1425                                               in_ByteSize(-1),
1426                                               oop_maps,
1427                                               exception_offset);
1428     if (nm == nullptr) return nm;
1429     if (method->is_continuation_enter_intrinsic()) {
1430       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1431     } else if (method->is_continuation_yield_intrinsic()) {
1432       _cont_doYield_stub = nm;
1433     } else {
1434       guarantee(false, "Unknown Continuation native intrinsic");
1435     }
1436     return nm;
1437   }
1438 
1439   if (method->is_method_handle_intrinsic()) {
1440     vmIntrinsics::ID iid = method->intrinsic_id();
1441     intptr_t start = (intptr_t)__ pc();
1442     int vep_offset = ((intptr_t)__ pc()) - start;
1443 
1444     // First instruction must be a nop as it may need to be patched on deoptimisation
1445     __ nop();
1446     gen_special_dispatch(masm,
1447                          method,
1448                          in_sig_bt,
1449                          in_regs);
1450     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1451     __ flush();
1452     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1453     return nmethod::new_native_nmethod(method,
1454                                        compile_id,
1455                                        masm->code(),
1456                                        vep_offset,
1457                                        frame_complete,
1458                                        stack_slots / VMRegImpl::slots_per_word,
1459                                        in_ByteSize(-1),
1460                                        in_ByteSize(-1),
1461                                        nullptr);
1462   }
1463   address native_func = method->native_function();
1464   assert(native_func != nullptr, "must have function");
1465 
1466   // An OopMap for lock (and class if static)
1467   OopMapSet *oop_maps = new OopMapSet();
1468   intptr_t start = (intptr_t)__ pc();
1469 
1470   // We have received a description of where all the java args are located
1471   // on entry to the wrapper. We need to convert these args to where
1472   // the jni function will expect them. To figure out where they go
1473   // we convert the java signature to a C signature by inserting
1474   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1475 
1476   const int total_in_args = method->size_of_parameters();
1477   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1478 
1479   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1480   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1481 
1482   int argc = 0;
1483   out_sig_bt[argc++] = T_ADDRESS;
1484   if (method->is_static()) {
1485     out_sig_bt[argc++] = T_OBJECT;
1486   }
1487 
1488   for (int i = 0; i < total_in_args ; i++ ) {
1489     out_sig_bt[argc++] = in_sig_bt[i];
1490   }
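       // (Illustrative example: for an instance method taking (jlong, jobject),
       //  size_of_parameters() is 4 -- receiver, two slots for the long, one for
       //  the object -- so total_c_args is 5 and the long's T_VOID filler slot
       //  is carried through into out_sig_bt.)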
1491 
1492   // Now figure out where the args must be stored and how much stack space
1493   // they require.
1494   int out_arg_slots;
1495   out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1496 
1497   if (out_arg_slots < 0) {
1498     return nullptr;
1499   }
1500 
1501   // Compute framesize for the wrapper.  We need to handlize all oops in
1502   // incoming registers
1503 
1504   // Calculate the total number of stack slots we will need.
1505 
1506   // First count the abi requirement plus all of the outgoing args
1507   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1508 
1509   // Now the space for the inbound oop handle area
1510   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1511 
1512   int oop_handle_offset = stack_slots;
1513   stack_slots += total_save_slots;
1514 
1515   // Now any space we need for handlizing a klass if static method
1516 
1517   int klass_slot_offset = 0;
1518   int klass_offset = -1;
1519   int lock_slot_offset = 0;
1520   bool is_static = false;
1521 
1522   if (method->is_static()) {
1523     klass_slot_offset = stack_slots;
1524     stack_slots += VMRegImpl::slots_per_word;
1525     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1526     is_static = true;
1527   }
1528 
1529   // Plus a lock if needed
1530 
1531   if (method->is_synchronized()) {
1532     lock_slot_offset = stack_slots;
1533     stack_slots += VMRegImpl::slots_per_word;
1534   }
1535 
1536   // Now a place (+2) to save return values or temp during shuffling
1537   // + 4 for return address (which we own) and saved rfp
1538   stack_slots += 6;
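       // (That is: 2 slots == one 64-bit word for a result/temp save, plus
       //  4 slots == the two words that enter() pushes for lr and rfp.)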
1539 
1540   // Ok The space we have allocated will look like:
1541   //
1542   //
1543   // FP-> |                     |
1544   //      |---------------------|
1545   //      | 2 slots for moves   |
1546   //      |---------------------|
1547   //      | lock box (if sync)  |
1548   //      |---------------------| <- lock_slot_offset
1549   //      | klass (if static)   |
1550   //      |---------------------| <- klass_slot_offset
1551   //      | oopHandle area      |
1552   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1553   //      | outbound memory     |
1554   //      | based arguments     |
1555   //      |                     |
1556   //      |---------------------|
1557   //      |                     |
1558   // SP-> | out_preserved_slots |
1559   //
1560   //
1561 
1562 
1563   // Now compute actual number of stack words we need rounding to make
1564   // stack properly aligned.
1565   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1566 
1567   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
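       // (Illustrative arithmetic: StackAlignmentInBytes is 16 on aarch64, so
       //  the alignment is 4 slots; e.g. 37 computed slots round up to 40,
       //  giving a 160-byte frame.)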
1568 
1569   // First thing make an ic check to see if we should even be here
1570 
1571   // We are free to use all registers as temps without saving them and
1572   // restoring them except rfp. rfp is the only callee save register
1573   // as far as the interpreter and the compiler(s) are concerned.
1574 
1575   const Register receiver = j_rarg0;
1576 
1577   Label exception_pending;
1578 
1579   assert_different_registers(receiver, rscratch1);
1580   __ verify_oop(receiver);
1581   __ ic_check(8 /* end_alignment */);
1582 
1583   // Verified entry point must be aligned
1584   int vep_offset = ((intptr_t)__ pc()) - start;
1585 
1586   // If we have to make this method not-entrant we'll overwrite its
1587   // first instruction with a jump.  For this action to be legal we
1588   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1589   // SVC, HVC, or SMC.  Make it a NOP.
1590   __ nop();
1591 
1592   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1593     Label L_skip_barrier;
1594     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1595     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1596     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1597 
1598     __ bind(L_skip_barrier);
1599   }
1600 
1601   // Generate stack overflow check
1602   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1603 
1604   // Generate a new frame for the wrapper.
1605   __ enter();
1606   // -2 because return address is already present and so is saved rfp
1607   __ sub(sp, sp, stack_size - 2*wordSize);
1608 
1609   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1610   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1611 
1612   // Frame is now completed as far as size and linkage.
1613   int frame_complete = ((intptr_t)__ pc()) - start;
1614 
1615   // We use r20 as the oop handle for the receiver/klass
1616   // It is callee save so it survives the call to native
1617 
1618   const Register oop_handle_reg = r20;
1619 
1620   //
1621   // We immediately shuffle the arguments so that any vm call we have to
1622   // make from here on out (sync slow path, jvmti, etc.) we will have
1623   // captured the oops from our caller and have a valid oopMap for
1624   // them.
1625 
1626   // -----------------
1627   // The Grand Shuffle
1628 
1629   // The Java calling convention is either equal to or denser than the
1630   // C calling convention. However, because of the jni_env argument the C calling
1631   // convention always has at least one more (and two for static) arguments than Java.
1632   // Therefore if we move the args from java -> c backwards then we will never have
1633   // a register->register conflict and we don't have to build a dependency graph
1634   // and figure out how to break any cycles.
1635   //
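       // (Illustration: java[n-1] -> c[n] is moved first, then java[n-2] -> c[n-1],
       //  and so on, so each Java source is consumed before its location can be
       //  reused as the destination of a lower-numbered argument.)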
1636 
1637   // Record esp-based slot for receiver on stack for non-static methods
1638   int receiver_offset = -1;
1639 
1640   // This is a trick. We double the stack slots so we can claim
1641   // the oops in the caller's frame. Since we are sure to have
1642   // more args than the caller, doubling is enough to make
1643   // sure we can capture all the incoming oop args from the
1644   // caller.
1645   //
1646   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1647 
1648   // Mark location of rfp (someday)
1649   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1650 
1651 
1652   int float_args = 0;
1653   int int_args = 0;
1654 
1655 #ifdef ASSERT
1656   bool reg_destroyed[Register::number_of_registers];
1657   bool freg_destroyed[FloatRegister::number_of_registers];
1658   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1659     reg_destroyed[r] = false;
1660   }
1661   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1662     freg_destroyed[f] = false;
1663   }
1664 
1665 #endif /* ASSERT */
1666 
1667   // For JNI natives the incoming and outgoing registers are offset upwards.
1668   GrowableArray<int> arg_order(2 * total_in_args);
1669 
1670   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1671     arg_order.push(i);
1672     arg_order.push(c_arg);
1673   }
1674 
1675   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1676     int i = arg_order.at(ai);
1677     int c_arg = arg_order.at(ai + 1);
1678     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1679     assert(c_arg != -1 && i != -1, "wrong order");
1680 #ifdef ASSERT
1681     if (in_regs[i].first()->is_Register()) {
1682       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1683     } else if (in_regs[i].first()->is_FloatRegister()) {
1684       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1685     }
1686     if (out_regs[c_arg].first()->is_Register()) {
1687       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1688     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1689       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1690     }
1691 #endif /* ASSERT */
1692     switch (in_sig_bt[i]) {
1693       case T_ARRAY:
1694       case T_OBJECT:
1695         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1696                        ((i == 0) && (!is_static)),
1697                        &receiver_offset);
1698         int_args++;
1699         break;
1700       case T_VOID:
1701         break;
1702 
1703       case T_FLOAT:
1704         __ float_move(in_regs[i], out_regs[c_arg]);
1705         float_args++;
1706         break;
1707 
1708       case T_DOUBLE:
1709         assert( i + 1 < total_in_args &&
1710                 in_sig_bt[i + 1] == T_VOID &&
1711                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1712         __ double_move(in_regs[i], out_regs[c_arg]);
1713         float_args++;
1714         break;
1715 
1716       case T_LONG :
1717         __ long_move(in_regs[i], out_regs[c_arg]);
1718         int_args++;
1719         break;
1720 
1721       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
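             // fall through in product builds (the assert is compiled out)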
1722 
1723       default:
1724         __ move32_64(in_regs[i], out_regs[c_arg]);
1725         int_args++;
1726     }
1727   }
1728 
1729   // point c_arg at the first arg that is already loaded in case we
1730   // need to spill before we call out
1731   int c_arg = total_c_args - total_in_args;
1732 
1733   // Pre-load a static method's oop into c_rarg1.
1734   if (method->is_static()) {
1735 
1736     //  load oop into a register
1737     __ movoop(c_rarg1,
1738               JNIHandles::make_local(method->method_holder()->java_mirror()));
1739 
1740     // Now handlize the static class mirror it's known not-null.
1741     __ str(c_rarg1, Address(sp, klass_offset));
1742     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1743 
1744     // Now get the handle
1745     __ lea(c_rarg1, Address(sp, klass_offset));
1746     // and protect the arg if we must spill
1747     c_arg--;
1748   }
1749 
1750   // Change state to native (we save the return address in the thread, since it might not
1751   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1752   // points into the right code segment. It does not have to be the correct return pc.
1753   // We use the same pc/oopMap repeatedly when we call out.
1754 
1755   Label native_return;
1756   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1757     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1758     __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1759   } else {
1760     intptr_t the_pc = (intptr_t) __ pc();
1761     oop_maps->add_gc_map(the_pc - start, map);
1762 
1763     __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
1764   }
1765 
1766   Label dtrace_method_entry, dtrace_method_entry_done;
1767   if (DTraceMethodProbes) {
1768     __ b(dtrace_method_entry);
1769     __ bind(dtrace_method_entry_done);
1770   }
1771 
1772   // RedefineClasses() tracing support for obsolete method entry
1773   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1774     // protect the args we've loaded
1775     save_args(masm, total_c_args, c_arg, out_regs);
1776     __ mov_metadata(c_rarg1, method());
1777     __ call_VM_leaf(
1778       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1779       rthread, c_rarg1);
1780     restore_args(masm, total_c_args, c_arg, out_regs);
1781   }
1782 
1783   // Lock a synchronized method
1784 
1785   // Register definitions used by locking and unlocking
1786 
1787   const Register swap_reg = r0;
1788   const Register obj_reg  = r19;  // Will contain the oop
1789   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1790   const Register old_hdr  = r13;  // value of old header at unlock time
1791   const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
1792   const Register tmp = lr;
1793 
1794   Label slow_path_lock;
1795   Label lock_done;
1796 
1797   if (method->is_synchronized()) {
1798     Label count;
1799     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1800 
1801     // Get the handle (the 2nd argument)
1802     __ mov(oop_handle_reg, c_rarg1);
1803 
1804     // Get address of the box
1805 
1806     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1807 
1808     // Load the oop from the handle
1809     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1810 
1811     if (LockingMode == LM_MONITOR) {
1812       __ b(slow_path_lock);
1813     } else if (LockingMode == LM_LEGACY) {
1814       // Load (object->mark() | 1) into swap_reg %r0
1815       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1816       __ orr(swap_reg, rscratch1, 1);
1817 
1818       // Save (object->mark() | 1) into BasicLock's displaced header
1819       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1820 
1821       // src -> dest iff dest == r0 else r0 <- dest
1822       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1823 
1824       // Hmm should this move to the slow path code area???
1825 
1826       // Test if the oopMark is an obvious stack pointer, i.e.,
1827       //  1) (mark & 3) == 0, and
1828       //  2) sp <= mark < mark + os::pagesize()
1829       // These 3 tests can be done by evaluating the following
1830       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1831       // assuming both stack pointer and pagesize have their
1832       // least significant 2 bits clear.
1833       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
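           // (Illustrative arithmetic, assuming a 4K page: 3 - 4096 ==
           //  0x...f003, so the ands is zero iff (mark - sp) has its low two
           //  bits clear and no bits set at or above the page size, i.e.
           //  0 <= mark - sp < 4096 with the low two bits of the mark clear.)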
1834 
1835       __ sub(swap_reg, sp, swap_reg);
1836       __ neg(swap_reg, swap_reg);
1837       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1838 
1839       // Save the test result, for recursive case, the result is zero
1840       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1841       __ br(Assembler::NE, slow_path_lock);
1842 
1843       __ bind(count);
1844       __ inc_held_monitor_count(rscratch1);
1845     } else {
1846       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1847       __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1848     }
1849 
1850     // Slow path will re-enter here
1851     __ bind(lock_done);
1852   }
1853 
1854 
1855   // Finally just about ready to make the JNI call
1856 
1857   // get JNIEnv* which is first argument to native
1858   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1859 
1860   // Now set thread in native
1861   __ mov(rscratch1, _thread_in_native);
1862   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1863   __ stlrw(rscratch1, rscratch2);
1864 
1865   __ rt_call(native_func);
1866 
1867   // Verify or restore cpu control state after JNI call
1868   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1869 
1870   // Unpack native results.
1871   switch (ret_type) {
1872   case T_BOOLEAN: __ c2bool(r0);                     break;
1873   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1874   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1875   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1876   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1877   case T_DOUBLE :
1878   case T_FLOAT  :
1879     // Result is in v0 we'll save as needed
1880     break;
1881   case T_ARRAY:                 // Really a handle
1882   case T_OBJECT:                // Really a handle
1883       break; // can't de-handlize until after safepoint check
1884   case T_VOID: break;
1885   case T_LONG: break;
1886   default       : ShouldNotReachHere();
1887   }
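       // (The bitfield extracts above normalize sub-word C results to the Java
       //  64-bit register convention; e.g. sbfx(r0, r0, 0, 8) sign-extends a
       //  jbyte result to the full register.)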
1888 
1889   Label safepoint_in_progress, safepoint_in_progress_done;
1890 
1891   // Switch thread to "native transition" state before reading the synchronization state.
1892   // This additional state is necessary because reading and testing the synchronization
1893   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1894   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1895   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1896   //     Thread A is resumed to finish this native method, but doesn't block here since it
1897   //     didn't see any synchronization in progress, and escapes.
1898   __ mov(rscratch1, _thread_in_native_trans);
1899 
1900   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1901 
1902   // Force this write out before the read below
1903   if (!UseSystemMemoryBarrier) {
1904     __ dmb(Assembler::ISH);
1905   }
1906 
1907   __ verify_sve_vector_length();
1908 
1909   // Check for safepoint operation in progress and/or pending suspend requests.
1910   {
1911     // No need for acquire as Java threads always disarm themselves.
1912     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
1913     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1914     __ cbnzw(rscratch1, safepoint_in_progress);
1915     __ bind(safepoint_in_progress_done);
1916   }
1917 
1918   // change thread state
1919   __ mov(rscratch1, _thread_in_Java);
1920   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1921   __ stlrw(rscratch1, rscratch2);
1922 
1923   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1924     // Check preemption for Object.wait()
1925     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1926     __ cbz(rscratch1, native_return);
1927     __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1928     __ br(rscratch1);
1929     __ bind(native_return);
1930 
1931     intptr_t the_pc = (intptr_t) __ pc();
1932     oop_maps->add_gc_map(the_pc - start, map);
1933   }
1934 
1935   Label reguard;
1936   Label reguard_done;
1937   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1938   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1939   __ br(Assembler::EQ, reguard);
1940   __ bind(reguard_done);
1941 
1942   // native result if any is live
1943 
1944   // Unlock
1945   Label unlock_done;
1946   Label slow_path_unlock;
1947   if (method->is_synchronized()) {
1948 
1949     // Get locked oop from the handle we passed to jni
1950     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1951 
1952     Label done, not_recursive;
1953 
1954     if (LockingMode == LM_LEGACY) {
1955       // Simple recursive lock?
1956       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1957       __ cbnz(rscratch1, not_recursive);
1958       __ dec_held_monitor_count(rscratch1);
1959       __ b(done);
1960     }
1961 
1962     __ bind(not_recursive);
1963 
1964     // Must save r0 if it is live now, because cmpxchg must use it
1965     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1966       save_native_result(masm, ret_type, stack_slots);
1967     }
1968 
1969     if (LockingMode == LM_MONITOR) {
1970       __ b(slow_path_unlock);
1971     } else if (LockingMode == LM_LEGACY) {
1972       // get address of the stack lock
1973       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1974       //  get old displaced header
1975       __ ldr(old_hdr, Address(r0, 0));
1976 
1977       // Atomic swap old header if oop still contains the stack lock
1978       Label count;
1979       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1980       __ bind(count);
1981       __ dec_held_monitor_count(rscratch1);
1982     } else {
1983       assert(LockingMode == LM_LIGHTWEIGHT, "");
1984       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1985     }
1986 
1987     // slow path re-enters here
1988     __ bind(unlock_done);
1989     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1990       restore_native_result(masm, ret_type, stack_slots);
1991     }
1992 
1993     __ bind(done);
1994   }
1995 
1996   Label dtrace_method_exit, dtrace_method_exit_done;
1997   if (DTraceMethodProbes) {
1998     __ b(dtrace_method_exit);
1999     __ bind(dtrace_method_exit_done);
2000   }
2001 
2002   __ reset_last_Java_frame(false);
2003 
2004   // Unbox oop result, e.g. JNIHandles::resolve result.
2005   if (is_reference_type(ret_type)) {
2006     __ resolve_jobject(r0, r1, r2);
2007   }
2008 
2009   if (CheckJNICalls) {
2010     // clear_pending_jni_exception_check
2011     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2012   }
2013 
2014   // reset handle block
2015   __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2016   __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
2017 
2018   __ leave();
2019 
2020   // Any exception pending?
2021   __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2022   __ cbnz(rscratch1, exception_pending);
2023 
2024   // We're done
2025   __ ret(lr);
2026 
2027   // Unexpected paths are out of line and go here
2028 
2029   // forward the exception
2030   __ bind(exception_pending);
2031 
2032   // and forward the exception
2033   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2034 
2035   // Slow path locking & unlocking
2036   if (method->is_synchronized()) {
2037 
2038     __ block_comment("Slow path lock {");
2039     __ bind(slow_path_lock);
2040 
2041     // last_Java_frame is set up. No exceptions, so do a vanilla call, not call_VM
2042     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2043 
2044     // protect the args we've loaded
2045     save_args(masm, total_c_args, c_arg, out_regs);
2046 
2047     __ mov(c_rarg0, obj_reg);
2048     __ mov(c_rarg1, lock_reg);
2049     __ mov(c_rarg2, rthread);
2050 
2051     // Not a leaf but we have last_Java_frame setup as we want.
2052     // We don't want to unmount in case of contention since that would complicate preserving
2053     // the arguments that had already been marshalled into the native convention. So we force
2054     // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
2055     // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
2056     __ push_cont_fastpath();
2057     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2058     __ pop_cont_fastpath();
2059     restore_args(masm, total_c_args, c_arg, out_regs);
2060 
2061 #ifdef ASSERT
2062     { Label L;
2063       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2064       __ cbz(rscratch1, L);
2065       __ stop("no pending exception allowed on exit from monitorenter");
2066       __ bind(L);
2067     }
2068 #endif
2069     __ b(lock_done);
2070 
2071     __ block_comment("} Slow path lock");
2072 
2073     __ block_comment("Slow path unlock {");
2074     __ bind(slow_path_unlock);
2075 
2076     // If we haven't already saved the native result we must save it now as the
2077     // floating-point result register (v0) is caller-saved and still exposed.
2078 
2079     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2080       save_native_result(masm, ret_type, stack_slots);
2081     }
2082 
2083     __ mov(c_rarg2, rthread);
2084     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2085     __ mov(c_rarg0, obj_reg);
2086 
2087     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2088     // NOTE that obj_reg == r19 currently
2089     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2090     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2091 
2092     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2093 
2094 #ifdef ASSERT
2095     {
2096       Label L;
2097       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2098       __ cbz(rscratch1, L);
2099       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2100       __ bind(L);
2101     }
2102 #endif /* ASSERT */
2103 
2104     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2105 
2106     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2107       restore_native_result(masm, ret_type, stack_slots);
2108     }
2109     __ b(unlock_done);
2110 
2111     __ block_comment("} Slow path unlock");
2112 
2113   } // synchronized
2114 
2115   // SLOW PATH Reguard the stack if needed
2116 
2117   __ bind(reguard);
2118   save_native_result(masm, ret_type, stack_slots);
2119   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2120   restore_native_result(masm, ret_type, stack_slots);
2121   // and continue
2122   __ b(reguard_done);
2123 
2124   // SLOW PATH safepoint
2125   {
2126     __ block_comment("safepoint {");
2127     __ bind(safepoint_in_progress);
2128 
2129     // Don't use call_VM as it will see a possible pending exception and forward it
2130     // and never return here, preventing us from clearing _last_native_pc down below.
2131     //
2132     save_native_result(masm, ret_type, stack_slots);
2133     __ mov(c_rarg0, rthread);
2134 #ifndef PRODUCT
2135     assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2136 #endif
2137     __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2138     __ blr(rscratch1);
2139 
2140     // Restore any method result value
2141     restore_native_result(masm, ret_type, stack_slots);
2142 
2143     __ b(safepoint_in_progress_done);
2144     __ block_comment("} safepoint");
2145   }
2146 
2147   // SLOW PATH dtrace support
2148   if (DTraceMethodProbes) {
2149     {
2150       __ block_comment("dtrace entry {");
2151       __ bind(dtrace_method_entry);
2152 
2153       // We have all of the arguments set up at this point. We must not touch any register
2154       // argument registers at this point (what if we save/restore them and there are no oops?).
2155 
2156       save_args(masm, total_c_args, c_arg, out_regs);
2157       __ mov_metadata(c_rarg1, method());
2158       __ call_VM_leaf(
2159         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2160         rthread, c_rarg1);
2161       restore_args(masm, total_c_args, c_arg, out_regs);
2162       __ b(dtrace_method_entry_done);
2163       __ block_comment("} dtrace entry");
2164     }
2165 
2166     {
2167       __ block_comment("dtrace exit {");
2168       __ bind(dtrace_method_exit);
2169       save_native_result(masm, ret_type, stack_slots);
2170       __ mov_metadata(c_rarg1, method());
2171       __ call_VM_leaf(
2172         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2173         rthread, c_rarg1);
2174       restore_native_result(masm, ret_type, stack_slots);
2175       __ b(dtrace_method_exit_done);
2176       __ block_comment("} dtrace exit");
2177     }
2178   }
2179 
2180   __ flush();
2181 
2182   nmethod *nm = nmethod::new_native_nmethod(method,
2183                                             compile_id,
2184                                             masm->code(),
2185                                             vep_offset,
2186                                             frame_complete,
2187                                             stack_slots / VMRegImpl::slots_per_word,
2188                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2189                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2190                                             oop_maps);
2191 
2192   return nm;
2193 }
2194 
2195 // This function returns the adjustment size (in number of words) to a c2i adapter
2196 // activation for use during deoptimization.
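     // (Illustrative example: callee_parameters == 2 and callee_locals == 5
     //  gives diff == 3 stack words (Interpreter::stackElementWords is 1 here),
     //  which align_up rounds to 4 so the extended frame stays 16-byte aligned.)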
2197 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2198   assert(callee_locals >= callee_parameters,
2199           "test and remove; got more parms than locals");
2200   if (callee_locals < callee_parameters)
2201     return 0;                   // No adjustment for negative locals
2202   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2203   // diff is counted in stack words
2204   return align_up(diff, 2);
2205 }
2206 
2207 
2208 //------------------------------generate_deopt_blob----------------------------
2209 void SharedRuntime::generate_deopt_blob() {
2210   // Allocate space for the code
2211   ResourceMark rm;
2212   // Setup code generation tools
2213   int pad = 0;
2214 #if INCLUDE_JVMCI
2215   if (EnableJVMCI) {
2216     pad += 512; // Increase the buffer size when compiling for JVMCI
2217   }
2218 #endif
2219   const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
2220   CodeBuffer buffer(name, 2048+pad, 1024);
2221   MacroAssembler* masm = new MacroAssembler(&buffer);
2222   int frame_size_in_words;
2223   OopMap* map = nullptr;
2224   OopMapSet *oop_maps = new OopMapSet();
2225   RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2226 
2227   // -------------
2228   // This code enters when returning to a de-optimized nmethod.  A return
2229   // address has been pushed on the stack, and return values are in
2230   // registers.
2231   // If we are doing a normal deopt then we were called from the patched
2232   // nmethod from the point we returned to the nmethod. So the return
2233   // address on the stack is wrong by NativeCall::instruction_size
2234   // We will adjust the value so it looks like we have the original return
2235   // address on the stack (like when we eagerly deoptimized).
2236   // In the case of an exception pending when deoptimizing, we enter
2237   // with a return address on the stack that points after the call we patched
2238   // into the exception handler. We have the following register state from,
2239   // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2240   //    r0: exception oop
2241   //    r19: exception handler
2242   //    r3: throwing pc
2243   // So in this case we simply jam r3 into the useless return address and
2244   // the stack looks just like we want.
2245   //
2246   // At this point we need to de-opt.  We save the argument return
2247   // registers.  We call the first C routine, fetch_unroll_info().  This
2248   // routine captures the return values and returns a structure which
2249   // describes the current frame size and the sizes of all replacement frames.
2250   // The current frame is compiled code and may contain many inlined
2251   // functions, each with their own JVM state.  We pop the current frame, then
2252   // push all the new frames.  Then we call the C routine unpack_frames() to
2253   // populate these frames.  Finally unpack_frames() returns us the new target
2254   // address.  Notice that callee-save registers are BLOWN here; they have
2255   // already been captured in the vframeArray at the time the return PC was
2256   // patched.
2257   address start = __ pc();
2258   Label cont;
2259 
2260   // Prolog for non exception case!
2261 
2262   // Save everything in sight.
2263   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2264 
2265   // Normal deoptimization.  Save exec mode for unpack_frames.
2266   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2267   __ b(cont);
2268 
2269   int reexecute_offset = __ pc() - start;
2270 #if INCLUDE_JVMCI && !defined(COMPILER1)
2271   if (UseJVMCICompiler) {
2272     // JVMCI does not use this kind of deoptimization
2273     __ should_not_reach_here();
2274   }
2275 #endif
2276 
2277   // Reexecute case
2278   // return address is the pc that describes what bci to re-execute at
2279 
2280   // No need to update map as each call to save_live_registers will produce identical oopmap
2281   (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2282 
2283   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2284   __ b(cont);
2285 
2286 #if INCLUDE_JVMCI
2287   Label after_fetch_unroll_info_call;
2288   int implicit_exception_uncommon_trap_offset = 0;
2289   int uncommon_trap_offset = 0;
2290 
2291   if (EnableJVMCI) {
2292     implicit_exception_uncommon_trap_offset = __ pc() - start;
2293 
2294     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2295     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2296 
2297     uncommon_trap_offset = __ pc() - start;
2298 
2299     // Save everything in sight.
2300     reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2301     // fetch_unroll_info needs to call last_java_frame()
2302     Label retaddr;
2303     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2304 
2305     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2306     __ movw(rscratch1, -1);
2307     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2308 
2309     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2310     __ mov(c_rarg0, rthread);
2311     __ movw(c_rarg2, rcpool); // exec mode
2312     __ lea(rscratch1,
2313            RuntimeAddress(CAST_FROM_FN_PTR(address,
2314                                            Deoptimization::uncommon_trap)));
2315     __ blr(rscratch1);
2316     __ bind(retaddr);
2317     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2318 
2319     __ reset_last_Java_frame(false);
2320 
2321     __ b(after_fetch_unroll_info_call);
2322   } // EnableJVMCI
2323 #endif // INCLUDE_JVMCI
2324 
2325   int exception_offset = __ pc() - start;
2326 
2327   // Prolog for exception case
2328 
2329   // all registers are dead at this entry point, except for r0, and
2330   // r3 which contain the exception oop and exception pc
2331   // respectively.  Set them in TLS and fall thru to the
2332   // unpack_with_exception_in_tls entry point.
2333 
2334   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2335   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2336 
2337   int exception_in_tls_offset = __ pc() - start;
2338 
2339   // new implementation because exception oop is now passed in JavaThread
2340 
2341   // Prolog for exception case
2342   // All registers must be preserved because they might be used by LinearScan
2343   // Exception oop and throwing PC are passed in JavaThread
2344   // tos: stack at point of call to method that threw the exception (i.e. only
2345   // args are on the stack, no return address)
2346 
2347   // The return address pushed by save_live_registers will be patched
2348   // later with the throwing pc. The correct value is not available
2349   // now because loading it from memory would destroy registers.
2350 
2351   // NB: The SP at this point must be the SP of the method that is
2352   // being deoptimized.  Deoptimization assumes that the frame created
2353   // here by save_live_registers is immediately below the method's SP.
2354   // This is a somewhat fragile mechanism.
2355 
2356   // Save everything in sight.
2357   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2358 
2359   // Now it is safe to overwrite any register
2360 
2361   // Deopt during an exception.  Save exec mode for unpack_frames.
2362   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2363 
2364   // load throwing pc from JavaThread and patch it as the return address
2365   // of the current frame. Then clear the field in JavaThread
2366   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2367   __ protect_return_address(r3);
2368   __ str(r3, Address(rfp, wordSize));
2369   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2370 
2371 #ifdef ASSERT
2372   // verify that there is really an exception oop in JavaThread
2373   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2374   __ verify_oop(r0);
2375 
2376   // verify that there is no pending exception
2377   Label no_pending_exception;
2378   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2379   __ cbz(rscratch1, no_pending_exception);
2380   __ stop("must not have pending exception here");
2381   __ bind(no_pending_exception);
2382 #endif
2383 
2384   __ bind(cont);
2385 
2386   // Call C code.  Need thread and this frame, but NOT official VM entry
2387   // crud.  We cannot block on this call, no GC can happen.
2388   //
2389   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2390 
2391   // fetch_unroll_info needs to call last_java_frame().
2392 
2393   Label retaddr;
2394   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2395 #ifdef ASSERT
2396   { Label L;
2397     __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2398     __ cbz(rscratch1, L);
2399     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2400     __ bind(L);
2401   }
2402 #endif // ASSERT
2403   __ mov(c_rarg0, rthread);
2404   __ mov(c_rarg1, rcpool);
2405   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2406   __ blr(rscratch1);
2407   __ bind(retaddr);
2408 
2409   // Need to have an oopmap that tells fetch_unroll_info where to
2410   // find any register it might need.
2411   oop_maps->add_gc_map(__ pc() - start, map);
2412 
2413   __ reset_last_Java_frame(false);
2414 
2415 #if INCLUDE_JVMCI
2416   if (EnableJVMCI) {
2417     __ bind(after_fetch_unroll_info_call);
2418   }
2419 #endif
2420 
2421   // Load UnrollBlock* into r5
2422   __ mov(r5, r0);
2423 
2424   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2425   Label noException;
2426   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2427   __ br(Assembler::NE, noException);
2428   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2429   // QQQ this is useless it was null above
2430   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2431   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2432   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2433 
2434   __ verify_oop(r0);
2435 
2436   // Overwrite the result registers with the exception results.
2437   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2438   // I think this is useless
2439   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2440 
2441   __ bind(noException);
2442 
2443   // Only register save data is on the stack.
2444   // Now restore the result registers.  Everything else is either dead
2445   // or captured in the vframeArray.
2446 
2447   // Restore fp result register
2448   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2449   // Restore integer result register
2450   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2451 
2452   // Pop all of the register save area off the stack
2453   __ add(sp, sp, frame_size_in_words * wordSize);
2454 
2455   // All of the register save area has been popped off the stack. Only the
2456   // return address remains.
2457 
2458   // Pop all the frames we must move/replace.
2459   //
2460   // Frame picture (youngest to oldest)
2461   // 1: self-frame (no frame link)
2462   // 2: deopting frame  (no frame link)
2463   // 3: caller of deopting frame (could be compiled/interpreted).
2464   //
2465   // Note: by leaving the return address of self-frame on the stack
2466   // and using the size of frame 2 to adjust the stack
2467   // when we are done the return to frame 3 will still be on the stack.
2468 
2469   // Pop deoptimized frame
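       // (The recorded frame size includes the two words for the saved rfp and
       //  return address, so we skip all but those two words and then pop them
       //  with the ldp below, discarding the stale return address into zr.)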
2470   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2471   __ sub(r2, r2, 2 * wordSize);
2472   __ add(sp, sp, r2);
2473   __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2474 
2475 #ifdef ASSERT
2476   // Compilers generate code that bang the stack by as much as the
2477   // interpreter would need. So this stack banging should never
2478   // trigger a fault. Verify that it does not on non product builds.
2479   __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2480   __ bang_stack_size(r19, r2);
2481 #endif
2482   // Load address of array of frame pcs into r2
2483   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2484 
2485   // Trash the old pc
2486   // __ addptr(sp, wordSize);  FIXME ????
2487 
2488   // Load address of array of frame sizes into r4
2489   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2490 
2491   // Load counter into r3
2492   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2493 
2494   // Now adjust the caller's stack to make up for the extra locals
2495   // but record the original sp so that we can save it in the skeletal interpreter
2496   // frame and the stack walking of interpreter_sender will get the unextended sp
2497   // value and not the "real" sp value.
2498 
2499   const Register sender_sp = r6;
2500 
2501   __ mov(sender_sp, sp);
2502   __ ldrw(r19, Address(r5,
2503                        Deoptimization::UnrollBlock::
2504                        caller_adjustment_offset()));
2505   __ sub(sp, sp, r19);
2506 
2507   // Push interpreter frames in a loop
2508   __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2509   __ mov(rscratch2, rscratch1);
2510   Label loop;
2511   __ bind(loop);
2512   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2513   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2514   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2515   __ enter();                           // Save old & set new fp
2516   __ sub(sp, sp, r19);                  // Prolog
2517   // This value is corrected by layout_activation_impl
2518   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2519   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2520   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2521   __ sub(r3, r3, 1);                   // Decrement counter
2522   __ cbnz(r3, loop);
2523 
2524   // Re-push self-frame
2525   __ ldr(lr, Address(r2));
2526   __ enter();
2527 
2528   // Allocate a full sized register save area.  We subtract 2 because
2529   // enter() just pushed 2 words
2530   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2531 
2532   // Restore frame locals after moving the frame
2533   __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2534   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2535 
2536   // Call C code.  Need thread but NOT official VM entry
2537   // crud.  We cannot block on this call, no GC can happen.  Call should
2538   // restore return values to their stack-slots with the new SP.
2539   //
2540   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2541 
2542   // Use rfp because the frames look interpreted now
2543   // Don't need the precise return PC here, just precise enough to point into this code blob.
2544   address the_pc = __ pc();
2545   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2546 
2547   __ mov(c_rarg0, rthread);
2548   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2549   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2550   __ blr(rscratch1);
2551 
2552   // Set an oopmap for the call site
2553   // Use the same PC we used for the last java frame
2554   oop_maps->add_gc_map(the_pc - start,
2555                        new OopMap( frame_size_in_words, 0 ));
2556 
2557   // Clear fp AND pc
2558   __ reset_last_Java_frame(true);
2559 
2560   // Collect return values
2561   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2562   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2563   // I think this is useless (throwing pc?)
2564   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2565 
2566   // Pop self-frame.
2567   __ leave();                           // Epilog
2568 
2569   // Jump to interpreter
2570   __ ret(lr);
2571 
2572   // Make sure all code is generated
2573   masm->flush();
2574 
2575   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2576   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2577 #if INCLUDE_JVMCI
2578   if (EnableJVMCI) {
2579     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2580     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2581   }
2582 #endif
2583 }
2584 
2585 // Number of stack slots between incoming argument block and the start of
2586 // a new frame.  The PROLOG must add this many slots to the stack.  The
2587 // EPILOG must remove this many slots. aarch64 needs two words (four
2588 // slots) for the return address and fp.
2589 // TODO think this is correct but check
2590 uint SharedRuntime::in_preserve_stack_slots() {
2591   return 4;
2592 }
2593 
2594 uint SharedRuntime::out_preserve_stack_slots() {
2595   return 0;
2596 }
2597 
2598 
2599 VMReg SharedRuntime::thread_register() {
2600   return rthread->as_VMReg();
2601 }
2602 
2603 //------------------------------generate_handler_blob------
2604 //
2605 // Generate a special Compile2Runtime blob that saves all registers,
2606 // and setup oopmap.
2607 //
2608 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2609   assert(is_polling_page_id(id), "expected a polling page stub id");
2610 
2611   ResourceMark rm;
2612   OopMapSet *oop_maps = new OopMapSet();
2613   OopMap* map;
2614 
2615   // Allocate space for the code.  Setup code generation tools.
2616   const char* name = SharedRuntime::stub_name(id);
2617   CodeBuffer buffer(name, 2048, 1024);
2618   MacroAssembler* masm = new MacroAssembler(&buffer);
2619 
2620   address start   = __ pc();
2621   address call_pc = nullptr;
2622   int frame_size_in_words;
2623   bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
2624   RegisterSaver reg_save(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */);
2625 
2626   // When the signal occurred, the LR was either signed and stored on the stack (in which
2627   // case it will be restored from the stack before being used) or unsigned and not stored
2628   // on the stack. Stripping ensures we get the right value.
2629   __ strip_return_address();
2630 
2631   // Save Integer and Float registers.
2632   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2633 
2634   // The following is basically a call_VM.  However, we need the precise
2635   // address of the call in order to generate an oopmap. Hence, we do all the
2636   // work ourselves.
2637 
2638   Label retaddr;
2639   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2640 
2641   // The return address must always be correct so that the frame constructor never
2642   // sees an invalid pc.
2643 
2644   if (!cause_return) {
2645     // overwrite the return address pushed by save_live_registers
2646     // Additionally, r20 is a callee-saved register so we can look at
2647     // it later to determine if someone changed the return address for
2648     // us!
2649     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2650     __ protect_return_address(r20);
2651     __ str(r20, Address(rfp, wordSize));
2652   }
2653 
2654   // Do the call
2655   __ mov(c_rarg0, rthread);
2656   __ lea(rscratch1, RuntimeAddress(call_ptr));
2657   __ blr(rscratch1);
2658   __ bind(retaddr);
2659 
2660   // Set an oopmap for the call site.  This oopmap will map all
2661   // oop-registers and debug-info registers as callee-saved.  This
2662   // will allow deoptimization at this safepoint to find all possible
2663   // debug-info recordings, as well as let GC find all oops.
2664 
2665   oop_maps->add_gc_map( __ pc() - start, map);
2666 
2667   Label noException;
2668 
2669   __ reset_last_Java_frame(false);
2670 
2671   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2672 
2673   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2674   __ cbz(rscratch1, noException);
2675 
2676   // Exception pending
2677 
2678   reg_save.restore_live_registers(masm);
2679 
2680   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2681 
2682   // No exception case
2683   __ bind(noException);
2684 
2685   Label no_adjust, bail;
2686   if (!cause_return) {
2687     // If our stashed return pc was modified by the runtime we avoid touching it
2688     __ ldr(rscratch1, Address(rfp, wordSize));
2689     __ cmp(r20, rscratch1);
2690     __ br(Assembler::NE, no_adjust);
2691     __ authenticate_return_address(r20);
2692 
2693 #ifdef ASSERT
2694     // Verify the correct encoding of the poll we're about to skip.
2695     // See NativeInstruction::is_ldrw_to_zr()
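         // (bits [31:22] == 0b1011100101 select the 32-bit "ldr Wt, [Xn, #imm]"
         //  form, and Rt == 0b11111 selects wzr, matching the "ldrw wzr, [rN]"
         //  emitted for the poll.)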
2696     __ ldrw(rscratch1, Address(r20));
2697     __ ubfx(rscratch2, rscratch1, 22, 10);
2698     __ cmpw(rscratch2, 0b1011100101);
2699     __ br(Assembler::NE, bail);
2700     __ ubfx(rscratch2, rscratch1, 0, 5);
2701     __ cmpw(rscratch2, 0b11111);
2702     __ br(Assembler::NE, bail);
2703 #endif
2704     // Adjust return pc forward to step over the safepoint poll instruction
2705     __ add(r20, r20, NativeInstruction::instruction_size);
2706     __ protect_return_address(r20);
2707     __ str(r20, Address(rfp, wordSize));
2708   }
2709 
2710   __ bind(no_adjust);
2711   // Normal exit: restore registers and return.
2712   reg_save.restore_live_registers(masm);
2713 
2714   __ ret(lr);
2715 
2716 #ifdef ASSERT
2717   __ bind(bail);
2718   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2719 #endif
2720 
2721   // Make sure all code is generated
2722   masm->flush();
2723 
2724   // Fill out other meta info
2725   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2726 }
2727 
2728 //
2729 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2730 //
2731 // Generate a stub that calls into the VM to find out the proper destination
2732 // of a Java call. All the argument registers are live at this point,
2733 // but since this is generic code we don't know what they are, so the
2734 // caller must do any GC of the args.
2735 //
2736 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
2737   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2738   assert(is_resolve_id(id), "expected a resolve stub id");
2739 
2740   // allocate space for the code
2741   ResourceMark rm;
2742 
2743   const char* name = SharedRuntime::stub_name(id);
2744   CodeBuffer buffer(name, 1000, 512);
2745   MacroAssembler* masm = new MacroAssembler(&buffer);
2746 
2747   int frame_size_in_words;
2748   RegisterSaver reg_save(false /* save_vectors */);
2749 
2750   OopMapSet *oop_maps = new OopMapSet();
2751   OopMap* map = nullptr;
2752 
2753   int start = __ offset();
2754 
2755   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2756 
2757   int frame_complete = __ offset();
2758 
2759   {
2760     Label retaddr;
2761     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2762 
2763     __ mov(c_rarg0, rthread);
2764     __ lea(rscratch1, RuntimeAddress(destination));
2765 
2766     __ blr(rscratch1);
2767     __ bind(retaddr);
2768   }
2769 
2770   // Set an oopmap for the call site.
2771   // We need this not only for callee-saved registers, but also for volatile
2772   // registers that the compiler might be keeping live across a safepoint.
2773 
2774   oop_maps->add_gc_map( __ offset() - start, map);
2775 
2776   // r0 contains the address we are going to jump to, assuming no exception was installed
2777 
2778   // clear last_Java_sp
2779   __ reset_last_Java_frame(false);
2780   // check for pending exceptions
2781   Label pending;
2782   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2783   __ cbnz(rscratch1, pending);
2784 
2785   // get the returned Method*
2786   __ get_vm_result_2(rmethod, rthread);
2787   __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
2788 
2789   // r0 is where we want to jump; overwrite rscratch1's save slot, since rscratch1 is saved and scratch
2790   __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
2791   reg_save.restore_live_registers(masm);
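  // restore_live_registers reloaded rscratch1 from its save slot, so it now
  // holds the destination address we stashed there above.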
2792 
2793   // We are back to the original state on entry and ready to go.
2794 
2795   __ br(rscratch1);
2796 
2797   // Pending exception after the safepoint
2798 
2799   __ bind(pending);
2800 
2801   reg_save.restore_live_registers(masm);
2802 
2803   // exception pending => remove activation and forward to exception handler
2804 
2805   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
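  // (clears the vm_result oop slot so no stale oop is left in the thread
  // while we unwind to the exception handler)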
2806 
2807   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2808   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2809 
2810   // -------------
2811   // make sure all code is generated
2812   masm->flush();
2813 
2814   // return the blob
2815   // (frame_size_in_words is in words, as RuntimeStub::new_runtime_stub expects)
2816   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2817 }
2818 
2819 // Continuation point for throwing of implicit exceptions that are
2820 // not handled in the current activation. Fabricates an exception
2821 // oop and initiates normal exception dispatching in this
2822 // frame. Since we need to preserve callee-saved values (currently
2823 // only for C2, but done for C1 as well), we need a callee-saved oop
2824 // map and therefore have to make these stubs into RuntimeStubs
2825 // rather than BufferBlobs.  If the compiler needs all registers to
2826 // be preserved between the fault point and the exception handler
2827 // then it must assume responsibility for that in
2828 // AbstractCompiler::continuation_for_implicit_null_exception or
2829 // continuation_for_implicit_division_by_zero_exception. All other
2830 // implicit exceptions (e.g., NullPointerException or
2831 // AbstractMethodError on entry) are either at call sites or
2832 // otherwise assume that stack unwinding will be initiated, so
2833 // caller-saved registers are assumed volatile in the compiler.
2834 
2835 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
2836   assert(is_throw_id(id), "expected a throw stub id");
2837 
2838   const char* name = SharedRuntime::stub_name(id);
2839 
2840   // Information about frame layout at time of blocking runtime call.
2841   // Note that we only have to preserve callee-saved registers since
2842   // the compilers are responsible for supplying a continuation point
2843   // if they expect all registers to be preserved.
2844   // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
2845   enum layout {
2846     rfp_off = 0,
2847     rfp_off2,
2848     return_off,
2849     return_off2,
2850     framesize // inclusive of return address
2851   };
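  // Each layout slot is one 32-bit VMRegImpl stack slot, so the saved rfp
  // and the return address occupy two slots apiece: framesize is 4 slots,
  // i.e. 16 bytes, which keeps sp 16-byte aligned (see the assert below).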
2852 
2853   int insts_size = 512;
2854   int locs_size  = 64;
2855 
2856   ResourceMark rm;
2857   const char* timer_msg = "SharedRuntime generate_throw_exception";
2858   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
2859 
2860   CodeBuffer code(name, insts_size, locs_size);
2861   OopMapSet* oop_maps  = new OopMapSet();
2862   MacroAssembler* masm = new MacroAssembler(&code);
2863 
2864   address start = __ pc();
2865 
2866   // This is an inlined and slightly modified version of call_VM
2867   // which has the ability to fetch the return PC out of
2868   // thread-local storage and also sets up last_Java_sp slightly
2869 // differently from the real call_VM.
2870 
2871   __ enter(); // Save FP and LR before call
2872 
2873   assert(is_even(framesize/2), "sp not 16-byte aligned");
2874 
2875   // lr and fp are already in place
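  // framesize is counted in 32-bit slots and enter() already pushed rfp and
  // lr (4 slots), so the sub below allocates the remainder; for this layout
  // that is zero extra space.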
2876   __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog
2877 
2878   int frame_complete = __ pc() - start;
2879 
2880   // Set up last_Java_sp and last_Java_fp
2881   address the_pc = __ pc();
2882   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2883 
2884   __ mov(c_rarg0, rthread);
2885   BLOCK_COMMENT("call runtime_entry");
2886   __ mov(rscratch1, runtime_entry);
2887   __ blr(rscratch1);
2888 
2889   // Generate oop map
2890   OopMap* map = new OopMap(framesize, 0);
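  // The map records no registers: the only values saved in this frame are
  // rfp and lr, neither of which holds an oop across the call.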
2891 
2892   oop_maps->add_gc_map(the_pc - start, map);
2893 
2894   __ reset_last_Java_frame(true);
2895 
2896   // Reinitialize the ptrue predicate register, in case the external runtime
2897   // call clobbers ptrue reg, as we may return to SVE compiled code.
2898   __ reinitialize_ptrue();
2899 
2900   __ leave();
2901 
2902   // check for pending exceptions
2903 #ifdef ASSERT
2904   Label L;
2905   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2906   __ cbnz(rscratch1, L);
2907   __ should_not_reach_here();
2908   __ bind(L);
2909 #endif // ASSERT
2910   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2911 
2912   // codeBlob framesize is in words (not VMRegImpl::slot_size)
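  // (framesize here is in 32-bit slots; shifting right by
  // LogBytesPerWord - LogBytesPerInt == 1 converts it to 64-bit words)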
2913   RuntimeStub* stub =
2914     RuntimeStub::new_runtime_stub(name,
2915                                   &code,
2916                                   frame_complete,
2917                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2918                                   oop_maps, false);
2919   return stub;
2920 }
2921 
2922 #if INCLUDE_JFR
2923 
2924 static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
2925   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2926   __ mov(c_rarg0, thread);
2927 }
2928 
2930 static void jfr_epilogue(MacroAssembler* masm) {
2931   __ reset_last_Java_frame(true);
2932 }
2933 
2934 // For C2: c_rarg0 is junk; call into the runtime to write a checkpoint.
2935 // It returns a jobject handle to the event writer.
2936 // The handle is dereferenced through a load barrier; the result is the event writer oop.
2937 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
2938   enum layout {
2939     rfp_off,
2940     rfp_off2,
2941     return_off,
2942     return_off2,
2943     framesize // inclusive of return address
2944   };
2945 
2946   int insts_size = 1024;
2947   int locs_size = 64;
2948   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
2949   CodeBuffer code(name, insts_size, locs_size);
2950   OopMapSet* oop_maps = new OopMapSet();
2951   MacroAssembler* masm = new MacroAssembler(&code);
2952 
2953   address start = __ pc();
2954   __ enter();
2955   int frame_complete = __ pc() - start;
2956   address the_pc = __ pc();
2957   jfr_prologue(the_pc, masm, rthread);
2958   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
2959   jfr_epilogue(masm);
2960   __ resolve_global_jobject(r0, rscratch1, rscratch2);
2961   __ leave();
2962   __ ret(lr);
2963 
2964   OopMap* map = new OopMap(framesize, 1); // rfp
2965   oop_maps->add_gc_map(the_pc - start, map);
2966 
2967   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
2968     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2969                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2970                                   oop_maps, false);
2971   return stub;
2972 }
2973 
2974 // For C2: call into the runtime to return a leased buffer.
2975 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
2976   enum layout {
2977     rfp_off,
2978     rfp_off2,
2979     return_off,
2980     return_off2,
2981     framesize // inclusive of return address
2982   };
2983 
2984   int insts_size = 1024;
2985   int locs_size = 64;
2986 
2987   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
2988   CodeBuffer code(name, insts_size, locs_size);
2989   OopMapSet* oop_maps = new OopMapSet();
2990   MacroAssembler* masm = new MacroAssembler(&code);
2991 
2992   address start = __ pc();
2993   __ enter();
2994   int frame_complete = __ pc() - start;
2995   address the_pc = __ pc();
2996   jfr_prologue(the_pc, masm, rthread);
2997   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
2998   jfr_epilogue(masm);
2999 
3000   __ leave();
3001   __ ret(lr);
3002 
3003   OopMap* map = new OopMap(framesize, 1); // rfp
3004   oop_maps->add_gc_map(the_pc - start, map);
3005 
3006   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3007     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3008                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3009                                   oop_maps, false);
3010   return stub;
3011 }
3012 
3013 #endif // INCLUDE_JFR