/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes();

  // Total stack size in bytes for saving SVE predicate registers.
  int total_sve_predicate_in_bytes();

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off + FPUStateSizeInWords,
                rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
                return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
                reg_save_size = return_off + Register::max_slots_per_register};

};

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers() so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegister::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegister::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}

int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate registers if
  // they are present in the stack frame pushed by save_live_registers(). So the
  // offset depends on the saved total predicate vectors in the stack frame.
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
}

int RegisterSaver::total_sve_predicate_in_bytes() {
#ifdef COMPILER2
  if (_save_vectors && Matcher::supports_scalable_vector()) {
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
           PRegister::number_of_registers;
  }
#endif
  return 0;
}
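
// A worked example of the arithmetic above (illustrative only, assuming a
// 512-bit SVE implementation and 16 predicate registers p0..p15):
// Matcher::scalable_vector_reg_size(T_BYTE) is 64, so each predicate
// register occupies 64 >> LogBitsPerByte == 8 bytes (one predicate bit per
// byte lane), giving a total predicate save area of 8 * 16 == 128 bytes.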

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;
  int sve_predicate_size_in_slots = 0;
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
    }
    int extra_vector_bytes = extra_save_slots_per_register *
                             VMRegImpl::stack_slot_size *
                             FloatRegister::number_of_registers;
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register slots are 8 bytes wide, 32 floating-point registers.
      int sp_offset = Register::max_slots_per_register * i +
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
                            (FloatRegister::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegister::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The minimum vector size supported by SVE is 8 bytes, and we need to save
// the predicate registers when the vector size is 8 bytes as well.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}
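
// For illustration: a 16-byte NEON Q-register payload (size == 16) is always
// wide, while an 8-byte vector counts as wide only when SVE is enabled
// (UseSVE > 0), because the predicate registers must then be saved too.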

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.
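
// An illustrative mapping (derived from the code below, using the usual
// j_rarg0..j_rarg7 / j_farg0..j_farg7 assignments) for a static method with
// Java signature (int, long, double, Object):
//
//   sig_bt: T_INT   T_LONG  T_VOID T_DOUBLE T_VOID T_OBJECT
//   regs:   j_rarg0 j_rarg1 (bad)  j_farg0  (bad)  j_rarg2
//
// Each T_VOID is the second half of the preceding long/double and gets no
// register of its own. Only once the eight integer or eight float argument
// registers are exhausted do values spill to 8-byte-aligned stack slots.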

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

// Patch the caller's call site with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's call site
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ authenticate_return_address(c_rarg1);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r19_sender_sp, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace) {
    __ sub(sp, sp, extraspace);
  }

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java
    // long/double in a single slot on a 64-bit VM and it would be silly to
    // break them up, the interpreter leaves one slot empty and only stores
    // to a single slot. In this case the slot that is occupied is the
    // T_VOID slot. See, I said it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to the slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // jlong/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}


void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do a i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != nullptr) {
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::initial_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::initial_stubs_code()->code_begin(),
                  StubRoutines::initial_stubs_code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::final_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::final_stubs_code()->code_begin(),
                  StubRoutines::final_stubs_code()->code_end(),
                  L_ok);
    }
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative offsets, so the LSW is at the LOW address.

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ mov(rscratch2, rscratch1);
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
  __ mov(rscratch1, rscratch2);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register data = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ic_check(1 /* end_alignment */);
    __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));

    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                     VMRegPair *regs,
                                     int total_args_passed) {

// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.

    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
    };
    static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3,
      c_farg4, c_farg5, c_farg6, c_farg7
    };

    uint int_args = 0;
    uint fp_args = 0;
    uint stk_args = 0; // inc by 2 each time

    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
#ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
          return -1;
#endif
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
#ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
          return -1;
#endif
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }

  return stk_args;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  // More than 8 vector arguments are not supported for now.
  assert(total_args_passed <= Argument::n_float_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  static const FloatRegister VEC_ArgReg[Argument::n_float_register_parameters_c] = {
    v0, v1, v2, v3, v4, v5, v6, v7
  };

  // On SVE, we use the same vector registers as for 128-bit vectors on NEON.
  int next_reg_val = num_bits == 64 ? 1 : 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}
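
// Illustrative examples of the pairing above (assuming 4-byte VMRegImpl
// stack slots): with num_bits == 64 the pair for v0 spans
// vmreg..vmreg->next(1), i.e. two slots (8 bytes); for any larger size it
// spans vmreg..vmreg->next(3), the four slots of the 128-bit NEON view of
// the register that SVE shares for argument passing.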

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size()/wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots */);

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
  __ mov(rscratch1, sp); // we can't use sp as the source in str
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ movw(rscratch1, ContinuationEntry::cookie_value());
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::EQ, OK);
  __ stop("incorrect sp1");
  __ bind(OK);
#endif
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
    __ cbz(rscratch1, L_skip_vthread_code);

    // Save return value potentially containing the exception oop in callee-saved R19.
    __ mov(r19, r0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value.
    __ mov(r0, r19);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination. The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#endif

  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));

  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
  __ add(rfp, sp, (int)ContinuationEntry::size());
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
  //verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry, used only in interp_only_mode
  interpreted_entry_offset = __ pc() - start;
  {

#ifdef ASSERT
    Label is_interp_only;
    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    __ cbnzw(rscratch1, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
    __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
    __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
    __ push_cont_fastpath(rthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame
    // would appear unsafe. That's okay: at the very worst we'll miss an async sample, and
    // we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ cbnz(c_rarg2, call_thaw);

    const address tr_call = __ trampoline_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ b(exit);

    address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ cbnz(c_rarg2, call_thaw);

  const address tr_call = __ trampoline_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ b(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  ContinuationEntry::_cleanup_offset = __ pc() - start;
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret(lr);

  /// exception handling

  exception_offset = __ pc() - start;
  {
      __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19

      continuation_enter_cleanup(masm);

      __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
      __ authenticate_return_address(c_rarg1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);

      // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc

      __ mov(r1, r0); // the exception handler
      __ mov(r0, r19); // restore return value containing the exception oop
      __ verify_oop(r0);

      __ leave();
      __ mov(r3, lr);
      __ br(r1); // the exception handler
  }

  address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
    enum layout {
      rfp_off1,
      rfp_off2,
      lr_off,
      lr_off2,
      framesize // inclusive of return address
    };
    // assert(is_even(framesize/2), "sp not 16-byte aligned");
    stack_slots = framesize / VMRegImpl::slots_per_word;
    assert(stack_slots == 2, "recheck layout");

    address start = __ pc();

    compiled_entry_offset = __ pc() - start;
    __ enter();

    __ mov(c_rarg1, sp);

    frame_complete = __ pc() - start;
    address the_pc = __ pc();

    __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup

    __ mov(c_rarg0, rthread);
    __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
    __ call_VM_leaf(Continuation::freeze_entry(), 2);
    __ reset_last_Java_frame(true);

    Label pinned;

    __ cbnz(r0, pinned);

    // We've succeeded, set sp to the ContinuationEntry
    __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
    __ mov(sp, rscratch1);
    continuation_enter_cleanup(masm);

    __ bind(pinned); // pinned -- return to caller

    // handle pending exception thrown by freeze
    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    __ cbz(rscratch1, ok);
    __ leave();
    __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    __ br(rscratch1);
    __ bind(ok);

    __ leave();
    __ ret(lr);

    OopMap* map = new OopMap(framesize, 1);
    oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = r19;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
1359 // Critical native functions are a shorthand for the use of
1360 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1361 // functions.  The wrapper is expected to unpack the arguments before
1362 // passing them to the callee. Critical native functions leave the state _in_Java,
1363 // since they block out GC.
1364 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1365 // block and the check for pending exceptions it's impossible for them
1366 // to be thrown.
1367 //
1368 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1369                                                 const methodHandle& method,
1370                                                 int compile_id,
1371                                                 BasicType* in_sig_bt,
1372                                                 VMRegPair* in_regs,
1373                                                 BasicType ret_type) {
1374   if (method->is_continuation_native_intrinsic()) {
1375     int exception_offset = -1;
1376     OopMapSet* oop_maps = new OopMapSet();
1377     int frame_complete = -1;
1378     int stack_slots = -1;
1379     int interpreted_entry_offset = -1;
1380     int vep_offset = -1;
1381     if (method->is_continuation_enter_intrinsic()) {
1382       gen_continuation_enter(masm,
1383                              method,
1384                              in_sig_bt,
1385                              in_regs,
1386                              exception_offset,
1387                              oop_maps,
1388                              frame_complete,
1389                              stack_slots,
1390                              interpreted_entry_offset,
1391                              vep_offset);
1392     } else if (method->is_continuation_yield_intrinsic()) {
1393       gen_continuation_yield(masm,
1394                              method,
1395                              in_sig_bt,
1396                              in_regs,
1397                              oop_maps,
1398                              frame_complete,
1399                              stack_slots,
1400                              vep_offset);
1401     } else {
1402       guarantee(false, "Unknown Continuation native intrinsic");
1403     }
1404 
1405 #ifdef ASSERT
1406     if (method->is_continuation_enter_intrinsic()) {
1407       assert(interpreted_entry_offset != -1, "Must be set");
1408       assert(exception_offset != -1,         "Must be set");
1409     } else {
1410       assert(interpreted_entry_offset == -1, "Must be unset");
1411       assert(exception_offset == -1,         "Must be unset");
1412     }
1413     assert(frame_complete != -1,    "Must be set");
1414     assert(stack_slots != -1,       "Must be set");
1415     assert(vep_offset != -1,        "Must be set");
1416 #endif
1417 
1418     __ flush();
1419     nmethod* nm = nmethod::new_native_nmethod(method,
1420                                               compile_id,
1421                                               masm->code(),
1422                                               vep_offset,
1423                                               frame_complete,
1424                                               stack_slots,
1425                                               in_ByteSize(-1),
1426                                               in_ByteSize(-1),
1427                                               oop_maps,
1428                                               exception_offset);
1429     if (nm == nullptr) return nm;
1430     if (method->is_continuation_enter_intrinsic()) {
1431       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1432     } else if (method->is_continuation_yield_intrinsic()) {
1433       _cont_doYield_stub = nm;
1434     } else {
1435       guarantee(false, "Unknown Continuation native intrinsic");
1436     }
1437     return nm;
1438   }
1439 
1440   if (method->is_method_handle_intrinsic()) {
1441     vmIntrinsics::ID iid = method->intrinsic_id();
1442     intptr_t start = (intptr_t)__ pc();
1443     int vep_offset = ((intptr_t)__ pc()) - start;
1444 
1445     // First instruction must be a nop as it may need to be patched on deoptimisation
1446     __ nop();
1447     gen_special_dispatch(masm,
1448                          method,
1449                          in_sig_bt,
1450                          in_regs);
1451     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1452     __ flush();
1453     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1454     return nmethod::new_native_nmethod(method,
1455                                        compile_id,
1456                                        masm->code(),
1457                                        vep_offset,
1458                                        frame_complete,
1459                                        stack_slots / VMRegImpl::slots_per_word,
1460                                        in_ByteSize(-1),
1461                                        in_ByteSize(-1),
1462                                        nullptr);
1463   }
1464   address native_func = method->native_function();
1465   assert(native_func != nullptr, "must have function");
1466 
1467   // An OopMap for lock (and class if static)
1468   OopMapSet *oop_maps = new OopMapSet();
1469   intptr_t start = (intptr_t)__ pc();
1470 
1471   // We have received a description of where all the java arg are located
1472   // on entry to the wrapper. We need to convert these args to where
1473   // the jni function will expect them. To figure out where they go
1474   // we convert the java signature to a C signature by inserting
1475   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1476 
1477   const int total_in_args = method->size_of_parameters();
1478   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1479 
1480   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1481   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1482 
1483   int argc = 0;
1484   out_sig_bt[argc++] = T_ADDRESS;
1485   if (method->is_static()) {
1486     out_sig_bt[argc++] = T_OBJECT;
1487   }
1488 
1489   for (int i = 0; i < total_in_args ; i++ ) {
1490     out_sig_bt[argc++] = in_sig_bt[i];
1491   }
1492 
1493   // Now figure out where the args must be stored and how much stack space
1494   // they require.
1495   int out_arg_slots;
1496   out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1497 
1498   if (out_arg_slots < 0) {
1499     return nullptr;
1500   }
1501 
1502   // Compute framesize for the wrapper.  We need to handlize all oops in
1503   // incoming registers
1504 
1505   // Calculate the total number of stack slots we will need.
1506 
1507   // First count the abi requirement plus all of the outgoing args
1508   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1509 
1510   // Now the space for the inbound oop handle area
1511   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1512 
1513   int oop_handle_offset = stack_slots;
1514   stack_slots += total_save_slots;
1515 
1516   // Now any space we need for handlizing a klass if static method
1517 
1518   int klass_slot_offset = 0;
1519   int klass_offset = -1;
1520   int lock_slot_offset = 0;
1521   bool is_static = false;
1522 
1523   if (method->is_static()) {
1524     klass_slot_offset = stack_slots;
1525     stack_slots += VMRegImpl::slots_per_word;
1526     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1527     is_static = true;
1528   }
1529 
1530   // Plus a lock if needed
1531 
1532   if (method->is_synchronized()) {
1533     lock_slot_offset = stack_slots;
1534     stack_slots += VMRegImpl::slots_per_word;
1535   }
1536 
1537   // Now a place (+2) to save return values or temp during shuffling
1538   // + 4 for return address (which we own) and saved rfp
1539   stack_slots += 6;
1540 
1541   // Ok The space we have allocated will look like:
1542   //
1543   //
1544   // FP-> |                     |
1545   //      |---------------------|
1546   //      | 2 slots for moves   |
1547   //      |---------------------|
1548   //      | lock box (if sync)  |
1549   //      |---------------------| <- lock_slot_offset
1550   //      | klass (if static)   |
1551   //      |---------------------| <- klass_slot_offset
1552   //      | oopHandle area      |
1553   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1554   //      | outbound memory     |
1555   //      | based arguments     |
1556   //      |                     |
1557   //      |---------------------|
1558   //      |                     |
1559   // SP-> | out_preserved_slots |
1560   //
1561   //
1562 
1563 
1564   // Now compute actual number of stack words we need rounding to make
1565   // stack properly aligned.
1566   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1567 
1568   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1569 
1570   // First thing make an ic check to see if we should even be here
1571 
1572   // We are free to use all registers as temps without saving them and
1573   // restoring them except rfp. rfp is the only callee save register
1574   // as far as the interpreter and the compiler(s) are concerned.
1575 
1576   const Register receiver = j_rarg0;
1577 
1578   Label exception_pending;
1579 
1580   assert_different_registers(receiver, rscratch1);
1581   __ verify_oop(receiver);
1582   __ ic_check(8 /* end_alignment */);
1583 
1584   // Verified entry point must be aligned
1585   int vep_offset = ((intptr_t)__ pc()) - start;
1586 
1587   // If we have to make this method not-entrant we'll overwrite its
1588   // first instruction with a jump.  For this action to be legal we
1589   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1590   // SVC, HVC, or SMC.  Make it a NOP.
1591   __ nop();
1592 
1593   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1594     Label L_skip_barrier;
1595     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1596     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1597     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1598 
1599     __ bind(L_skip_barrier);
1600   }
1601 
1602   // Generate stack overflow check
1603   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1604 
1605   // Generate a new frame for the wrapper.
1606   __ enter();
1607   // -2 because return address is already present and so is saved rfp
1608   __ sub(sp, sp, stack_size - 2*wordSize);
1609 
1610   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1611   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1612 
1613   // Frame is now completed as far as size and linkage.
1614   int frame_complete = ((intptr_t)__ pc()) - start;
1615 
1616   // We use r20 as the oop handle for the receiver/klass
1617   // It is callee save so it survives the call to native
1618 
1619   const Register oop_handle_reg = r20;
1620 
1621   //
1622   // We immediately shuffle the arguments so that any vm call we have to
1623   // make from here on out (sync slow path, jvmti, etc.) we will have
1624   // captured the oops from our caller and have a valid oopMap for
1625   // them.
1626 
1627   // -----------------
1628   // The Grand Shuffle
1629 
1630   // The Java calling convention is either equal (linux) or denser (win64) than the
1631   // c calling convention. However the because of the jni_env argument the c calling
1632   // convention always has at least one more (and two for static) arguments than Java.
1633   // Therefore if we move the args from java -> c backwards then we will never have
1634   // a register->register conflict and we don't have to build a dependency graph
1635   // and figure out how to break any cycles.
1636   //
1637 
1638   // Record esp-based slot for receiver on stack for non-static methods
1639   int receiver_offset = -1;
1640 
1641   // This is a trick. We double the stack slots so we can claim
1642   // the oops in the caller's frame. Since we are sure to have
1643   // more args than the caller doubling is enough to make
1644   // sure we can capture all the incoming oop args from the
1645   // caller.
1646   //
1647   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1648 
1649   // Mark location of rfp (someday)
1650   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1651 
1652 
1653   int float_args = 0;
1654   int int_args = 0;
1655 
1656 #ifdef ASSERT
1657   bool reg_destroyed[Register::number_of_registers];
1658   bool freg_destroyed[FloatRegister::number_of_registers];
1659   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1660     reg_destroyed[r] = false;
1661   }
1662   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1663     freg_destroyed[f] = false;
1664   }
1665 
1666 #endif /* ASSERT */
1667 
1668   // For JNI natives the incoming and outgoing registers are offset upwards.
1669   GrowableArray<int> arg_order(2 * total_in_args);
1670 
1671   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1672     arg_order.push(i);
1673     arg_order.push(c_arg);
1674   }
1675 
1676   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1677     int i = arg_order.at(ai);
1678     int c_arg = arg_order.at(ai + 1);
1679     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1680     assert(c_arg != -1 && i != -1, "wrong order");
1681 #ifdef ASSERT
1682     if (in_regs[i].first()->is_Register()) {
1683       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1684     } else if (in_regs[i].first()->is_FloatRegister()) {
1685       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1686     }
1687     if (out_regs[c_arg].first()->is_Register()) {
1688       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1689     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1690       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1691     }
1692 #endif /* ASSERT */
1693     switch (in_sig_bt[i]) {
1694       case T_ARRAY:
1695       case T_OBJECT:
1696         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1697                        ((i == 0) && (!is_static)),
1698                        &receiver_offset);
1699         int_args++;
1700         break;
1701       case T_VOID:
1702         break;
1703 
1704       case T_FLOAT:
1705         __ float_move(in_regs[i], out_regs[c_arg]);
1706         float_args++;
1707         break;
1708 
1709       case T_DOUBLE:
1710         assert( i + 1 < total_in_args &&
1711                 in_sig_bt[i + 1] == T_VOID &&
1712                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1713         __ double_move(in_regs[i], out_regs[c_arg]);
1714         float_args++;
1715         break;
1716 
1717       case T_LONG :
1718         __ long_move(in_regs[i], out_regs[c_arg]);
1719         int_args++;
1720         break;
1721 
1722       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1723 
1724       default:
1725         __ move32_64(in_regs[i], out_regs[c_arg]);
1726         int_args++;
1727     }
1728   }
1729 
1730   // point c_arg at the first arg that is already loaded in case we
1731   // need to spill before we call out
1732   int c_arg = total_c_args - total_in_args;
1733 
1734   // Pre-load a static method's oop into c_rarg1.
1735   if (method->is_static()) {
1736 
1737     //  load oop into a register
1738     __ movoop(c_rarg1,
1739               JNIHandles::make_local(method->method_holder()->java_mirror()));
1740 
1741     // Now handlize the static class mirror it's known not-null.
1742     __ str(c_rarg1, Address(sp, klass_offset));
1743     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1744 
1745     // Now get the handle
1746     __ lea(c_rarg1, Address(sp, klass_offset));
1747     // and protect the arg if we must spill
1748     c_arg--;
1749   }
1750 
1751   // Change state to native (we save the return address in the thread, since it might not
1752   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1753   // points into the right code segment. It does not have to be the correct return pc.
1754   // We use the same pc/oopMap repeatedly when we call out.
1755 
1756   Label native_return;
1757   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1758     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1759     __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1760   } else {
1761     intptr_t the_pc = (intptr_t) __ pc();
1762     oop_maps->add_gc_map(the_pc - start, map);
1763 
1764     __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
1765   }
1766 
1767   Label dtrace_method_entry, dtrace_method_entry_done;
1768   if (DTraceMethodProbes) {
1769     __ b(dtrace_method_entry);
1770     __ bind(dtrace_method_entry_done);
1771   }
1772 
1773   // RedefineClasses() tracing support for obsolete method entry
1774   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1775     // protect the args we've loaded
1776     save_args(masm, total_c_args, c_arg, out_regs);
1777     __ mov_metadata(c_rarg1, method());
1778     __ call_VM_leaf(
1779       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1780       rthread, c_rarg1);
1781     restore_args(masm, total_c_args, c_arg, out_regs);
1782   }
1783 
1784   // Lock a synchronized method
1785 
1786   // Register definitions used by locking and unlocking
1787 
1788   const Register swap_reg = r0;
1789   const Register obj_reg  = r19;  // Will contain the oop
1790   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1791   const Register old_hdr  = r13;  // value of old header at unlock time
1792   const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
1793   const Register tmp = lr;
1794 
1795   Label slow_path_lock;
1796   Label lock_done;
1797 
1798   if (method->is_synchronized()) {
1799     Label count;
1800     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1801 
1802     // Get the handle (the 2nd argument)
1803     __ mov(oop_handle_reg, c_rarg1);
1804 
1805     // Get address of the box
1806 
1807     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1808 
1809     // Load the oop from the handle
1810     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1811 
1812     if (LockingMode == LM_MONITOR) {
1813       __ b(slow_path_lock);
1814     } else if (LockingMode == LM_LEGACY) {
1815       // Load (object->mark() | 1) into swap_reg %r0
1816       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1817       __ orr(swap_reg, rscratch1, 1);
1818 
1819       // Save (object->mark() | 1) into BasicLock's displaced header
1820       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1821 
1822       // src -> dest iff dest == r0 else r0 <- dest
1823       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1824 
1825       // Hmm should this move to the slow path code area???
1826 
1827       // Test if the oopMark is an obvious stack pointer, i.e.,
1828       //  1) (mark & 3) == 0, and
1829       //  2) sp <= mark < mark + os::pagesize()
1830       // These 3 tests can be done by evaluating the following
1831       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1832       // assuming both stack pointer and pagesize have their
1833       // least significant 2 bits clear.
1834       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1835 
1836       __ sub(swap_reg, sp, swap_reg);
1837       __ neg(swap_reg, swap_reg);
1838       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1839 
1840       // Save the test result, for recursive case, the result is zero
1841       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1842       __ br(Assembler::NE, slow_path_lock);
1843 
1844       __ bind(count);
1845       __ inc_held_monitor_count(rscratch1);
1846     } else {
1847       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1848       __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1849     }
1850 
1851     // Slow path will re-enter here
1852     __ bind(lock_done);
1853   }
1854 
1855 
1856   // Finally just about ready to make the JNI call
1857 
1858   // get JNIEnv* which is first argument to native
1859   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1860 
1861   // Now set thread in native
1862   __ mov(rscratch1, _thread_in_native);
1863   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1864   __ stlrw(rscratch1, rscratch2);
1865 
1866   __ rt_call(native_func);
1867 
1868   // Verify or restore cpu control state after JNI call
1869   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1870 
1871   // Unpack native results.
1872   switch (ret_type) {
1873   case T_BOOLEAN: __ c2bool(r0);                     break;
1874   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1875   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1876   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1877   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1878   case T_DOUBLE :
1879   case T_FLOAT  :
1880     // Result is in v0 we'll save as needed
1881     break;
1882   case T_ARRAY:                 // Really a handle
1883   case T_OBJECT:                // Really a handle
1884       break; // can't de-handlize until after safepoint check
1885   case T_VOID: break;
1886   case T_LONG: break;
1887   default       : ShouldNotReachHere();
1888   }
1889 
1890   Label safepoint_in_progress, safepoint_in_progress_done;
1891 
1892   // Switch thread to "native transition" state before reading the synchronization state.
1893   // This additional state is necessary because reading and testing the synchronization
1894   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1895   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1896   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1897   //     Thread A is resumed to finish this native method, but doesn't block here since it
1898   //     didn't see any synchronization is progress, and escapes.
1899   __ mov(rscratch1, _thread_in_native_trans);
1900 
1901   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1902 
1903   // Force this write out before the read below
1904   if (!UseSystemMemoryBarrier) {
1905     __ dmb(Assembler::ISH);
1906   }
1907 
1908   __ verify_sve_vector_length();
1909 
1910   // Check for safepoint operation in progress and/or pending suspend requests.
1911   {
1912     // No need for acquire as Java threads always disarm themselves.
1913     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
1914     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1915     __ cbnzw(rscratch1, safepoint_in_progress);
1916     __ bind(safepoint_in_progress_done);
1917   }
1918 
1919   // change thread state
1920   __ mov(rscratch1, _thread_in_Java);
1921   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1922   __ stlrw(rscratch1, rscratch2);
1923 
1924   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
1925     // Check preemption for Object.wait()
1926     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1927     __ cbz(rscratch1, native_return);
1928     __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1929     __ br(rscratch1);
1930     __ bind(native_return);
1931 
1932     intptr_t the_pc = (intptr_t) __ pc();
1933     oop_maps->add_gc_map(the_pc - start, map);
1934   }
1935 
1936   Label reguard;
1937   Label reguard_done;
1938   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1939   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1940   __ br(Assembler::EQ, reguard);
1941   __ bind(reguard_done);
1942 
1943   // native result if any is live
1944 
1945   // Unlock
1946   Label unlock_done;
1947   Label slow_path_unlock;
1948   if (method->is_synchronized()) {
1949 
1950     // Get locked oop from the handle we passed to jni
1951     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1952 
1953     Label done, not_recursive;
1954 
1955     if (LockingMode == LM_LEGACY) {
1956       // Simple recursive lock?
1957       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1958       __ cbnz(rscratch1, not_recursive);
1959       __ dec_held_monitor_count(rscratch1);
1960       __ b(done);
1961     }
1962 
1963     __ bind(not_recursive);
1964 
1965     // Must save r0 if if it is live now because cmpxchg must use it
1966     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1967       save_native_result(masm, ret_type, stack_slots);
1968     }
1969 
1970     if (LockingMode == LM_MONITOR) {
1971       __ b(slow_path_unlock);
1972     } else if (LockingMode == LM_LEGACY) {
1973       // get address of the stack lock
1974       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1975       //  get old displaced header
1976       __ ldr(old_hdr, Address(r0, 0));
1977 
1978       // Atomic swap old header if oop still contains the stack lock
1979       Label count;
1980       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1981       __ bind(count);
1982       __ dec_held_monitor_count(rscratch1);
1983     } else {
1984       assert(LockingMode == LM_LIGHTWEIGHT, "");
1985       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1986     }
1987 
1988     // slow path re-enters here
1989     __ bind(unlock_done);
1990     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1991       restore_native_result(masm, ret_type, stack_slots);
1992     }
1993 
1994     __ bind(done);
1995   }
1996 
1997   Label dtrace_method_exit, dtrace_method_exit_done;
1998   if (DTraceMethodProbes) {
1999     __ b(dtrace_method_exit);
2000     __ bind(dtrace_method_exit_done);
2001   }
2002 
2003   __ reset_last_Java_frame(false);
2004 
2005   // Unbox oop result, e.g. JNIHandles::resolve result.
2006   if (is_reference_type(ret_type)) {
2007     __ resolve_jobject(r0, r1, r2);
2008   }
2009 
2010   if (CheckJNICalls) {
2011     // clear_pending_jni_exception_check
2012     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2013   }
2014 
2015   // reset handle block
2016   __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2017   __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
2018 
2019   __ leave();
2020 
2021   // Any exception pending?
2022   __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2023   __ cbnz(rscratch1, exception_pending);
2024 
2025   // We're done
2026   __ ret(lr);
2027 
2028   // Unexpected paths are out of line and go here
2029 
2030   // forward the exception
2031   __ bind(exception_pending);
2032 
2033   // and forward the exception
2034   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2035 
2036   // Slow path locking & unlocking
2037   if (method->is_synchronized()) {
2038 
2039     __ block_comment("Slow path lock {");
2040     __ bind(slow_path_lock);
2041 
2042     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2043     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2044 
2045     // protect the args we've loaded
2046     save_args(masm, total_c_args, c_arg, out_regs);
2047 
2048     __ mov(c_rarg0, obj_reg);
2049     __ mov(c_rarg1, lock_reg);
2050     __ mov(c_rarg2, rthread);
2051 
2052     // Not a leaf but we have last_Java_frame setup as we want.
2053     // We don't want to unmount in case of contention since that would complicate preserving
2054     // the arguments that had already been marshalled into the native convention. So we force
2055     // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
2056     // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
2057     __ push_cont_fastpath();
2058     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2059     __ pop_cont_fastpath();
2060     restore_args(masm, total_c_args, c_arg, out_regs);
2061 
2062 #ifdef ASSERT
2063     { Label L;
2064       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2065       __ cbz(rscratch1, L);
2066       __ stop("no pending exception allowed on exit from monitorenter");
2067       __ bind(L);
2068     }
2069 #endif
2070     __ b(lock_done);
2071 
2072     __ block_comment("} Slow path lock");
2073 
2074     __ block_comment("Slow path unlock {");
2075     __ bind(slow_path_unlock);
2076 
2077     // If we haven't already saved the native result we must save it now as xmm registers
2078     // are still exposed.
2079 
2080     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2081       save_native_result(masm, ret_type, stack_slots);
2082     }
2083 
2084     __ mov(c_rarg2, rthread);
2085     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2086     __ mov(c_rarg0, obj_reg);
2087 
2088     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2089     // NOTE that obj_reg == r19 currently
2090     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2091     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2092 
2093     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2094 
2095 #ifdef ASSERT
2096     {
2097       Label L;
2098       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2099       __ cbz(rscratch1, L);
2100       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2101       __ bind(L);
2102     }
2103 #endif /* ASSERT */
2104 
2105     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2106 
2107     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2108       restore_native_result(masm, ret_type, stack_slots);
2109     }
2110     __ b(unlock_done);
2111 
2112     __ block_comment("} Slow path unlock");
2113 
2114   } // synchronized
2115 
2116   // SLOW PATH Reguard the stack if needed
2117 
2118   __ bind(reguard);
2119   save_native_result(masm, ret_type, stack_slots);
2120   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2121   restore_native_result(masm, ret_type, stack_slots);
2122   // and continue
2123   __ b(reguard_done);
2124 
2125   // SLOW PATH safepoint
2126   {
2127     __ block_comment("safepoint {");
2128     __ bind(safepoint_in_progress);
2129 
2130     // Don't use call_VM as it will see a possible pending exception and forward it
2131     // and never return here preventing us from clearing _last_native_pc down below.
2132     //
2133     save_native_result(masm, ret_type, stack_slots);
2134     __ mov(c_rarg0, rthread);
2135 #ifndef PRODUCT
2136   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2137 #endif
2138     __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2139     __ blr(rscratch1);
2140 
2141     // Restore any method result value
2142     restore_native_result(masm, ret_type, stack_slots);
2143 
2144     __ b(safepoint_in_progress_done);
2145     __ block_comment("} safepoint");
2146   }
2147 
2148   // SLOW PATH dtrace support
2149   if (DTraceMethodProbes) {
2150     {
2151       __ block_comment("dtrace entry {");
2152       __ bind(dtrace_method_entry);
2153 
2154       // We have all of the arguments setup at this point. We must not touch any register
2155       // argument registers at this point (what if we save/restore them there are no oop?
2156 
2157       save_args(masm, total_c_args, c_arg, out_regs);
2158       __ mov_metadata(c_rarg1, method());
2159       __ call_VM_leaf(
2160         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2161         rthread, c_rarg1);
2162       restore_args(masm, total_c_args, c_arg, out_regs);
2163       __ b(dtrace_method_entry_done);
2164       __ block_comment("} dtrace entry");
2165     }
2166 
2167     {
2168       __ block_comment("dtrace exit {");
2169       __ bind(dtrace_method_exit);
2170       save_native_result(masm, ret_type, stack_slots);
2171       __ mov_metadata(c_rarg1, method());
2172       __ call_VM_leaf(
2173         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2174         rthread, c_rarg1);
2175       restore_native_result(masm, ret_type, stack_slots);
2176       __ b(dtrace_method_exit_done);
2177       __ block_comment("} dtrace exit");
2178     }
2179   }
2180 
2181   __ flush();
2182 
2183   nmethod *nm = nmethod::new_native_nmethod(method,
2184                                             compile_id,
2185                                             masm->code(),
2186                                             vep_offset,
2187                                             frame_complete,
2188                                             stack_slots / VMRegImpl::slots_per_word,
2189                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2190                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2191                                             oop_maps);
2192 
2193   return nm;
2194 }
2195 
2196 // this function returns the adjust size (in number of words) to a c2i adapter
2197 // activation for use during deoptimization
2198 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2199   assert(callee_locals >= callee_parameters,
2200           "test and remove; got more parms than locals");
2201   if (callee_locals < callee_parameters)
2202     return 0;                   // No adjustment for negative locals
2203   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2204   // diff is counted in stack words
2205   return align_up(diff, 2);
2206 }
2207 
2208 
2209 //------------------------------generate_deopt_blob----------------------------
2210 void SharedRuntime::generate_deopt_blob() {
2211   // Allocate space for the code
2212   ResourceMark rm;
2213   // Setup code generation tools
2214   int pad = 0;
2215 #if INCLUDE_JVMCI
2216   if (EnableJVMCI) {
2217     pad += 512; // Increase the buffer size when compiling for JVMCI
2218   }
2219 #endif
2220   const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
2221   CodeBuffer buffer(name, 2048+pad, 1024);
2222   MacroAssembler* masm = new MacroAssembler(&buffer);
2223   int frame_size_in_words;
2224   OopMap* map = nullptr;
2225   OopMapSet *oop_maps = new OopMapSet();
2226   RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2227 
2228   // -------------
2229   // This code enters when returning to a de-optimized nmethod.  A return
2230   // address has been pushed on the stack, and return values are in
2231   // registers.
2232   // If we are doing a normal deopt then we were called from the patched
2233   // nmethod from the point we returned to the nmethod. So the return
2234   // address on the stack is wrong by NativeCall::instruction_size
2235   // We will adjust the value so it looks like we have the original return
2236   // address on the stack (like when we eagerly deoptimized).
2237   // In the case of an exception pending when deoptimizing, we enter
2238   // with a return address on the stack that points after the call we patched
2239   // into the exception handler. We have the following register state from,
2240   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2241   //    r0: exception oop
2242   //    r19: exception handler
2243   //    r3: throwing pc
2244   // So in this case we simply jam r3 into the useless return address and
2245   // the stack looks just like we want.
2246   //
2247   // At this point we need to de-opt.  We save the argument return
2248   // registers.  We call the first C routine, fetch_unroll_info().  This
2249   // routine captures the return values and returns a structure which
2250   // describes the current frame size and the sizes of all replacement frames.
2251   // The current frame is compiled code and may contain many inlined
2252   // functions, each with their own JVM state.  We pop the current frame, then
2253   // push all the new frames.  Then we call the C routine unpack_frames() to
2254   // populate these frames.  Finally unpack_frames() returns us the new target
2255   // address.  Notice that callee-save registers are BLOWN here; they have
2256   // already been captured in the vframeArray at the time the return PC was
2257   // patched.
2258   address start = __ pc();
2259   Label cont;
2260 
2261   // Prolog for non exception case!
2262 
2263   // Save everything in sight.
2264   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2265 
2266   // Normal deoptimization.  Save exec mode for unpack_frames.
2267   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2268   __ b(cont);
2269 
2270   int reexecute_offset = __ pc() - start;
2271 #if INCLUDE_JVMCI && !defined(COMPILER1)
2272   if (UseJVMCICompiler) {
2273     // JVMCI does not use this kind of deoptimization
2274     __ should_not_reach_here();
2275   }
2276 #endif
2277 
2278   // Reexecute case
2279   // return address is the pc describes what bci to do re-execute at
2280 
2281   // No need to update map as each call to save_live_registers will produce identical oopmap
2282   (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2283 
2284   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2285   __ b(cont);
2286 
2287 #if INCLUDE_JVMCI
2288   Label after_fetch_unroll_info_call;
2289   int implicit_exception_uncommon_trap_offset = 0;
2290   int uncommon_trap_offset = 0;
2291 
2292   if (EnableJVMCI) {
2293     implicit_exception_uncommon_trap_offset = __ pc() - start;
2294 
2295     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2296     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2297 
2298     uncommon_trap_offset = __ pc() - start;
2299 
2300     // Save everything in sight.
2301     reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2302     // fetch_unroll_info needs to call last_java_frame()
2303     Label retaddr;
2304     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2305 
2306     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2307     __ movw(rscratch1, -1);
2308     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2309 
2310     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2311     __ mov(c_rarg0, rthread);
2312     __ movw(c_rarg2, rcpool); // exec mode
2313     __ lea(rscratch1,
2314            RuntimeAddress(CAST_FROM_FN_PTR(address,
2315                                            Deoptimization::uncommon_trap)));
2316     __ blr(rscratch1);
2317     __ bind(retaddr);
2318     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2319 
2320     __ reset_last_Java_frame(false);
2321 
2322     __ b(after_fetch_unroll_info_call);
2323   } // EnableJVMCI
2324 #endif // INCLUDE_JVMCI
2325 
2326   int exception_offset = __ pc() - start;
2327 
2328   // Prolog for exception case
2329 
2330   // all registers are dead at this entry point, except for r0, and
2331   // r3 which contain the exception oop and exception pc
2332   // respectively.  Set them in TLS and fall thru to the
2333   // unpack_with_exception_in_tls entry point.
2334 
2335   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2336   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2337 
2338   int exception_in_tls_offset = __ pc() - start;
2339 
2340   // new implementation because exception oop is now passed in JavaThread
2341 
2342   // Prolog for exception case
2343   // All registers must be preserved because they might be used by LinearScan
2344   // Exceptiop oop and throwing PC are passed in JavaThread
2345   // tos: stack at point of call to method that threw the exception (i.e. only
2346   // args are on the stack, no return address)
2347 
2348   // The return address pushed by save_live_registers will be patched
2349   // later with the throwing pc. The correct value is not available
2350   // now because loading it from memory would destroy registers.
2351 
2352   // NB: The SP at this point must be the SP of the method that is
2353   // being deoptimized.  Deoptimization assumes that the frame created
2354   // here by save_live_registers is immediately below the method's SP.
2355   // This is a somewhat fragile mechanism.
2356 
2357   // Save everything in sight.
2358   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2359 
2360   // Now it is safe to overwrite any register
2361 
2362   // Deopt during an exception.  Save exec mode for unpack_frames.
2363   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2364 
2365   // load throwing pc from JavaThread and patch it as the return address
2366   // of the current frame. Then clear the field in JavaThread
2367   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2368   __ protect_return_address(r3);
2369   __ str(r3, Address(rfp, wordSize));
2370   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2371 
2372 #ifdef ASSERT
2373   // verify that there is really an exception oop in JavaThread
2374   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2375   __ verify_oop(r0);
2376 
2377   // verify that there is no pending exception
2378   Label no_pending_exception;
2379   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2380   __ cbz(rscratch1, no_pending_exception);
2381   __ stop("must not have pending exception here");
2382   __ bind(no_pending_exception);
2383 #endif
2384 
2385   __ bind(cont);
2386 
2387   // Call C code.  Need thread and this frame, but NOT official VM entry
2388   // crud.  We cannot block on this call, no GC can happen.
2389   //
2390   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2391 
2392   // fetch_unroll_info needs to call last_java_frame().
2393 
2394   Label retaddr;
2395   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2396 #ifdef ASSERT
2397   { Label L;
2398     __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2399     __ cbz(rscratch1, L);
2400     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2401     __ bind(L);
2402   }
2403 #endif // ASSERT
2404   __ mov(c_rarg0, rthread);
2405   __ mov(c_rarg1, rcpool);
2406   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2407   __ blr(rscratch1);
2408   __ bind(retaddr);
2409 
2410   // Need to have an oopmap that tells fetch_unroll_info where to
2411   // find any register it might need.
2412   oop_maps->add_gc_map(__ pc() - start, map);
2413 
2414   __ reset_last_Java_frame(false);
2415 
2416 #if INCLUDE_JVMCI
2417   if (EnableJVMCI) {
2418     __ bind(after_fetch_unroll_info_call);
2419   }
2420 #endif
2421 
2422   // Load UnrollBlock* into r5
2423   __ mov(r5, r0);
2424 
2425   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2426    Label noException;
2427   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2428   __ br(Assembler::NE, noException);
2429   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2430   // QQQ this is useless it was null above
2431   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2432   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2433   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2434 
2435   __ verify_oop(r0);
2436 
2437   // Overwrite the result registers with the exception results.
2438   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2439   // I think this is useless
2440   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2441 
2442   __ bind(noException);
2443 
2444   // Only register save data is on the stack.
2445   // Now restore the result registers.  Everything else is either dead
2446   // or captured in the vframeArray.
2447 
2448   // Restore fp result register
2449   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2450   // Restore integer result register
2451   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2452 
2453   // Pop all of the register save area off the stack
2454   __ add(sp, sp, frame_size_in_words * wordSize);
2455 
2456   // All of the register save area has been popped of the stack. Only the
2457   // return address remains.
2458 
2459   // Pop all the frames we must move/replace.
2460   //
2461   // Frame picture (youngest to oldest)
2462   // 1: self-frame (no frame link)
2463   // 2: deopting frame  (no frame link)
2464   // 3: caller of deopting frame (could be compiled/interpreted).
2465   //
2466   // Note: by leaving the return address of self-frame on the stack
2467   // and using the size of frame 2 to adjust the stack
2468   // when we are done the return to frame 3 will still be on the stack.
2469 
2470   // Pop deoptimized frame
2471   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2472   __ sub(r2, r2, 2 * wordSize);
2473   __ add(sp, sp, r2);
2474   __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2475 
2476 #ifdef ASSERT
2477   // Compilers generate code that bang the stack by as much as the
2478   // interpreter would need. So this stack banging should never
2479   // trigger a fault. Verify that it does not on non product builds.
2480   __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2481   __ bang_stack_size(r19, r2);
2482 #endif
2483   // Load address of array of frame pcs into r2
2484   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2485 
2486   // Trash the old pc
2487   // __ addptr(sp, wordSize);  FIXME ????
2488 
2489   // Load address of array of frame sizes into r4
2490   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2491 
2492   // Load counter into r3
2493   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2494 
2495   // Now adjust the caller's stack to make up for the extra locals
2496   // but record the original sp so that we can save it in the skeletal interpreter
2497   // frame and the stack walking of interpreter_sender will get the unextended sp
2498   // value and not the "real" sp value.
2499 
2500   const Register sender_sp = r6;
2501 
2502   __ mov(sender_sp, sp);
2503   __ ldrw(r19, Address(r5,
2504                        Deoptimization::UnrollBlock::
2505                        caller_adjustment_offset()));
2506   __ sub(sp, sp, r19);
2507 
2508   // Push interpreter frames in a loop
2509   __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2510   __ mov(rscratch2, rscratch1);
2511   Label loop;
2512   __ bind(loop);
2513   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2514   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2515   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2516   __ enter();                           // Save old & set new fp
2517   __ sub(sp, sp, r19);                  // Prolog
2518   // This value is corrected by layout_activation_impl
2519   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2520   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2521   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2522   __ sub(r3, r3, 1);                   // Decrement counter
2523   __ cbnz(r3, loop);
2524 
2525     // Re-push self-frame
2526   __ ldr(lr, Address(r2));
2527   __ enter();
2528 
2529   // Allocate a full sized register save area.  We subtract 2 because
2530   // enter() just pushed 2 words
2531   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2532 
2533   // Restore frame locals after moving the frame
2534   __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2535   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2536 
2537   // Call C code.  Need thread but NOT official VM entry
2538   // crud.  We cannot block on this call, no GC can happen.  Call should
2539   // restore return values to their stack-slots with the new SP.
2540   //
2541   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2542 
2543   // Use rfp because the frames look interpreted now
2544   // Don't need the precise return PC here, just precise enough to point into this code blob.
2545   address the_pc = __ pc();
2546   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2547 
2548   __ mov(c_rarg0, rthread);
2549   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2550   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2551   __ blr(rscratch1);
2552 
2553   // Set an oopmap for the call site
2554   // Use the same PC we used for the last java frame
2555   oop_maps->add_gc_map(the_pc - start,
2556                        new OopMap( frame_size_in_words, 0 ));
2557 
2558   // Clear fp AND pc
2559   __ reset_last_Java_frame(true);
2560 
2561   // Collect return values
2562   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2563   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2564   // I think this is useless (throwing pc?)
2565   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2566 
2567   // Pop self-frame.
2568   __ leave();                           // Epilog
2569 
2570   // Jump to interpreter
2571   __ ret(lr);
2572 
2573   // Make sure all code is generated
2574   masm->flush();
2575 
2576   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2577   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2578 #if INCLUDE_JVMCI
2579   if (EnableJVMCI) {
2580     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2581     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2582   }
2583 #endif
2584 }
2585 
2586 // Number of stack slots between incoming argument block and the start of
2587 // a new frame.  The PROLOG must add this many slots to the stack.  The
2588 // EPILOG must remove this many slots. aarch64 needs two slots for
2589 // return address and fp.
2590 // TODO think this is correct but check
2591 uint SharedRuntime::in_preserve_stack_slots() {
2592   return 4;
2593 }
2594 
2595 uint SharedRuntime::out_preserve_stack_slots() {
2596   return 0;
2597 }
2598 
2599 
2600 VMReg SharedRuntime::thread_register() {
2601   return rthread->as_VMReg();
2602 }
2603 
2604 //------------------------------generate_handler_blob------
2605 //
2606 // Generate a special Compile2Runtime blob that saves all registers,
2607 // and setup oopmap.
2608 //
2609 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2610   assert(is_polling_page_id(id), "expected a polling page stub id");
2611 
2612   ResourceMark rm;
2613   OopMapSet *oop_maps = new OopMapSet();
2614   OopMap* map;
2615 
2616   // Allocate space for the code.  Setup code generation tools.
2617   const char* name = SharedRuntime::stub_name(id);
2618   CodeBuffer buffer(name, 2048, 1024);
2619   MacroAssembler* masm = new MacroAssembler(&buffer);
2620 
2621   address start   = __ pc();
2622   address call_pc = nullptr;
2623   int frame_size_in_words;
2624   bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
2625   RegisterSaver reg_save(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */);
2626 
2627   // When the signal occurred, the LR was either signed and stored on the stack (in which
2628   // case it will be restored from the stack before being used) or unsigned and not stored
2629   // on the stack. Stipping ensures we get the right value.
2630   __ strip_return_address();
2631 
2632   // Save Integer and Float registers.
2633   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2634 
2635   // The following is basically a call_VM.  However, we need the precise
2636   // address of the call in order to generate an oopmap. Hence, we do all the
2637   // work ourselves.
2638 
2639   Label retaddr;
2640   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2641 
2642   // The return address must always be correct so that frame constructor never
2643   // sees an invalid pc.
2644 
2645   if (!cause_return) {
2646     // overwrite the return address pushed by save_live_registers
2647     // Additionally, r20 is a callee-saved register so we can look at
2648     // it later to determine if someone changed the return address for
2649     // us!
2650     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2651     __ protect_return_address(r20);
2652     __ str(r20, Address(rfp, wordSize));
2653   }
2654 
2655   // Do the call
2656   __ mov(c_rarg0, rthread);
2657   __ lea(rscratch1, RuntimeAddress(call_ptr));
2658   __ blr(rscratch1);
2659   __ bind(retaddr);
2660 
2661   // Set an oopmap for the call site.  This oopmap will map all
2662   // oop-registers and debug-info registers as callee-saved.  This
2663   // will allow deoptimization at this safepoint to find all possible
2664   // debug-info recordings, as well as let GC find all oops.
2665 
2666   oop_maps->add_gc_map( __ pc() - start, map);
2667 
2668   Label noException;
2669 
2670   __ reset_last_Java_frame(false);
2671 
2672   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2673 
2674   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2675   __ cbz(rscratch1, noException);
2676 
2677   // Exception pending
2678 
2679   reg_save.restore_live_registers(masm);
2680 
2681   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2682 
2683   // No exception case
2684   __ bind(noException);
2685 
2686   Label no_adjust, bail;
2687   if (!cause_return) {
2688     // If our stashed return pc was modified by the runtime we avoid touching it
2689     __ ldr(rscratch1, Address(rfp, wordSize));
2690     __ cmp(r20, rscratch1);
2691     __ br(Assembler::NE, no_adjust);
2692     __ authenticate_return_address(r20);
2693 
2694 #ifdef ASSERT
2695     // Verify the correct encoding of the poll we're about to skip.
2696     // See NativeInstruction::is_ldrw_to_zr()
    __ ldrw(rscratch1, Address(r20));
    __ ubfx(rscratch2, rscratch1, 22, 10);
    __ cmpw(rscratch2, 0b1011100101);
    __ br(Assembler::NE, bail);
    __ ubfx(rscratch2, rscratch1, 0, 5);
    __ cmpw(rscratch2, 0b11111);
    __ br(Assembler::NE, bail);
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    // (a single 4-byte instruction on aarch64)
    __ add(r20, r20, NativeInstruction::instruction_size);
    __ protect_return_address(r20);
    __ str(r20, Address(rfp, wordSize));
  }

  __ bind(no_adjust);
  // Normal exit, restore registers and exit.
  reg_save.restore_live_registers(masm);

  __ ret(lr);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // Make sure all code is generated
  masm->flush();

  // Fill out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find the proper destination
// of a Java call. All the argument registers are live at this point, but
// since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
  assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
  assert(is_resolve_id(id), "expected a resolve stub id");

  // allocate space for the code
  ResourceMark rm;

  const char* name = SharedRuntime::stub_name(id);
  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;
  RegisterSaver reg_save(false /* save_vectors */);

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = nullptr;

  int start = __ offset();

  map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  {
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ mov(c_rarg0, rthread);
    __ lea(rscratch1, RuntimeAddress(destination));

    __ blr(rscratch1);
    __ bind(retaddr);
  }

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  // r0 contains the address we are going to jump to, assuming no exception
  // was installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false);
  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, pending);

  // get the returned Method*
  __ get_vm_result_2(rmethod, rthread);
  __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));

  // r0 holds the address we want to jump to; stash it in rscratch1's save
  // slot, which is safe to clobber since rscratch1 is a scratch register
  __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
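  // restore_live_registers() reloads every register from its save slot, so
  // rscratch1 comes back holding the jump target while all other registers
  // return to their state on entry.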
  reg_save.restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ br(rscratch1);

  // Pending exception after the safepoint

  __ bind(pending);

  reg_save.restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));

  __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob; the frame size passed to new_runtime_stub is in words
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}

// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs.  If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller-saved registers are treated as volatile in the compiler.

RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
  assert(is_throw_id(id), "expected a throw stub id");

  const char* name = SharedRuntime::stub_name(id);

  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
  enum layout {
    rfp_off = 0,
    rfp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };
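  // Each enum value is one 32-bit VMRegImpl stack slot, so the two 64-bit
  // saved values (fp and lr) occupy two slots apiece; framesize (4 slots)
  // is converted to words when the RuntimeStub is created below.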

  int insts_size = 512;
  int locs_size  = 64;

  ResourceMark rm;
  const char* timer_msg = "SharedRuntime generate_throw_exception";
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));

  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps  = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();

  // This is an inlined and slightly modified version of call_VM
  // which has the ability to fetch the return PC out of
  // thread-local storage and also sets up last_Java_sp slightly
  // differently than the real call_VM.

  __ enter(); // Save FP and LR before call

  assert(is_even(framesize/2), "sp not 16-byte aligned");

  // lr and fp are already in place
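  // framesize is counted in 32-bit slots, and enter() has already pushed
  // fp and lr (4 slots), so only the remainder (here zero) needs to be
  // allocated below rfp.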
  __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog

  int frame_complete = __ pc() - start;

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  __ mov(c_rarg0, rthread);
  BLOCK_COMMENT("call runtime_entry");
  __ mov(rscratch1, runtime_entry);
  __ blr(rscratch1);

  // Generate oop map
  OopMap* map = new OopMap(framesize, 0);

  oop_maps->add_gc_map(the_pc - start, map);

  __ reset_last_Java_frame(true);

  // Reinitialize the ptrue predicate register, in case the external runtime
  // call clobbers ptrue reg, as we may return to SVE compiled code.
  __ reinitialize_ptrue();

  __ leave();

  // check for pending exceptions
#ifdef ASSERT
  Label L;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, L);
  __ should_not_reach_here();
  __ bind(L);
#endif // ASSERT
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
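  // (LogBytesPerWord - LogBytesPerInt == 1 on a 64-bit VM, so the shift
  // below halves the slot count: 4 slots -> 2 words.)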
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name,
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

#if INCLUDE_JFR

static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
  __ mov(c_rarg0, thread);
}

// Clear last_Java_sp again
static void jfr_epilogue(MacroAssembler* masm) {
  __ reset_last_Java_frame(true);
}

// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
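// The dereference goes through a load barrier, via resolve_global_jobject(),
// after the call returns, leaving the event writer oop in r0.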
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;
  const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
  jfr_epilogue(masm);
  __ resolve_global_jobject(r0, rscratch1, rscratch2);
  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(the_pc - start, map);

  RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

// For c2: call to return a leased buffer.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 1024;
  int locs_size = 64;

  const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();
  __ enter();
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  jfr_prologue(the_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
  jfr_epilogue(masm);

  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(the_pc - start, map);

  RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub::new_runtime_stub(name, &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}

#endif // INCLUDE_JFR