/*
 * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "code/aotCodeCache.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes();

  // Total stack size in bytes for saving SVE predicate registers.
  int total_sve_predicate_in_bytes();

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off + FPUStateSizeInWords,
                rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
                return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
                reg_save_size = return_off + Register::max_slots_per_register};

};
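
// Illustrative sketch (not to scale) of the save area laid out by
// save_live_registers(), from low to high addresses, as implied by the
// offset helpers below:
//
//   [ SVE predicate registers ]    (only when saving scalable vectors)
//   [ FP/vector registers v0..v31 ]
//   [ integer registers r0 .. ]
//   [ saved rfp ]
//   [ return address ]             <-- high end of the save area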

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers(), so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegister::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegister::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}

int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate registers,
  // if any, in the stack frame pushed by save_live_registers(), so the
  // offset depends on the total size of the saved predicate registers.
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
}

int RegisterSaver::total_sve_predicate_in_bytes() {
#ifdef COMPILER2
  if (_save_vectors && Matcher::supports_scalable_vector()) {
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
           PRegister::number_of_registers;
  }
#endif
  return 0;
}
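
// Illustrative: an SVE predicate register holds one bit per vector byte,
// i.e. its size in bytes is the vector length in bytes divided by 8
// (hence the >> LogBitsPerByte above). For a hypothetical 256-bit
// (32-byte) vector length that is 4 bytes per predicate register,
// multiplied by PRegister::number_of_registers saved registers.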

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;
  int sve_predicate_size_in_slots = 0;
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
    }
    int extra_vector_bytes = extra_save_slots_per_register *
                             VMRegImpl::stack_slot_size *
                             FloatRegister::number_of_registers;
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jints), not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register save slots are 8 bytes wide; the 32 floating-point
      // registers are saved below the integer registers.
      int sp_offset = Register::max_slots_per_register * i +
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
                            (FloatRegister::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegister::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address();
}

// Is the vector's size (in bytes) bigger than a size saved by default?
// Vector registers are saved as 8-byte (64-bit) values by default on AArch64.
// The minimum supported SVE vector size is 8 bytes, and we also need to
// save predicate registers when the vector size is 8 bytes.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}
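
// Illustrative examples: a 16-byte NEON vector is always wide; with
// UseSVE > 0 an 8-byte vector is also treated as wide, because the SVE
// predicate registers must be saved alongside it.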

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

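// Example (illustrative): for a Java signature (int, long, double, Object),
// sig_bt is { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID, T_OBJECT } and the
// loop below assigns int -> j_rarg0, long -> j_rarg1, double -> j_farg0,
// and Object -> j_rarg2, leaving stk_args at 0 since everything fits in
// registers.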
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;

int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {

  // Create the mapping between argument positions and registers.

  static const Register INT_ArgReg[java_return_convention_max_int] = {
    r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
  };

  static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
    j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      // Should T_METADATA be added to java_calling_convention as well?
    case T_METADATA:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_FLOAT:
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return int_args + fp_args;
}
// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach.

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ authenticate_return_address(c_rarg1);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}
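
// Note (illustrative): the full push_CPU_state()/pop_CPU_state() around the
// call above is needed because this helper runs in the middle of an outgoing
// call sequence, so all Java argument registers are live and must survive
// the runtime call.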

// For each inline type argument, sig includes the list of fields of
// the inline type. This utility function computes the number of
// arguments for the call if inline types are passed by reference (the
// calling convention the interpreter expects).
static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
  int total_args_passed = 0;
  if (InlineTypePassFieldsAsArgs) {
    for (int i = 0; i < sig_extended->length(); i++) {
      BasicType bt = sig_extended->at(i)._bt;
      if (bt == T_METADATA) {
        // In sig_extended, an inline type argument starts with:
        // T_METADATA, followed by the types of the fields of the
        // inline type and T_VOID to mark the end of the inline
        // type. Inline types are flattened so, for instance, in the
        // case of an inline type with an int field and an inline type
        // field that itself has 2 fields, an int and a long:
        // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
        // slot for the T_LONG) T_VOID (inner inline type) T_VOID
        // (outer inline type)
        total_args_passed++;
        int vt = 1;
        do {
          i++;
          BasicType bt = sig_extended->at(i)._bt;
          BasicType prev_bt = sig_extended->at(i-1)._bt;
          if (bt == T_METADATA) {
            vt++;
          } else if (bt == T_VOID &&
                     prev_bt != T_LONG &&
                     prev_bt != T_DOUBLE) {
            vt--;
          }
        } while (vt != 0);
      } else {
        total_args_passed++;
      }
    }
  } else {
    total_args_passed = sig_extended->length();
  }
  return total_args_passed;
}
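
// Worked example (illustrative), continuing the comment above: for the
// flattened sequence T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID
// T_VOID T_VOID, the inner loop consumes the whole sequence while
// tracking the nesting depth in vt, and total_args_passed is incremented
// just once: the interpreter receives the outer inline type as a single
// reference.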

static void gen_c2i_adapter_helper(MacroAssembler* masm,
                                   BasicType bt,
                                   BasicType prev_bt,
                                   size_t size_in_bytes,
                                   const VMRegPair& reg_pair,
                                   const Address& to,
                                   Register tmp1,
                                   Register tmp2,
                                   Register tmp3,
                                   int extraspace,
                                   bool is_oop) {
  if (bt == T_VOID) {
    assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
    return;
  }

  // Say 4 args:
  // i   st_off
  // 0   32 T_LONG
  // 1   24 T_VOID
  // 2   16 T_OBJECT
  // 3    8 T_BOOL
  // -    0 return address
  //
  // However, to make things extra confusing: because we can fit a Java long/double in
  // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
  // leaves one slot empty and only stores to a single slot. In this case the
  // slot that is occupied is the T_VOID slot. See, I said it was confusing.

  bool wide = (size_in_bytes == wordSize);
  VMReg r_1 = reg_pair.first();
  VMReg r_2 = reg_pair.second();
  assert(r_2->is_valid() == wide, "invalid size");
  if (!r_1->is_valid()) {
    assert(!r_2->is_valid(), "");
    return;
  }

  if (!r_1->is_FloatRegister()) {
    Register val = r25;
    if (r_1->is_stack()) {
      // memory to memory: use r25 (the usual scratch registers are used by store_heap_oop)
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
    } else {
      val = r_1->as_Register();
    }
    assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
    if (is_oop) {
      // store_heap_oop transitively calls oop_store_at, which corrupts to.base(). We need to keep it valid.
      __ push(to.base(), sp);
      __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
      __ pop(to.base(), sp);
    } else {
      __ store_sized_value(to, val, size_in_bytes);
    }
  } else {
    if (wide) {
      __ strd(r_1->as_FloatRegister(), to);
    } else {
      // only a float: use just part of the slot
      __ strs(r_1->as_FloatRegister(), to);
    }
  }
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            const GrowableArray<SigEntry>* sig_extended,
                            const VMRegPair *regs,
                            bool requires_clinit_barrier,
                            address& c2i_no_clinit_check_entry,
                            Label& skip_fixup,
                            address start,
                            OopMapSet* oop_maps,
                            int& frame_complete,
                            int& frame_size_in_words,
                            bool alloc_inline_receiver) {
  if (requires_clinit_barrier) {
    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Name some registers to be used in the following code. We can use
  // anything except r0-r7 which are arguments in the Java calling
  // convention, rmethod (r12), and r19 which holds the outgoing sender
  // SP for the interpreter.
  Register buf_array = r10;   // Array of buffered inline types
  Register buf_oop = r11;     // Buffered inline type oop
  Register tmp1 = r15;
  Register tmp2 = r16;
  Register tmp3 = r17;

#ifdef ASSERT
  // The temps must be caller-saved; otherwise this code would have to save them explicitly.
  RegSet clobbered_gp_regs = MacroAssembler::call_clobbered_gp_registers();
  assert(clobbered_gp_regs.contains(buf_array), "buf_array must be saved explicitly if it's not a clobber");
  assert(clobbered_gp_regs.contains(buf_oop), "buf_oop must be saved explicitly if it's not a clobber");
  assert(clobbered_gp_regs.contains(tmp1), "tmp1 must be saved explicitly if it's not a clobber");
  assert(clobbered_gp_regs.contains(tmp2), "tmp2 must be saved explicitly if it's not a clobber");
  assert(clobbered_gp_regs.contains(tmp3), "tmp3 must be saved explicitly if it's not a clobber");
#endif

  if (InlineTypePassFieldsAsArgs) {
    // Is there an inline type argument?
    bool has_inline_argument = false;
    for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
      has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
    }
    if (has_inline_argument) {
      // There is at least one inline type argument: we're coming from
      // compiled code so we may not have buffers to back the inline
      // objects. Allocate the buffers here with a runtime call for
      // the inline type arguments that need a buffer.
      RegisterSaver reg_save(true /* save_vectors */);
      OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

      frame_complete = __ offset();
      address the_pc = __ pc();

      Label retaddr;
      __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

      __ mov(c_rarg0, rthread);
      __ mov(c_rarg1, rmethod);
      __ mov(c_rarg2, (int64_t)alloc_inline_receiver);

      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
      __ blr(rscratch1);
      __ bind(retaddr);

      oop_maps->add_gc_map(__ pc() - start, map);
      __ reset_last_Java_frame(false);

      reg_save.restore_live_registers(masm);

      Label no_exception;
      __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
      __ cbz(rscratch1, no_exception);

      __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
      __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
      __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

      __ bind(no_exception);

      // We get an array of objects from the runtime call
      __ get_vm_result_oop(buf_array, rthread);
      __ get_vm_result_metadata(rmethod, rthread); // TODO: required to keep the callee Method live?
    }
  }

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int total_args_passed = compute_total_args_passed_int(sig_extended);
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, StackAlignmentInBytes);

  // set senderSP value
  __ mov(r19_sender_sp, sp);

  __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space

  // next_arg_comp is the next argument from the compiler point of
  // view (inline type fields are passed in registers/on the stack). In
  // sig_extended, an inline type argument starts with: T_METADATA,
  // followed by the types of the fields of the inline type and T_VOID
  // to mark the end of the inline type. ignored counts the number of
  // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
  // used to get the buffer for that argument from the pool of buffers
  // we allocated above and want to pass to the
  // interpreter. next_arg_int is the next argument from the
  // interpreter point of view (inline types are passed by reference).
  for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
       next_arg_comp < sig_extended->length(); next_arg_comp++) {
    assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
    assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
    BasicType bt = sig_extended->at(next_arg_comp)._bt;
    int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
    if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
      int next_off = st_off - Interpreter::stackElementSize;
      const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
      const VMRegPair reg_pair = regs[next_arg_comp-ignored];
      size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
      gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                             size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
      next_arg_int++;
#ifdef ASSERT
      if (bt == T_LONG || bt == T_DOUBLE) {
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
        __ str(rscratch1, Address(sp, st_off));
      }
#endif /* ASSERT */
    } else {
      ignored++;
      next_arg_int++;
      int vt = 1;
      // write fields we get from compiled code in registers/stack
      // slots to the buffer: we know we are done with that inline type
      // argument when we hit the T_VOID that acts as an end of inline
      // type delimiter for this inline type. Inline types are flattened
      // so we might encounter embedded inline types. Each entry in
      // sig_extended contains a field offset in the buffer.
      Label L_null;
      Label not_null_buffer;
      do {
        next_arg_comp++;
        BasicType bt = sig_extended->at(next_arg_comp)._bt;
        BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
        if (bt == T_METADATA) {
          vt++;
          ignored++;
        } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
          vt--;
          ignored++;
        } else if (sig_extended->at(next_arg_comp)._vt_oop) {
          VMReg buffer = regs[next_arg_comp-ignored].first();
          if (buffer->is_stack()) {
            int ld_off = buffer->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
            __ ldr(buf_oop, Address(sp, ld_off));
          } else {
            __ mov(buf_oop, buffer->as_Register());
          }
          __ cbnz(buf_oop, not_null_buffer);
          // get the buffer from the just allocated pool of buffers
          int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
          __ load_heap_oop(buf_oop, Address(buf_array, index), rscratch1, tmp2);
          next_vt_arg++;
        } else {
          int off = sig_extended->at(next_arg_comp)._offset;
          if (off == -1) {
            // Nullable inline type argument, emit null check
            VMReg reg = regs[next_arg_comp-ignored].first();
            Label L_notNull;
            if (reg->is_stack()) {
              int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
              __ ldrb(tmp1, Address(sp, ld_off));
              __ cbnz(tmp1, L_notNull);
            } else {
              __ cbnz(reg->as_Register(), L_notNull);
            }
            __ str(zr, Address(sp, st_off));
            __ b(L_null);
            __ bind(L_notNull);
            continue;
          }
          assert(off > 0, "offset in object should be positive");
          size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
          bool is_oop = is_reference_type(bt);
          gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                                 size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
        }
      } while (vt != 0);
      // pass the buffer to the interpreter
      __ bind(not_null_buffer);
      __ str(buf_oop, Address(sp, st_off));
      __ bind(L_null);
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {

  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Cut-out for having no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16); // keep the outgoing SP 16-byte aligned
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  int total_args_passed = sig->length();

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    BasicType bt = sig->at(i)._bt;
    if (bt == T_VOID) {
      assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");

    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative so LSW is at the LOW address.

        // ld_off is MSW so get LSW
        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ mov(rscratch2, rscratch1);
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
  __ mov(rscratch1, rscratch2);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
  __ br(rscratch1);
}
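
// Summary (illustrative): on entry esp points at the interpreter's outgoing
// arguments. The shuffle above moves them into the compiled calling
// convention's registers and outgoing stack slots, and we then jump to the
// compiled entry with rmethod stashed in JavaThread::callee_target as a
// fallback for handle_wrong_method.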

static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
  Register data = rscratch2;
  __ ic_check(1 /* end_alignment */);
  __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));

  // Method might have been compiled since the call site was patched to
  // interpreted; if that is the case treat it as a miss so we can get
  // the call site corrected.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, skip_fixup);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}

// ---------------------------------------------------------------
void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
                                            int comp_args_on_stack,
                                            const GrowableArray<SigEntry>* sig,
                                            const VMRegPair* regs,
                                            const GrowableArray<SigEntry>* sig_cc,
                                            const VMRegPair* regs_cc,
                                            const GrowableArray<SigEntry>* sig_cc_ro,
                                            const VMRegPair* regs_cc_ro,
                                            address entry_address[AdapterBlob::ENTRY_COUNT],
                                            AdapterBlob*& new_adapter,
                                            bool allocate_code_blob) {

  entry_address[AdapterBlob::I2C] = __ pc();
  gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  entry_address[AdapterBlob::C2I_Unverified] = __ pc();
  entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
  Label skip_fixup;

  gen_inline_cache_check(masm, skip_fixup);

  OopMapSet* oop_maps = new OopMapSet();
  int frame_complete = CodeOffsets::frame_never_safe;
  int frame_size_in_words = 0;

  // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
  entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
  entry_address[AdapterBlob::C2I_Inline_RO] = __ pc();
  if (regs_cc != regs_cc_ro) {
    // No class init barrier needed because method is guaranteed to be non-static
    gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, entry_address[AdapterBlob::C2I_No_Clinit_Check],
                    skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
    skip_fixup.reset();
  }

  // Scalarized c2i adapter
  entry_address[AdapterBlob::C2I]        = __ pc();
  entry_address[AdapterBlob::C2I_Inline] = __ pc();
  gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
                  skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);

  // Non-scalarized c2i adapter
  if (regs != regs_cc) {
    entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
    Label inline_entry_skip_fixup;
    gen_inline_cache_check(masm, inline_entry_skip_fixup);

    entry_address[AdapterBlob::C2I_Inline] = __ pc();
    gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
                    inline_entry_skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
  }

  // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
  // the GC knows the locations of oop arguments passed to the c2i adapter.
  if (allocate_code_blob) {
    bool caller_must_gc_arguments = (regs != regs_cc);
    int entry_offset[AdapterHandlerEntry::ENTRIES_COUNT];
    assert(AdapterHandlerEntry::ENTRIES_COUNT == 7, "sanity");
    AdapterHandlerLibrary::address_to_offset(entry_address, entry_offset);
    new_adapter = AdapterBlob::create(masm->code(), entry_offset, frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
  }
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                     VMRegPair *regs,
                                     int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than-word types are stored one after another.
        // The code is unable to handle this, so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than-word types are stored one after another.
        // The code is unable to handle this, so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}
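
// Illustrative note on the __APPLE__ bailouts above: the standard AAPCS64
// (e.g. Linux) widens every stack argument to an 8-byte slot, which is what
// the stk_args += 2 accounting assumes. Apple's AArch64 ABI instead packs
// smaller-than-word stack arguments to their natural alignment, so two ints
// passed on the stack would occupy 4 bytes each; this code does not model
// that layout and returns -1 so callers can bail out.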

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  // More than 8 vector arguments are not supported for now.
  assert(total_args_passed <= Argument::n_float_register_parameters_c, "unsupported");
  assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");

  static const FloatRegister VEC_ArgReg[Argument::n_float_register_parameters_c] = {
    v0, v1, v2, v3, v4, v5, v6, v7
  };

  // On SVE, we use the same vector registers as on NEON: arguments are
  // passed in the low 128 bits. A 64-bit vector spans two 32-bit VMReg
  // slots (next(1)); anything wider spans four slots (next(3)).
  int next_reg_val = num_bits == 64 ? 1 : 3;
  for (uint i = 0; i < total_args_passed; i++) {
    VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
    regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
  }
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    }
  }
  __ pop(x, sp);
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size() / wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots */);

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
  __ mov(rscratch1, sp); // we can't use sp as the source in str
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ movw(rscratch1, ContinuationEntry::cookie_value());
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));

  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::EQ, OK);
  __ stop("incorrect sp1");
  __ bind(OK);
#endif
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
  __ add(rfp, sp, (int)ContinuationEntry::size());
}
1309 
1310 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1311 // On entry: c_rarg1 -- the continuation object
1312 //           c_rarg2 -- isContinue
1313 //           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   int& exception_offset,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& interpreted_entry_offset,
                                   int& compiled_entry_offset) {
1324   //verify_oop_args(masm, method, sig_bt, regs);
1325   Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
1326 
1327   address start = __ pc();
1328 
1329   Label call_thaw, exit;
1330 
  // i2i entry, used only in interp_only_mode
1332   interpreted_entry_offset = __ pc() - start;
1333   {
1334 
1335 #ifdef ASSERT
1336     Label is_interp_only;
1337     __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
1338     __ cbnzw(rscratch1, is_interp_only);
1339     __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1340     __ bind(is_interp_only);
1341 #endif
1342 
1343     // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1344     __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
1345     __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
1346     __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
1347     __ push_cont_fastpath(rthread);
1348 
1349     __ enter();
1350     stack_slots = 2; // will be adjusted in setup
1351     OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry,
    // so the frame would appear unsafe to a stack walker. That's okay: at the
    // very worst we'll miss an async sample, and we're in interp_only_mode anyway.
1354 
1355     fill_continuation_entry(masm);
1356 
1357     __ cbnz(c_rarg2, call_thaw);
1358 
1359     const address tr_call = __ trampoline_call(resolve);
1360     if (tr_call == nullptr) {
1361       fatal("CodeCache is full at gen_continuation_enter");
1362     }
1363 
1364     oop_maps->add_gc_map(__ pc() - start, map);
1365     __ post_call_nop();
1366 
1367     __ b(exit);
1368 
1369     address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1370     if (stub == nullptr) {
1371       fatal("CodeCache is full at gen_continuation_enter");
1372     }
1373   }
1374 
1375   // compiled entry
1376   __ align(CodeEntryAlignment);
1377   compiled_entry_offset = __ pc() - start;
1378 
1379   __ enter();
1380   stack_slots = 2; // will be adjusted in setup
1381   OopMap* map = continuation_enter_setup(masm, stack_slots);
1382   frame_complete = __ pc() - start;
1383 
1384   fill_continuation_entry(masm);
1385 
1386   __ cbnz(c_rarg2, call_thaw);
1387 
1388   const address tr_call = __ trampoline_call(resolve);
1389   if (tr_call == nullptr) {
1390     fatal("CodeCache is full at gen_continuation_enter");
1391   }
1392 
1393   oop_maps->add_gc_map(__ pc() - start, map);
1394   __ post_call_nop();
1395 
1396   __ b(exit);
1397 
1398   __ bind(call_thaw);
1399 
1400   ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1401   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1402   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1403   ContinuationEntry::_return_pc_offset = __ pc() - start;
1404   __ post_call_nop();
1405 
1406   __ bind(exit);
1407   ContinuationEntry::_cleanup_offset = __ pc() - start;
1408   continuation_enter_cleanup(masm);
1409   __ leave();
1410   __ ret(lr);
1411 
1412   /// exception handling
1413 
1414   exception_offset = __ pc() - start;
1415   {
      __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1417 
1418       continuation_enter_cleanup(masm);
1419 
1420       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1421       __ authenticate_return_address(c_rarg1);
1422       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1423 
1424       // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1425 
1426       __ mov(r1, r0); // the exception handler
      __ mov(r0, r19); // restore return value containing the exception oop
1428       __ verify_oop(r0);
1429 
1430       __ leave();
1431       __ mov(r3, lr);
1432       __ br(r1); // the exception handler
1433   }
1434 
1435   address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1436   if (stub == nullptr) {
1437     fatal("CodeCache is full at gen_continuation_enter");
1438   }
1439 }
1440 
1441 static void gen_continuation_yield(MacroAssembler* masm,
1442                                    const methodHandle& method,
1443                                    const BasicType* sig_bt,
1444                                    const VMRegPair* regs,
1445                                    OopMapSet* oop_maps,
1446                                    int& frame_complete,
1447                                    int& stack_slots,
1448                                    int& compiled_entry_offset) {
1449     enum layout {
1450       rfp_off1,
1451       rfp_off2,
1452       lr_off,
1453       lr_off2,
1454       framesize // inclusive of return address
1455     };
1456     // assert(is_even(framesize/2), "sp not 16-byte aligned");
    stack_slots = framesize / VMRegImpl::slots_per_word;
1458     assert(stack_slots == 2, "recheck layout");
1459 
1460     address start = __ pc();
1461 
1462     compiled_entry_offset = __ pc() - start;
1463     __ enter();
1464 
1465     __ mov(c_rarg1, sp);
1466 
1467     frame_complete = __ pc() - start;
1468     address the_pc = __ pc();
1469 
    __ post_call_nop(); // this must come exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup
1471 
1472     __ mov(c_rarg0, rthread);
1473     __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
1474     __ call_VM_leaf(Continuation::freeze_entry(), 2);
1475     __ reset_last_Java_frame(true);
1476 
1477     Label pinned;
1478 
1479     __ cbnz(r0, pinned);
1480 
1481     // We've succeeded, set sp to the ContinuationEntry
1482     __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1483     __ mov(sp, rscratch1);
1484     continuation_enter_cleanup(masm);
1485 
1486     __ bind(pinned); // pinned -- return to caller
1487 
1488     // handle pending exception thrown by freeze
1489     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1490     Label ok;
1491     __ cbz(rscratch1, ok);
1492     __ leave();
1493     __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1494     __ br(rscratch1);
1495     __ bind(ok);
1496 
1497     __ leave();
1498     __ ret(lr);
1499 
1500     OopMap* map = new OopMap(framesize, 1);
1501     oop_maps->add_gc_map(the_pc - start, map);
1502 }
1503 
1504 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1505   ::continuation_enter_cleanup(masm);
1506 }
1507 
1508 static void gen_special_dispatch(MacroAssembler* masm,
1509                                  const methodHandle& method,
1510                                  const BasicType* sig_bt,
1511                                  const VMRegPair* regs) {
1512   verify_oop_args(masm, method, sig_bt, regs);
1513   vmIntrinsics::ID iid = method->intrinsic_id();
1514 
1515   // Now write the args into the outgoing interpreter space
1516   bool     has_receiver   = false;
1517   Register receiver_reg   = noreg;
1518   int      member_arg_pos = -1;
1519   Register member_reg     = noreg;
1520   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1521   if (ref_kind != 0) {
1522     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1523     member_reg = r19;  // known to be free at this point
1524     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1525   } else if (iid == vmIntrinsics::_invokeBasic) {
1526     has_receiver = true;
1527   } else if (iid == vmIntrinsics::_linkToNative) {
1528     member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1529     member_reg = r19;  // known to be free at this point
1530   } else {
1531     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1532   }
1533 
1534   if (member_reg != noreg) {
1535     // Load the member_arg into register, if necessary.
1536     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1537     VMReg r = regs[member_arg_pos].first();
1538     if (r->is_stack()) {
1539       __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1540     } else {
1541       // no data motion is needed
1542       member_reg = r->as_Register();
1543     }
1544   }
1545 
1546   if (has_receiver) {
1547     // Make sure the receiver is loaded into a register.
1548     assert(method->size_of_parameters() > 0, "oob");
1549     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1550     VMReg r = regs[0].first();
1551     assert(r->is_valid(), "bad receiver arg");
1552     if (r->is_stack()) {
1553       // Porting note:  This assumes that compiled calling conventions always
1554       // pass the receiver oop in a register.  If this is not true on some
1555       // platform, pick a temp and load the receiver from stack.
1556       fatal("receiver always in a register");
1557       receiver_reg = r2;  // known to be free at this point
1558       __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1559     } else {
1560       // no data motion is needed
1561       receiver_reg = r->as_Register();
1562     }
1563   }
1564 
1565   // Figure out which address we are really jumping to:
1566   MethodHandles::generate_method_handle_dispatch(masm, iid,
1567                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1568 }
1569 
1570 // ---------------------------------------------------------------------------
1571 // Generate a native wrapper for a given method.  The method takes arguments
1572 // in the Java compiled code convention, marshals them to the native
1573 // convention (handlizes oops, etc), transitions to native, makes the call,
1574 // returns to java state (possibly blocking), unhandlizes any result and
1575 // returns.
1576 //
1577 // Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee. Critical native functions leave the state _in_Java,
// since they block out GC.
// Some other parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for them
// to be thrown.
1585 //
1586 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1587                                                 const methodHandle& method,
1588                                                 int compile_id,
1589                                                 BasicType* in_sig_bt,
1590                                                 VMRegPair* in_regs,
1591                                                 BasicType ret_type) {
1592   if (method->is_continuation_native_intrinsic()) {
1593     int exception_offset = -1;
1594     OopMapSet* oop_maps = new OopMapSet();
1595     int frame_complete = -1;
1596     int stack_slots = -1;
1597     int interpreted_entry_offset = -1;
1598     int vep_offset = -1;
1599     if (method->is_continuation_enter_intrinsic()) {
1600       gen_continuation_enter(masm,
1601                              method,
1602                              in_sig_bt,
1603                              in_regs,
1604                              exception_offset,
1605                              oop_maps,
1606                              frame_complete,
1607                              stack_slots,
1608                              interpreted_entry_offset,
1609                              vep_offset);
1610     } else if (method->is_continuation_yield_intrinsic()) {
1611       gen_continuation_yield(masm,
1612                              method,
1613                              in_sig_bt,
1614                              in_regs,
1615                              oop_maps,
1616                              frame_complete,
1617                              stack_slots,
1618                              vep_offset);
1619     } else {
1620       guarantee(false, "Unknown Continuation native intrinsic");
1621     }
1622 
1623 #ifdef ASSERT
1624     if (method->is_continuation_enter_intrinsic()) {
1625       assert(interpreted_entry_offset != -1, "Must be set");
1626       assert(exception_offset != -1,         "Must be set");
1627     } else {
1628       assert(interpreted_entry_offset == -1, "Must be unset");
1629       assert(exception_offset == -1,         "Must be unset");
1630     }
1631     assert(frame_complete != -1,    "Must be set");
1632     assert(stack_slots != -1,       "Must be set");
1633     assert(vep_offset != -1,        "Must be set");
1634 #endif
1635 
1636     __ flush();
1637     nmethod* nm = nmethod::new_native_nmethod(method,
1638                                               compile_id,
1639                                               masm->code(),
1640                                               vep_offset,
1641                                               frame_complete,
1642                                               stack_slots,
1643                                               in_ByteSize(-1),
1644                                               in_ByteSize(-1),
1645                                               oop_maps,
1646                                               exception_offset);
1647     if (nm == nullptr) return nm;
1648     if (method->is_continuation_enter_intrinsic()) {
1649       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1650     } else if (method->is_continuation_yield_intrinsic()) {
1651       _cont_doYield_stub = nm;
1652     } else {
1653       guarantee(false, "Unknown Continuation native intrinsic");
1654     }
1655     return nm;
1656   }
1657 
1658   if (method->is_method_handle_intrinsic()) {
1659     vmIntrinsics::ID iid = method->intrinsic_id();
1660     intptr_t start = (intptr_t)__ pc();
1661     int vep_offset = ((intptr_t)__ pc()) - start;
1662 
1663     // First instruction must be a nop as it may need to be patched on deoptimisation
1664     __ nop();
1665     gen_special_dispatch(masm,
1666                          method,
1667                          in_sig_bt,
1668                          in_regs);
1669     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1670     __ flush();
1671     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1672     return nmethod::new_native_nmethod(method,
1673                                        compile_id,
1674                                        masm->code(),
1675                                        vep_offset,
1676                                        frame_complete,
1677                                        stack_slots / VMRegImpl::slots_per_word,
1678                                        in_ByteSize(-1),
1679                                        in_ByteSize(-1),
1680                                        nullptr);
1681   }
1682   address native_func = method->native_function();
1683   assert(native_func != nullptr, "must have function");
1684 
1685   // An OopMap for lock (and class if static)
1686   OopMapSet *oop_maps = new OopMapSet();
1687   intptr_t start = (intptr_t)__ pc();
1688 
  // We have received a description of where all the java args are located
1690   // on entry to the wrapper. We need to convert these args to where
1691   // the jni function will expect them. To figure out where they go
1692   // we convert the java signature to a C signature by inserting
1693   // the hidden arguments as arg[0] and possibly arg[1] (static method)
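  // (for a static method the class mirror is inserted as arg[1], giving
  //  (JNIEnv*, jclass, args...); for an instance method the receiver is
  //  already the first Java arg)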
1694 
1695   const int total_in_args = method->size_of_parameters();
1696   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1697 
1698   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1699   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1700 
1701   int argc = 0;
1702   out_sig_bt[argc++] = T_ADDRESS;
1703   if (method->is_static()) {
1704     out_sig_bt[argc++] = T_OBJECT;
1705   }
1706 
1707   for (int i = 0; i < total_in_args ; i++ ) {
1708     out_sig_bt[argc++] = in_sig_bt[i];
1709   }
1710 
1711   // Now figure out where the args must be stored and how much stack space
1712   // they require.
1713   int out_arg_slots;
1714   out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1715 
1716   if (out_arg_slots < 0) {
1717     return nullptr;
1718   }
1719 
1720   // Compute framesize for the wrapper.  We need to handlize all oops in
1721   // incoming registers
1722 
1723   // Calculate the total number of stack slots we will need.
1724 
1725   // First count the abi requirement plus all of the outgoing args
1726   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1727 
1728   // Now the space for the inbound oop handle area
1729   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1730 
1731   int oop_handle_offset = stack_slots;
1732   stack_slots += total_save_slots;
1733 
1734   // Now any space we need for handlizing a klass if static method
1735 
1736   int klass_slot_offset = 0;
1737   int klass_offset = -1;
1738   int lock_slot_offset = 0;
1739   bool is_static = false;
1740 
1741   if (method->is_static()) {
1742     klass_slot_offset = stack_slots;
1743     stack_slots += VMRegImpl::slots_per_word;
1744     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1745     is_static = true;
1746   }
1747 
1748   // Plus a lock if needed
1749 
1750   if (method->is_synchronized()) {
1751     lock_slot_offset = stack_slots;
1752     stack_slots += VMRegImpl::slots_per_word;
1753   }
1754 
1755   // Now a place (+2) to save return values or temp during shuffling
1756   // + 4 for return address (which we own) and saved rfp
1757   stack_slots += 6;
1758 
1759   // Ok The space we have allocated will look like:
1760   //
1761   //
1762   // FP-> |                     |
1763   //      |---------------------|
1764   //      | 2 slots for moves   |
1765   //      |---------------------|
1766   //      | lock box (if sync)  |
1767   //      |---------------------| <- lock_slot_offset
1768   //      | klass (if static)   |
1769   //      |---------------------| <- klass_slot_offset
1770   //      | oopHandle area      |
1771   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1772   //      | outbound memory     |
1773   //      | based arguments     |
1774   //      |                     |
1775   //      |---------------------|
1776   //      |                     |
1777   // SP-> | out_preserved_slots |
1778   //
1779   //
1780 
1781 
  // Now compute the actual number of stack words we need, rounding up to keep
  // the stack properly aligned.
1784   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1785 
1786   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1787 
1788   // First thing make an ic check to see if we should even be here
1789 
1790   // We are free to use all registers as temps without saving them and
1791   // restoring them except rfp. rfp is the only callee save register
1792   // as far as the interpreter and the compiler(s) are concerned.
1793 
1794   const Register receiver = j_rarg0;
1795 
1796   Label exception_pending;
1797 
1798   assert_different_registers(receiver, rscratch1);
1799   __ verify_oop(receiver);
1800   __ ic_check(8 /* end_alignment */);
1801 
1802   // Verified entry point must be aligned
1803   int vep_offset = ((intptr_t)__ pc()) - start;
1804 
1805   // If we have to make this method not-entrant we'll overwrite its
1806   // first instruction with a jump.  For this action to be legal we
1807   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1808   // SVC, HVC, or SMC.  Make it a NOP.
1809   __ nop();
1810 
1811   if (method->needs_clinit_barrier()) {
1812     assert(VM_Version::supports_fast_class_init_checks(), "sanity");
1813     Label L_skip_barrier;
1814     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1815     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1816     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1817 
1818     __ bind(L_skip_barrier);
1819   }
1820 
1821   // Generate stack overflow check
1822   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1823 
1824   // Generate a new frame for the wrapper.
1825   __ enter();
1826   // -2 because return address is already present and so is saved rfp
1827   __ sub(sp, sp, stack_size - 2*wordSize);
1828 
1829   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1830   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1831 
1832   // Frame is now completed as far as size and linkage.
1833   int frame_complete = ((intptr_t)__ pc()) - start;
1834 
1835   // We use r20 as the oop handle for the receiver/klass
1836   // It is callee save so it survives the call to native
1837 
1838   const Register oop_handle_reg = r20;
1839 
1840   //
1841   // We immediately shuffle the arguments so that any vm call we have to
1842   // make from here on out (sync slow path, jvmti, etc.) we will have
1843   // captured the oops from our caller and have a valid oopMap for
1844   // them.
1845 
1846   // -----------------
1847   // The Grand Shuffle
1848 
1849   // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument the c calling
1851   // convention always has at least one more (and two for static) arguments than Java.
1852   // Therefore if we move the args from java -> c backwards then we will never have
1853   // a register->register conflict and we don't have to build a dependency graph
1854   // and figure out how to break any cycles.
1855   //
1856 
1857   // Record esp-based slot for receiver on stack for non-static methods
1858   int receiver_offset = -1;
1859 
1860   // This is a trick. We double the stack slots so we can claim
1861   // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
1863   // sure we can capture all the incoming oop args from the
1864   // caller.
1865   //
1866   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1867 
1868   // Mark location of rfp (someday)
1869   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1870 
1871 
1872   int float_args = 0;
1873   int int_args = 0;
1874 
1875 #ifdef ASSERT
1876   bool reg_destroyed[Register::number_of_registers];
1877   bool freg_destroyed[FloatRegister::number_of_registers];
1878   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1879     reg_destroyed[r] = false;
1880   }
1881   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1882     freg_destroyed[f] = false;
1883   }
1884 
1885 #endif /* ASSERT */
1886 
1887   // For JNI natives the incoming and outgoing registers are offset upwards.
1888   GrowableArray<int> arg_order(2 * total_in_args);
1889 
1890   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1891     arg_order.push(i);
1892     arg_order.push(c_arg);
1893   }
1894 
1895   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1896     int i = arg_order.at(ai);
1897     int c_arg = arg_order.at(ai + 1);
1898     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1899     assert(c_arg != -1 && i != -1, "wrong order");
1900 #ifdef ASSERT
1901     if (in_regs[i].first()->is_Register()) {
1902       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1903     } else if (in_regs[i].first()->is_FloatRegister()) {
1904       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1905     }
1906     if (out_regs[c_arg].first()->is_Register()) {
1907       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1908     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1909       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1910     }
1911 #endif /* ASSERT */
1912     switch (in_sig_bt[i]) {
1913       case T_ARRAY:
1914       case T_OBJECT:
1915         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1916                        ((i == 0) && (!is_static)),
1917                        &receiver_offset);
1918         int_args++;
1919         break;
1920       case T_VOID:
1921         break;
1922 
1923       case T_FLOAT:
1924         __ float_move(in_regs[i], out_regs[c_arg]);
1925         float_args++;
1926         break;
1927 
1928       case T_DOUBLE:
1929         assert( i + 1 < total_in_args &&
1930                 in_sig_bt[i + 1] == T_VOID &&
1931                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1932         __ double_move(in_regs[i], out_regs[c_arg]);
1933         float_args++;
1934         break;
1935 
1936       case T_LONG :
1937         __ long_move(in_regs[i], out_regs[c_arg]);
1938         int_args++;
1939         break;
1940 
1941       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1942 
1943       default:
1944         __ move32_64(in_regs[i], out_regs[c_arg]);
1945         int_args++;
1946     }
1947   }
1948 
1949   // point c_arg at the first arg that is already loaded in case we
1950   // need to spill before we call out
1951   int c_arg = total_c_args - total_in_args;
1952 
1953   // Pre-load a static method's oop into c_rarg1.
1954   if (method->is_static()) {
1955 
1956     //  load oop into a register
1957     __ movoop(c_rarg1,
1958               JNIHandles::make_local(method->method_holder()->java_mirror()));
1959 
    // Now handlize the static class mirror; it's known to be not-null.
1961     __ str(c_rarg1, Address(sp, klass_offset));
1962     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1963 
1964     // Now get the handle
1965     __ lea(c_rarg1, Address(sp, klass_offset));
1966     // and protect the arg if we must spill
1967     c_arg--;
1968   }
1969 
1970   // Change state to native (we save the return address in the thread, since it might not
1971   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1972   // points into the right code segment. It does not have to be the correct return pc.
1973   // We use the same pc/oopMap repeatedly when we call out.
1974 
1975   Label native_return;
1976   if (method->is_object_wait0()) {
1977     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1978     __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1979   } else {
1980     intptr_t the_pc = (intptr_t) __ pc();
1981     oop_maps->add_gc_map(the_pc - start, map);
1982 
1983     __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
1984   }
1985 
1986   Label dtrace_method_entry, dtrace_method_entry_done;
1987   if (DTraceMethodProbes) {
1988     __ b(dtrace_method_entry);
1989     __ bind(dtrace_method_entry_done);
1990   }
1991 
1992   // RedefineClasses() tracing support for obsolete method entry
1993   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1994     // protect the args we've loaded
1995     save_args(masm, total_c_args, c_arg, out_regs);
1996     __ mov_metadata(c_rarg1, method());
1997     __ call_VM_leaf(
1998       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1999       rthread, c_rarg1);
2000     restore_args(masm, total_c_args, c_arg, out_regs);
2001   }
2002 
2003   // Lock a synchronized method
2004 
2005   // Register definitions used by locking and unlocking
2006 
2007   const Register swap_reg = r0;
2008   const Register obj_reg  = r19;  // Will contain the oop
2009   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2010   const Register old_hdr  = r13;  // value of old header at unlock time
2011   const Register lock_tmp = r14;  // Temporary used by fast_lock/unlock
2012   const Register tmp = lr;
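  // Note: lr is safe to use as a temp here; it is reloaded from the frame by
  // leave() before we return.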
2013 
2014   Label slow_path_lock;
2015   Label lock_done;
2016 
2017   if (method->is_synchronized()) {
2018     // Get the handle (the 2nd argument)
2019     __ mov(oop_handle_reg, c_rarg1);
2020 
2021     // Get address of the box
2022 
2023     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2024 
2025     // Load the oop from the handle
2026     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2027 
2028     __ fast_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
2029 
2030     // Slow path will re-enter here
2031     __ bind(lock_done);
2032   }
2033 
2034 
2035   // Finally just about ready to make the JNI call
2036 
2037   // get JNIEnv* which is first argument to native
2038   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
2039 
2040   // Now set thread in native
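  // (stlrw is a store-release, so the state change is ordered after prior accesses)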
2041   __ mov(rscratch1, _thread_in_native);
2042   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2043   __ stlrw(rscratch1, rscratch2);
2044 
2045   __ rt_call(native_func);
2046 
2047   // Verify or restore cpu control state after JNI call
2048   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
2049 
2050   // Unpack native results.
2051   switch (ret_type) {
2052   case T_BOOLEAN: __ c2bool(r0);                     break;
2053   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
2054   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
2055   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
2056   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
2057   case T_DOUBLE :
2058   case T_FLOAT  :
    // Result is in v0; we'll save it as needed
2060     break;
2061   case T_ARRAY:                 // Really a handle
2062   case T_OBJECT:                // Really a handle
2063       break; // can't de-handlize until after safepoint check
2064   case T_VOID: break;
2065   case T_LONG: break;
2066   default       : ShouldNotReachHere();
2067   }
2068 
2069   Label safepoint_in_progress, safepoint_in_progress_done;
2070 
2071   // Switch thread to "native transition" state before reading the synchronization state.
2072   // This additional state is necessary because reading and testing the synchronization
2073   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2074   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2075   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2076   //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
2078   __ mov(rscratch1, _thread_in_native_trans);
2079 
2080   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
2081 
2082   // Force this write out before the read below
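  // (when UseSystemMemoryBarrier is enabled, the VM issues a process-wide
  //  barrier instead, so the local dmb can be elided)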
2083   if (!UseSystemMemoryBarrier) {
2084     __ dmb(Assembler::ISH);
2085   }
2086 
2087   __ verify_sve_vector_length();
2088 
2089   // Check for safepoint operation in progress and/or pending suspend requests.
2090   {
2091     // No need for acquire as Java threads always disarm themselves.
2092     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
2093     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
2094     __ cbnzw(rscratch1, safepoint_in_progress);
2095     __ bind(safepoint_in_progress_done);
2096   }
2097 
2098   // change thread state
2099   __ mov(rscratch1, _thread_in_Java);
2100   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2101   __ stlrw(rscratch1, rscratch2);
2102 
2103   if (method->is_object_wait0()) {
2104     // Check preemption for Object.wait()
2105     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
2106     __ cbz(rscratch1, native_return);
2107     __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
2108     __ br(rscratch1);
2109     __ bind(native_return);
2110 
2111     intptr_t the_pc = (intptr_t) __ pc();
2112     oop_maps->add_gc_map(the_pc - start, map);
2113   }
2114 
2115   Label reguard;
2116   Label reguard_done;
2117   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
2118   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
2119   __ br(Assembler::EQ, reguard);
2120   __ bind(reguard_done);
2121 
2122   // native result if any is live
2123 
2124   // Unlock
2125   Label unlock_done;
2126   Label slow_path_unlock;
2127   if (method->is_synchronized()) {
2128 
2129     // Get locked oop from the handle we passed to jni
2130     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2131 
    // Must save r0 if it is live now because cmpxchg must use it
2133     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2134       save_native_result(masm, ret_type, stack_slots);
2135     }
2136 
2137     __ fast_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
2138 
2139     // slow path re-enters here
2140     __ bind(unlock_done);
2141     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2142       restore_native_result(masm, ret_type, stack_slots);
2143     }
2144   }
2145 
2146   Label dtrace_method_exit, dtrace_method_exit_done;
2147   if (DTraceMethodProbes) {
2148     __ b(dtrace_method_exit);
2149     __ bind(dtrace_method_exit_done);
2150   }
2151 
2152   __ reset_last_Java_frame(false);
2153 
2154   // Unbox oop result, e.g. JNIHandles::resolve result.
2155   if (is_reference_type(ret_type)) {
2156     __ resolve_jobject(r0, r1, r2);
2157   }
2158 
2159   if (CheckJNICalls) {
2160     // clear_pending_jni_exception_check
2161     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2162   }
2163 
2164   // reset handle block
2165   __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2166   __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
2167 
2168   __ leave();
2169 
2170   #if INCLUDE_JFR
2171   // We need to do a poll test after unwind in case the sampler
2172   // managed to sample the native frame after returning to Java.
2173   Label L_return;
2174   __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
2175   address poll_test_pc = __ pc();
2176   __ relocate(relocInfo::poll_return_type);
2177   __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), L_return);
2178   assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
2179     "polling page return stub not created yet");
2180   address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
2181   __ adr(rscratch1, InternalAddress(poll_test_pc));
2182   __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));
2183   __ far_jump(RuntimeAddress(stub));
2184   __ bind(L_return);
2185 #endif // INCLUDE_JFR
2186 
2187   // Any exception pending?
2188   __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2189   __ cbnz(rscratch1, exception_pending);
2190 
2191   // We're done
2192   __ ret(lr);
2193 
2194   // Unexpected paths are out of line and go here
2195 
2196   // forward the exception
2197   __ bind(exception_pending);
2198 
2199   // and forward the exception
2200   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2201 
2202   // Slow path locking & unlocking
2203   if (method->is_synchronized()) {
2204 
2205     __ block_comment("Slow path lock {");
2206     __ bind(slow_path_lock);
2207 
    // last_Java_frame is set up. No exceptions, so do a vanilla call rather than call_VM.
2209     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2210 
2211     // protect the args we've loaded
2212     save_args(masm, total_c_args, c_arg, out_regs);
2213 
2214     __ mov(c_rarg0, obj_reg);
2215     __ mov(c_rarg1, lock_reg);
2216     __ mov(c_rarg2, rthread);
2217 
2218     // Not a leaf but we have last_Java_frame setup as we want.
2219     // We don't want to unmount in case of contention since that would complicate preserving
2220     // the arguments that had already been marshalled into the native convention. So we force
2221     // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
2222     // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
2223     __ push_cont_fastpath();
2224     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2225     __ pop_cont_fastpath();
2226     restore_args(masm, total_c_args, c_arg, out_regs);
2227 
2228 #ifdef ASSERT
2229     { Label L;
2230       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2231       __ cbz(rscratch1, L);
2232       __ stop("no pending exception allowed on exit from monitorenter");
2233       __ bind(L);
2234     }
2235 #endif
2236     __ b(lock_done);
2237 
2238     __ block_comment("} Slow path lock");
2239 
2240     __ block_comment("Slow path unlock {");
2241     __ bind(slow_path_unlock);
2242 
    // If we haven't already saved the native result we must save it now, as the
    // float/vector registers are still exposed.
2245 
2246     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2247       save_native_result(masm, ret_type, stack_slots);
2248     }
2249 
2250     __ mov(c_rarg2, rthread);
2251     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2252     __ mov(c_rarg0, obj_reg);
2253 
2254     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2255     // NOTE that obj_reg == r19 currently
2256     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2257     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2258 
2259     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2260 
2261 #ifdef ASSERT
2262     {
2263       Label L;
2264       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2265       __ cbz(rscratch1, L);
2266       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2267       __ bind(L);
2268     }
2269 #endif /* ASSERT */
2270 
2271     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2272 
2273     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2274       restore_native_result(masm, ret_type, stack_slots);
2275     }
2276     __ b(unlock_done);
2277 
2278     __ block_comment("} Slow path unlock");
2279 
2280   } // synchronized
2281 
2282   // SLOW PATH Reguard the stack if needed
2283 
2284   __ bind(reguard);
2285   save_native_result(masm, ret_type, stack_slots);
2286   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2287   restore_native_result(masm, ret_type, stack_slots);
2288   // and continue
2289   __ b(reguard_done);
2290 
2291   // SLOW PATH safepoint
2292   {
2293     __ block_comment("safepoint {");
2294     __ bind(safepoint_in_progress);
2295 
    // Don't use call_VM, as it will see a possible pending exception and forward it,
    // never returning here and preventing us from clearing _last_native_pc down below.
2298     //
2299     save_native_result(masm, ret_type, stack_slots);
2300     __ mov(c_rarg0, rthread);
2301 #ifndef PRODUCT
    assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2303 #endif
2304     __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2305     __ blr(rscratch1);
2306 
2307     // Restore any method result value
2308     restore_native_result(masm, ret_type, stack_slots);
2309 
2310     __ b(safepoint_in_progress_done);
2311     __ block_comment("} safepoint");
2312   }
2313 
2314   // SLOW PATH dtrace support
2315   if (DTraceMethodProbes) {
2316     {
2317       __ block_comment("dtrace entry {");
2318       __ bind(dtrace_method_entry);
2319 
      // We have all of the arguments set up at this point. We must not clobber any
      // argument registers here, so save and restore them around the call; the oops
      // among them have already been handlized above.
2322 
2323       save_args(masm, total_c_args, c_arg, out_regs);
2324       __ mov_metadata(c_rarg1, method());
2325       __ call_VM_leaf(
2326         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2327         rthread, c_rarg1);
2328       restore_args(masm, total_c_args, c_arg, out_regs);
2329       __ b(dtrace_method_entry_done);
2330       __ block_comment("} dtrace entry");
2331     }
2332 
2333     {
2334       __ block_comment("dtrace exit {");
2335       __ bind(dtrace_method_exit);
2336       save_native_result(masm, ret_type, stack_slots);
2337       __ mov_metadata(c_rarg1, method());
2338       __ call_VM_leaf(
2339         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2340         rthread, c_rarg1);
2341       restore_native_result(masm, ret_type, stack_slots);
2342       __ b(dtrace_method_exit_done);
2343       __ block_comment("} dtrace exit");
2344     }
2345   }
2346 
2347   __ flush();
2348 
2349   nmethod *nm = nmethod::new_native_nmethod(method,
2350                                             compile_id,
2351                                             masm->code(),
2352                                             vep_offset,
2353                                             frame_complete,
2354                                             stack_slots / VMRegImpl::slots_per_word,
2355                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2356                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2357                                             oop_maps);
2358 
2359   return nm;
2360 }
2361 
// this function returns the adjustment (in number of words) to apply to a c2i adapter
// activation for use during deoptimization
2364 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2365   assert(callee_locals >= callee_parameters,
2366           "test and remove; got more parms than locals");
2367   if (callee_locals < callee_parameters)
2368     return 0;                   // No adjustment for negative locals
2369   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2370   // diff is counted in stack words
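  // round up to an even number of words so the extended frame stays 16-byte aligned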
2371   return align_up(diff, 2);
2372 }
2373 
2374 
2375 //------------------------------generate_deopt_blob----------------------------
2376 void SharedRuntime::generate_deopt_blob() {
2377   // Allocate space for the code
2378   ResourceMark rm;
2379   // Setup code generation tools
2380   int pad = 0;
2381 #if INCLUDE_JVMCI
2382   if (EnableJVMCI) {
2383     pad += 512; // Increase the buffer size when compiling for JVMCI
2384   }
2385 #endif
2386   const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
2387   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id);
2388   if (blob != nullptr) {
2389     _deopt_blob = blob->as_deoptimization_blob();
2390     return;
2391   }
2392 
2393   CodeBuffer buffer(name, 2048+pad, 1024);
2394   MacroAssembler* masm = new MacroAssembler(&buffer);
2395   int frame_size_in_words;
2396   OopMap* map = nullptr;
2397   OopMapSet *oop_maps = new OopMapSet();
2398   RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2399 
2400   // -------------
2401   // This code enters when returning to a de-optimized nmethod.  A return
2402   // address has been pushed on the stack, and return values are in
2403   // registers.
2404   // If we are doing a normal deopt then we were called from the patched
2405   // nmethod from the point we returned to the nmethod. So the return
2406   // address on the stack is wrong by NativeCall::instruction_size
2407   // We will adjust the value so it looks like we have the original return
2408   // address on the stack (like when we eagerly deoptimized).
2409   // In the case of an exception pending when deoptimizing, we enter
2410   // with a return address on the stack that points after the call we patched
2411   // into the exception handler. We have the following register state from,
  // e.g., the forward exception stub (see generate_forward_exception in stubGenerator_aarch64.cpp).
2413   //    r0: exception oop
2414   //    r19: exception handler
2415   //    r3: throwing pc
2416   // So in this case we simply jam r3 into the useless return address and
2417   // the stack looks just like we want.
2418   //
2419   // At this point we need to de-opt.  We save the argument return
2420   // registers.  We call the first C routine, fetch_unroll_info().  This
2421   // routine captures the return values and returns a structure which
2422   // describes the current frame size and the sizes of all replacement frames.
2423   // The current frame is compiled code and may contain many inlined
2424   // functions, each with their own JVM state.  We pop the current frame, then
2425   // push all the new frames.  Then we call the C routine unpack_frames() to
2426   // populate these frames.  Finally unpack_frames() returns us the new target
2427   // address.  Notice that callee-save registers are BLOWN here; they have
2428   // already been captured in the vframeArray at the time the return PC was
2429   // patched.
2430   address start = __ pc();
2431   Label cont;
2432 
2433   // Prolog for non exception case!
2434 
2435   // Save everything in sight.
2436   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2437 
2438   // Normal deoptimization.  Save exec mode for unpack_frames.
2439   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2440   __ b(cont);
2441 
2442   int reexecute_offset = __ pc() - start;
2443 #if INCLUDE_JVMCI && !defined(COMPILER1)
2444   if (UseJVMCICompiler) {
2445     // JVMCI does not use this kind of deoptimization
2446     __ should_not_reach_here();
2447   }
2448 #endif
2449 
2450   // Reexecute case
  // return address is the pc that describes which bci to re-execute at
2452 
2453   // No need to update map as each call to save_live_registers will produce identical oopmap
2454   (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2455 
2456   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2457   __ b(cont);
2458 
2459 #if INCLUDE_JVMCI
2460   Label after_fetch_unroll_info_call;
2461   int implicit_exception_uncommon_trap_offset = 0;
2462   int uncommon_trap_offset = 0;
2463 
2464   if (EnableJVMCI) {
2465     implicit_exception_uncommon_trap_offset = __ pc() - start;
2466 
2467     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2468     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2469 
2470     uncommon_trap_offset = __ pc() - start;
2471 
2472     // Save everything in sight.
2473     reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2474     // fetch_unroll_info needs to call last_java_frame()
2475     Label retaddr;
2476     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2477 
2478     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2479     __ movw(rscratch1, -1);
2480     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2481 
2482     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2483     __ mov(c_rarg0, rthread);
2484     __ movw(c_rarg2, rcpool); // exec mode
2485     __ lea(rscratch1,
2486            RuntimeAddress(CAST_FROM_FN_PTR(address,
2487                                            Deoptimization::uncommon_trap)));
2488     __ blr(rscratch1);
2489     __ bind(retaddr);
2490     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2491 
2492     __ reset_last_Java_frame(false);
2493 
2494     __ b(after_fetch_unroll_info_call);
2495   } // EnableJVMCI
2496 #endif // INCLUDE_JVMCI
2497 
2498   int exception_offset = __ pc() - start;
2499 
2500   // Prolog for exception case
2501 
2502   // all registers are dead at this entry point, except for r0, and
2503   // r3 which contain the exception oop and exception pc
2504   // respectively.  Set them in TLS and fall thru to the
2505   // unpack_with_exception_in_tls entry point.
2506 
2507   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2508   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2509 
2510   int exception_in_tls_offset = __ pc() - start;
2511 
2512   // new implementation because exception oop is now passed in JavaThread
2513 
2514   // Prolog for exception case
2515   // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
2517   // tos: stack at point of call to method that threw the exception (i.e. only
2518   // args are on the stack, no return address)
2519 
2520   // The return address pushed by save_live_registers will be patched
2521   // later with the throwing pc. The correct value is not available
2522   // now because loading it from memory would destroy registers.
2523 
2524   // NB: The SP at this point must be the SP of the method that is
2525   // being deoptimized.  Deoptimization assumes that the frame created
2526   // here by save_live_registers is immediately below the method's SP.
2527   // This is a somewhat fragile mechanism.
2528 
2529   // Save everything in sight.
2530   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2531 
2532   // Now it is safe to overwrite any register
2533 
2534   // Deopt during an exception.  Save exec mode for unpack_frames.
2535   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2536 
2537   // load throwing pc from JavaThread and patch it as the return address
2538   // of the current frame. Then clear the field in JavaThread
2539   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2540   __ protect_return_address(r3);
2541   __ str(r3, Address(rfp, wordSize));
2542   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2543 
2544 #ifdef ASSERT
2545   // verify that there is really an exception oop in JavaThread
2546   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2547   __ verify_oop(r0);
2548 
2549   // verify that there is no pending exception
2550   Label no_pending_exception;
2551   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2552   __ cbz(rscratch1, no_pending_exception);
2553   __ stop("must not have pending exception here");
2554   __ bind(no_pending_exception);
2555 #endif
2556 
2557   __ bind(cont);
2558 
2559   // Call C code.  Need thread and this frame, but NOT official VM entry
2560   // crud.  We cannot block on this call, no GC can happen.
2561   //
2562   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2563 
2564   // fetch_unroll_info needs to call last_java_frame().
2565 
2566   Label retaddr;
2567   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2568 #ifdef ASSERT
2569   { Label L;
2570     __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2571     __ cbz(rscratch1, L);
2572     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2573     __ bind(L);
2574   }
2575 #endif // ASSERT
2576   __ mov(c_rarg0, rthread);
2577   __ mov(c_rarg1, rcpool);
2578   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2579   __ blr(rscratch1);
2580   __ bind(retaddr);
2581 
2582   // Need to have an oopmap that tells fetch_unroll_info where to
2583   // find any register it might need.
2584   oop_maps->add_gc_map(__ pc() - start, map);
2585 
2586   __ reset_last_Java_frame(false);
2587 
2588 #if INCLUDE_JVMCI
2589   if (EnableJVMCI) {
2590     __ bind(after_fetch_unroll_info_call);
2591   }
2592 #endif
2593 
2594   // Load UnrollBlock* into r5
2595   __ mov(r5, r0);
2596 
2597   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
  Label noException;
2599   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2600   __ br(Assembler::NE, noException);
2601   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // QQQ this is useless; it was null above
2603   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2604   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2605   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2606 
2607   __ verify_oop(r0);
2608 
2609   // Overwrite the result registers with the exception results.
2610   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2611   // I think this is useless
2612   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2613 
2614   __ bind(noException);
2615 
2616   // Only register save data is on the stack.
2617   // Now restore the result registers.  Everything else is either dead
2618   // or captured in the vframeArray.
2619 
2620   // Restore fp result register
2621   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2622   // Restore integer result register
2623   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2624 
2625   // Pop all of the register save area off the stack
2626   __ add(sp, sp, frame_size_in_words * wordSize);
2627 
  // All of the register save area has been popped off the stack. Only the
2629   // return address remains.
2630 
2631   // Pop all the frames we must move/replace.
2632   //
2633   // Frame picture (youngest to oldest)
2634   // 1: self-frame (no frame link)
2635   // 2: deopting frame  (no frame link)
2636   // 3: caller of deopting frame (could be compiled/interpreted).
2637   //
2638   // Note: by leaving the return address of self-frame on the stack
2639   // and using the size of frame 2 to adjust the stack
2640   // when we are done the return to frame 3 will still be on the stack.
2641 
2642   // Pop deoptimized frame
2643   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
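  // Leave the saved rfp/return-address pair out of the adjustment; it is popped
  // by the ldp below, which restores rfp and discards the stale pc.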
2644   __ sub(r2, r2, 2 * wordSize);
2645   __ add(sp, sp, r2);
2646   __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2647 
2648 #ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need, so this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
2652   __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2653   __ bang_stack_size(r19, r2);
2654 #endif
2655   // Load address of array of frame pcs into r2
2656   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2657 
2658   // Trash the old pc
2659   // __ addptr(sp, wordSize);  FIXME ????
2660 
2661   // Load address of array of frame sizes into r4
2662   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2663 
2664   // Load counter into r3
2665   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2666 
  // Now adjust the caller's stack to make up for the extra locals, but
  // record the original sp so that we can save it in the skeletal
  // interpreter frame. The stack walking of interpreter_sender will then
  // get the unextended sp value and not the "real" sp value.
2671 
2672   const Register sender_sp = r6;
2673 
2674   __ mov(sender_sp, sp);
2675   __ ldrw(r19, Address(r5,
2676                        Deoptimization::UnrollBlock::
2677                        caller_adjustment_offset()));
2678   __ sub(sp, sp, r19);
2679 
2680   // Push interpreter frames in a loop
2681   __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2682   __ mov(rscratch2, rscratch1);
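  // Each iteration builds one skeletal interpreter frame:
  //   enter()          pushes the caller's fp and the pc just loaded into lr
  //   sub(sp, sp, r19) reserves the rest of the frame (size minus those two words)
  //   the last_sp slot is zeroed and fixed up later by layout_activation_impl
  //   the sender_sp slot records the unextended sp of the frame above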
2683   Label loop;
2684   __ bind(loop);
2685   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2686   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2687   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2688   __ enter();                           // Save old & set new fp
2689   __ sub(sp, sp, r19);                  // Prolog
2690   // This value is corrected by layout_activation_impl
2691   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2692   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2693   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2694   __ sub(r3, r3, 1);                   // Decrement counter
2695   __ cbnz(r3, loop);
2696 
  // Re-push self-frame
2698   __ ldr(lr, Address(r2));
2699   __ enter();
2700 
2701   // Allocate a full sized register save area.  We subtract 2 because
2702   // enter() just pushed 2 words
2703   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2704 
2705   // Restore frame locals after moving the frame
2706   __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2707   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2708 
2709   // Call C code.  Need thread but NOT official VM entry
2710   // crud.  We cannot block on this call, no GC can happen.  Call should
2711   // restore return values to their stack-slots with the new SP.
2712   //
2713   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2714 
2715   // Use rfp because the frames look interpreted now
  // We don't need the precise return PC here; it just needs to point into
  // this code blob.
2717   address the_pc = __ pc();
2718   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2719 
2720   __ mov(c_rarg0, rthread);
2721   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2722   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2723   __ blr(rscratch1);
2724 
2725   // Set an oopmap for the call site
2726   // Use the same PC we used for the last java frame
2727   oop_maps->add_gc_map(the_pc - start,
                       new OopMap(frame_size_in_words, 0));
2729 
2730   // Clear fp AND pc
2731   __ reset_last_Java_frame(true);
2732 
2733   // Collect return values
2734   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2735   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2736   // I think this is useless (throwing pc?)
2737   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2738 
2739   // Pop self-frame.
2740   __ leave();                           // Epilog
2741 
2742   // Jump to interpreter
2743   __ ret(lr);
2744 
2745   // Make sure all code is generated
2746   masm->flush();
2747 
2748   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2749   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2750 #if INCLUDE_JVMCI
2751   if (EnableJVMCI) {
2752     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2753     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2754   }
2755 #endif
2756 
2757   AOTCodeCache::store_code_blob(*_deopt_blob, AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id);
2758 }
2759 
2760 // Number of stack slots between incoming argument block and the start of
2761 // a new frame.  The PROLOG must add this many slots to the stack.  The
// EPILOG must remove this many slots. aarch64 needs two words (i.e. four
// 32-bit VMRegImpl slots) for the return address and fp.
// TODO: think this is correct, but check
2765 uint SharedRuntime::in_preserve_stack_slots() {
2766   return 4;
2767 }
2768 
2769 uint SharedRuntime::out_preserve_stack_slots() {
2770   return 0;
2771 }
2772 
2773 
2774 VMReg SharedRuntime::thread_register() {
2775   return rthread->as_VMReg();
2776 }
2777 
2778 //------------------------------generate_handler_blob------
2779 //
// Generate a special Compile2Runtime blob that saves all registers
// and sets up an oopmap.
2782 //
2783 SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
2784   assert(is_polling_page_id(id), "expected a polling page stub id");
2785 
2786   // Allocate space for the code.  Setup code generation tools.
2787   const char* name = SharedRuntime::stub_name(id);
2788   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2789   if (blob != nullptr) {
2790     return blob->as_safepoint_blob();
2791   }
2792 
2793   ResourceMark rm;
2794   OopMapSet *oop_maps = new OopMapSet();
2795   OopMap* map;
2796   CodeBuffer buffer(name, 2048, 1024);
2797   MacroAssembler* masm = new MacroAssembler(&buffer);
2798 
2799   address start   = __ pc();
2800   address call_pc = nullptr;
2801   int frame_size_in_words;
2802   bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
2803   RegisterSaver reg_save(id == StubId::shared_polling_page_vectors_safepoint_handler_id /* save_vectors */);
2804 
2805   // When the signal occurred, the LR was either signed and stored on the stack (in which
2806   // case it will be restored from the stack before being used) or unsigned and not stored
  // on the stack. Stripping ensures we get the right value.
2808   __ strip_return_address();
2809 
2810   // Save Integer and Float registers.
2811   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2812 
2813   // The following is basically a call_VM.  However, we need the precise
2814   // address of the call in order to generate an oopmap. Hence, we do all the
2815   // work ourselves.
2816 
2817   Label retaddr;
2818   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2819 
  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.
2822 
2823   if (!cause_return) {
2824     // overwrite the return address pushed by save_live_registers
2825     // Additionally, r20 is a callee-saved register so we can look at
2826     // it later to determine if someone changed the return address for
2827     // us!
2828     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2829     __ protect_return_address(r20);
2830     __ str(r20, Address(rfp, wordSize));
2831   }
2832 
2833   // Do the call
2834   __ mov(c_rarg0, rthread);
2835   __ lea(rscratch1, RuntimeAddress(call_ptr));
2836   __ blr(rscratch1);
2837   __ bind(retaddr);
2838 
2839   // Set an oopmap for the call site.  This oopmap will map all
2840   // oop-registers and debug-info registers as callee-saved.  This
2841   // will allow deoptimization at this safepoint to find all possible
2842   // debug-info recordings, as well as let GC find all oops.
2843 
  oop_maps->add_gc_map(__ pc() - start, map);
2845 
2846   Label noException;
2847 
2848   __ reset_last_Java_frame(false);
2849 
2850   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2851 
2852   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2853   __ cbz(rscratch1, noException);
2854 
2855   // Exception pending
2856 
2857   reg_save.restore_live_registers(masm);
2858 
2859   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2860 
2861   // No exception case
2862   __ bind(noException);
2863 
2864   Label no_adjust, bail;
2865   if (!cause_return) {
2866     // If our stashed return pc was modified by the runtime we avoid touching it
2867     __ ldr(rscratch1, Address(rfp, wordSize));
2868     __ cmp(r20, rscratch1);
2869     __ br(Assembler::NE, no_adjust);
2870     __ authenticate_return_address(r20);
2871 
2872 #ifdef ASSERT
2873     // Verify the correct encoding of the poll we're about to skip.
2874     // See NativeInstruction::is_ldrw_to_zr()
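    // Bits 22..31 must be 0b1011100101, the opcode of a 32-bit
    // "ldr Wt, [Xn, #imm]", and bits 0..4 (Rt) must be 0b11111 (wzr),
    // i.e. the poll instruction is "ldr wzr, [Xn, #offset]".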
2875     __ ldrw(rscratch1, Address(r20));
2876     __ ubfx(rscratch2, rscratch1, 22, 10);
2877     __ cmpw(rscratch2, 0b1011100101);
2878     __ br(Assembler::NE, bail);
2879     __ ubfx(rscratch2, rscratch1, 0, 5);
2880     __ cmpw(rscratch2, 0b11111);
2881     __ br(Assembler::NE, bail);
2882 #endif
2883     // Adjust return pc forward to step over the safepoint poll instruction
2884     __ add(r20, r20, NativeInstruction::instruction_size);
2885     __ protect_return_address(r20);
2886     __ str(r20, Address(rfp, wordSize));
2887   }
2888 
2889   __ bind(no_adjust);
2890   // Normal exit, restore registers and exit.
2891   reg_save.restore_live_registers(masm);
2892 
2893   __ ret(lr);
2894 
2895 #ifdef ASSERT
2896   __ bind(bail);
2897   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2898 #endif
2899 
2900   // Make sure all code is generated
2901   masm->flush();
2902 
2903   // Fill-out other meta info
2904   SafepointBlob* sp_blob = SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2905 
2906   AOTCodeCache::store_code_blob(*sp_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2907   return sp_blob;
2908 }
2909 
2910 //
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2912 //
// Generate a stub that calls into the VM to find out the proper destination
// of a java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are, so the caller
// must do any gc of the args.
2917 //
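// On return from the VM call, r0 holds the code entry point to jump to and
// the thread's vm_result_metadata holds the resolved Method*; both are
// picked up below after the saved registers are restored.
//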
2918 RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
  assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2920   assert(is_resolve_id(id), "expected a resolve stub id");
2921 
2922   const char* name = SharedRuntime::stub_name(id);
2923   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2924   if (blob != nullptr) {
2925     return blob->as_runtime_stub();
2926   }
2927 
2928   // allocate space for the code
2929   ResourceMark rm;
2930   CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
2932 
2933   int frame_size_in_words;
2934   RegisterSaver reg_save(false /* save_vectors */);
2935 
2936   OopMapSet *oop_maps = new OopMapSet();
2937   OopMap* map = nullptr;
2938 
2939   int start = __ offset();
2940 
2941   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2942 
2943   int frame_complete = __ offset();
2944 
2945   {
2946     Label retaddr;
2947     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2948 
2949     __ mov(c_rarg0, rthread);
2950     __ lea(rscratch1, RuntimeAddress(destination));
2951 
2952     __ blr(rscratch1);
2953     __ bind(retaddr);
2954   }
2955 
2956   // Set an oopmap for the call site.
2957   // We need this not only for callee-saved registers, but also for volatile
2958   // registers that the compiler might be keeping live across a safepoint.
2959 
  oop_maps->add_gc_map(__ offset() - start, map);
2961 
  // r0 contains the address we are going to jump to, assuming no exception was installed
2963 
2964   // clear last_Java_sp
2965   __ reset_last_Java_frame(false);
2966   // check for pending exceptions
2967   Label pending;
2968   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2969   __ cbnz(rscratch1, pending);
2970 
2971   // get the returned Method*
2972   __ get_vm_result_metadata(rmethod, rthread);
2973   __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
2974 
  // r0 holds the address we want to jump to; store it into rscratch1's save
  // slot, since rscratch1 is a scratch register whose saved value is expendable
2976   __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
2977   reg_save.restore_live_registers(masm);
2978 
2979   // We are back to the original state on entry and ready to go.
2980 
2981   __ br(rscratch1);
2982 
2983   // Pending exception after the safepoint
2984 
2985   __ bind(pending);
2986 
2987   reg_save.restore_live_registers(masm);
2988 
2989   // exception pending => remove activation and forward to exception handler
2990 
2991   __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
2992 
2993   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2994   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2995 
2996   // -------------
2997   // make sure all code is generated
2998   masm->flush();
2999 
  // return the blob
  // frame_size_words or bytes??
3002   RuntimeStub* rs_blob = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3003 
3004   AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
3005   return rs_blob;
3006 }
3007 
3008 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3009   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3010   if (buf == nullptr) {
3011     return nullptr;
3012   }
3013   CodeBuffer buffer(buf);
3014   short buffer_locs[20];
3015   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3016                                          sizeof(buffer_locs)/sizeof(relocInfo));
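  // relocInfo entries are short-sized, so a small on-stack array is
  // sufficient for this blob's relocations.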
3017 
3018   MacroAssembler _masm(&buffer);
3019   MacroAssembler* masm = &_masm;
3020 
3021   const Array<SigEntry>* sig_vk = vk->extended_sig();
3022   const Array<VMRegPair>* regs = vk->return_regs();
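  // The extended signature interleaves markers with field entries:
  // T_METADATA entries consume no register, and a T_VOID entry consumes a
  // register pair only as the upper half of a preceding T_LONG or T_DOUBLE.
  // Field registers start at j = 1; the leading pair is not used for
  // fields (r0 carries the buffer address).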
3023 
3024   int pack_fields_jobject_off = __ offset();
3025   // Resolve pre-allocated buffer from JNI handle.
3026   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3027   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3028   __ ldr(r0, Address(Rresult));
3029   __ resolve_jobject(r0 /* value */,
3030                      rthread /* thread */,
3031                      r12 /* tmp */);
3032   __ str(r0, Address(Rresult));
3033 
3034   int pack_fields_off = __ offset();
3035 
3036   int j = 1;
3037   for (int i = 0; i < sig_vk->length(); i++) {
3038     BasicType bt = sig_vk->at(i)._bt;
3039     if (bt == T_METADATA) {
3040       continue;
3041     }
3042     if (bt == T_VOID) {
3043       if (sig_vk->at(i-1)._bt == T_LONG ||
3044           sig_vk->at(i-1)._bt == T_DOUBLE) {
3045         j++;
3046       }
3047       continue;
3048     }
3049     int off = sig_vk->at(i)._offset;
3050     VMRegPair pair = regs->at(j);
3051     VMReg r_1 = pair.first();
3052     VMReg r_2 = pair.second();
3053     Address to(r0, off);
3054     if (bt == T_FLOAT) {
3055       __ strs(r_1->as_FloatRegister(), to);
3056     } else if (bt == T_DOUBLE) {
3057       __ strd(r_1->as_FloatRegister(), to);
3058     } else {
3059       Register val = r_1->as_Register();
3060       assert_different_registers(to.base(), val, r15, r16, r17);
3061       if (is_reference_type(bt)) {
3062         // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep r0 valid.
3063         __ mov(r17, r0);
3064         Address to_with_r17(r17, off);
3065         __ store_heap_oop(to_with_r17, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3066       } else {
3067         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3068       }
3069     }
3070     j++;
3071   }
3072   assert(j == regs->length(), "missed a field?");
3073   if (vk->supports_nullable_layouts()) {
3074     // Zero the null marker (setting it to 1 would be better but would require an additional register)
3075     __ strb(zr, Address(r0, vk->null_marker_offset()));
3076   }
3077   __ ret(lr);
3078 
3079   int unpack_fields_off = __ offset();
3080 
3081   Label skip;
3082   Label not_null;
3083   __ cbnz(r0, not_null);
3084 
3085   // Return value is null. Zero all registers because the runtime requires a canonical
3086   // representation of a flat null.
3087   j = 1;
3088   for (int i = 0; i < sig_vk->length(); i++) {
3089     BasicType bt = sig_vk->at(i)._bt;
3090     if (bt == T_METADATA) {
3091       continue;
3092     }
3093     if (bt == T_VOID) {
3094       if (sig_vk->at(i-1)._bt == T_LONG ||
3095           sig_vk->at(i-1)._bt == T_DOUBLE) {
3096         j++;
3097       }
3098       continue;
3099     }
3100 
3101     VMRegPair pair = regs->at(j);
3102     VMReg r_1 = pair.first();
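    // Zero the register holding this field: a vector move of #0 for
    // FP/SIMD registers, zr for integer registers.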
3103     if (r_1->is_FloatRegister()) {
3104       __ mov(r_1->as_FloatRegister(), Assembler::T2S, 0);
3105     } else {
3106       __ mov(r_1->as_Register(), zr);
3107     }
3108     j++;
3109   }
3110   __ b(skip);
3111   __ bind(not_null);
3112 
3113   j = 1;
3114   for (int i = 0; i < sig_vk->length(); i++) {
3115     BasicType bt = sig_vk->at(i)._bt;
3116     if (bt == T_METADATA) {
3117       continue;
3118     }
3119     if (bt == T_VOID) {
3120       if (sig_vk->at(i-1)._bt == T_LONG ||
3121           sig_vk->at(i-1)._bt == T_DOUBLE) {
3122         j++;
3123       }
3124       continue;
3125     }
3126     int off = sig_vk->at(i)._offset;
3127     assert(off > 0, "offset in object should be positive");
3128     VMRegPair pair = regs->at(j);
3129     VMReg r_1 = pair.first();
3130     VMReg r_2 = pair.second();
3131     Address from(r0, off);
3132     if (bt == T_FLOAT) {
3133       __ ldrs(r_1->as_FloatRegister(), from);
3134     } else if (bt == T_DOUBLE) {
3135       __ ldrd(r_1->as_FloatRegister(), from);
3136     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3137       assert_different_registers(r0, r_1->as_Register());
3138       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3139     } else {
3140       assert(is_java_primitive(bt), "unexpected basic type");
3141       assert_different_registers(r0, r_1->as_Register());
3142       size_t size_in_bytes = type2aelembytes(bt);
3143       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3144     }
3145     j++;
3146   }
3147   assert(j == regs->length(), "missed a field?");
3148 
3149   __ bind(skip);
3150 
3151   __ ret(lr);
3152 
3153   __ flush();
3154 
3155   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3156 }
3157 
3158 // Continuation point for throwing of implicit exceptions that are
3159 // not handled in the current activation. Fabricates an exception
3160 // oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (strictly needed
// only for C2, but done for C1 as well) we need a callee-saved oop
3163 // map and therefore have to make these stubs into RuntimeStubs
3164 // rather than BufferBlobs.  If the compiler needs all registers to
3165 // be preserved between the fault point and the exception handler
3166 // then it must assume responsibility for that in
3167 // AbstractCompiler::continuation_for_implicit_null_exception or
3168 // continuation_for_implicit_division_by_zero_exception. All other
3169 // implicit exceptions (e.g., NullPointerException or
3170 // AbstractMethodError on entry) are either at call sites or
3171 // otherwise assume that stack unwinding will be initiated, so
// caller-saved registers were assumed volatile in the compiler.
3173 
3174 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
3175   assert(is_throw_id(id), "expected a throw stub id");
3176 
3177   const char* name = SharedRuntime::stub_name(id);
3178 
3179   // Information about frame layout at time of blocking runtime call.
3180   // Note that we only have to preserve callee-saved registers since
3181   // the compilers are responsible for supplying a continuation point
3182   // if they expect all registers to be preserved.
3183   // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
3184   enum layout {
3185     rfp_off = 0,
3186     rfp_off2,
3187     return_off,
3188     return_off2,
3189     framesize // inclusive of return address
3190   };
3191 
3192   int insts_size = 512;
3193   int locs_size  = 64;
3194 
3195   const char* timer_msg = "SharedRuntime generate_throw_exception";
3196   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
3197 
3198   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
3199   if (blob != nullptr) {
3200     return blob->as_runtime_stub();
3201   }
3202 
3203   ResourceMark rm;
3204   CodeBuffer code(name, insts_size, locs_size);
3205   OopMapSet* oop_maps  = new OopMapSet();
3206   MacroAssembler* masm = new MacroAssembler(&code);
3207 
3208   address start = __ pc();
3209 
3210   // This is an inlined and slightly modified version of call_VM
3211   // which has the ability to fetch the return PC out of
3212   // thread-local storage and also sets up last_Java_sp slightly
3213   // differently than the real call_VM
3214 
3215   __ enter(); // Save FP and LR before call
3216 
3217   assert(is_even(framesize/2), "sp not 16-byte aligned");
3218 
3219   // lr and fp are already in place
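  // framesize counts 32-bit slots; with framesize == 4 the adjustment below
  // is zero, since enter() already pushed the two words (fp and lr) that
  // this frame needs.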
3220   __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog
3221 
3222   int frame_complete = __ pc() - start;
3223 
3224   // Set up last_Java_sp and last_Java_fp
3225   address the_pc = __ pc();
3226   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
3227 
3228   __ mov(c_rarg0, rthread);
3229   BLOCK_COMMENT("call runtime_entry");
3230   __ lea(rscratch1, RuntimeAddress(runtime_entry));
3231   __ blr(rscratch1);
3232 
3233   // Generate oop map
3234   OopMap* map = new OopMap(framesize, 0);
3235 
3236   oop_maps->add_gc_map(the_pc - start, map);
3237 
3238   __ reset_last_Java_frame(true);
3239 
3240   // Reinitialize the ptrue predicate register, in case the external runtime
3241   // call clobbers ptrue reg, as we may return to SVE compiled code.
3242   __ reinitialize_ptrue();
3243 
3244   __ leave();
3245 
3246   // check for pending exceptions
3247 #ifdef ASSERT
3248   Label L;
3249   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
3250   __ cbnz(rscratch1, L);
3251   __ should_not_reach_here();
3252   __ bind(L);
3253 #endif // ASSERT
3254   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3255 
3256   // codeBlob framesize is in words (not VMRegImpl::slot_size)
3257   RuntimeStub* stub =
3258     RuntimeStub::new_runtime_stub(name,
3259                                   &code,
3260                                   frame_complete,
3261                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3262                                   oop_maps, false);
3263   AOTCodeCache::store_code_blob(*stub, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
3264 
3265   return stub;
3266 }
3267 
3268 #if INCLUDE_JFR
3269 
3270 static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
3271   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
3272   __ mov(c_rarg0, thread);
3273 }
3274 
3275 // The handle is dereferenced through a load barrier.
3276 static void jfr_epilogue(MacroAssembler* masm) {
3277   __ reset_last_Java_frame(true);
3278 }
3279 
3280 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
3281 // It returns a jobject handle to the event writer.
3282 // The handle is dereferenced and the return value is the event writer oop.
3283 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3284   enum layout {
3285     rbp_off,
3286     rbpH_off,
3287     return_off,
3288     return_off2,
3289     framesize // inclusive of return address
3290   };
3291 
3292   int insts_size = 1024;
3293   int locs_size = 64;
3294   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id);
3295   CodeBuffer code(name, insts_size, locs_size);
3296   OopMapSet* oop_maps = new OopMapSet();
3297   MacroAssembler* masm = new MacroAssembler(&code);
3298 
3299   address start = __ pc();
3300   __ enter();
3301   int frame_complete = __ pc() - start;
3302   address the_pc = __ pc();
3303   jfr_prologue(the_pc, masm, rthread);
3304   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
3305   jfr_epilogue(masm);
3306   __ resolve_global_jobject(r0, rscratch1, rscratch2);
3307   __ leave();
3308   __ ret(lr);
3309 
3310   OopMap* map = new OopMap(framesize, 1); // rfp
3311   oop_maps->add_gc_map(the_pc - start, map);
3312 
3313   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3314     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3315                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3316                                   oop_maps, false);
3317   return stub;
3318 }
3319 
3320 // For c2: call to return a leased buffer.
3321 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
3322   enum layout {
3323     rbp_off,
3324     rbpH_off,
3325     return_off,
3326     return_off2,
3327     framesize // inclusive of return address
3328   };
3329 
3330   int insts_size = 1024;
3331   int locs_size = 64;
3332 
3333   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id);
3334   CodeBuffer code(name, insts_size, locs_size);
3335   OopMapSet* oop_maps = new OopMapSet();
3336   MacroAssembler* masm = new MacroAssembler(&code);
3337 
3338   address start = __ pc();
3339   __ enter();
3340   int frame_complete = __ pc() - start;
3341   address the_pc = __ pc();
3342   jfr_prologue(the_pc, masm, rthread);
3343   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
3344   jfr_epilogue(masm);
3345 
3346   __ leave();
3347   __ ret(lr);
3348 
3349   OopMap* map = new OopMap(framesize, 1); // rfp
3350   oop_maps->add_gc_map(the_pc - start, map);
3351 
3352   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3353     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3354                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3355                                   oop_maps, false);
3356   return stub;
3357 }
3358 
3359 #endif // INCLUDE_JFR