1 /*
   2  * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "code/aotCodeCache.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/debugInfoRec.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interp_masm.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/method.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/continuation.hpp"
  46 #include "runtime/continuationEntry.inline.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/jniHandles.hpp"
  49 #include "runtime/safepointMechanism.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "runtime/signature.hpp"
  52 #include "runtime/stubRoutines.hpp"
  53 #include "runtime/timerTrace.hpp"
  54 #include "runtime/vframeArray.hpp"
  55 #include "utilities/align.hpp"
  56 #include "utilities/formatBuffer.hpp"
  57 #include "vmreg_aarch64.inline.hpp"
  58 #ifdef COMPILER1
  59 #include "c1/c1_Runtime1.hpp"
  60 #endif
  61 #ifdef COMPILER2
  62 #include "adfiles/ad_aarch64.hpp"
  63 #include "opto/runtime.hpp"
  64 #endif
  65 #if INCLUDE_JVMCI
  66 #include "jvmci/jvmciJavaClasses.hpp"
  67 #endif
  68 
  69 #define __ masm->
  70 
  71 #ifdef PRODUCT
  72 #define BLOCK_COMMENT(str) /* nothing */
  73 #else
  74 #define BLOCK_COMMENT(str) __ block_comment(str)
  75 #endif
  76 
  77 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  78 
  79 // FIXME -- this is used by C1
  80 class RegisterSaver {
  81   const bool _save_vectors;
  82  public:
  83   RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}
  84 
  85   OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  86   void restore_live_registers(MacroAssembler* masm);
  87 
  88   // Offsets into the register save area
  89   // Used by deoptimization when it is managing result register
  90   // values on its own
  91 
  92   int reg_offset_in_bytes(Register r);
  93   int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
  94   int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
  95   int v0_offset_in_bytes();
  96 
  97   // Total stack size in bytes for saving sve predicate registers.
  98   int total_sve_predicate_in_bytes();
  99 
 100   // Capture info about frame layout
 101   // Note this is only correct when not saving full vectors.
 102   enum layout {
 103                 fpu_state_off = 0,
 104                 fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
 105                 // The frame sender code expects that rfp will be in
 106                 // the "natural" place and will override any oopMap
 107                 // setting for it. We must therefore force the layout
 108                 // so that it agrees with the frame sender code.
 109                 r0_off = fpu_state_off + FPUStateSizeInWords,
 110                 rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
 111                 return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
 112                 reg_save_size = return_off + Register::max_slots_per_register};
 113 
 114 };
 115 
 116 int RegisterSaver::reg_offset_in_bytes(Register r) {
 117   // The integer registers are located above the floating point
 118   // registers in the stack frame pushed by save_live_registers() so the
 119   // offset depends on whether we are saving full vectors, and whether
 120   // those vectors are NEON or SVE.
 121 
 122   int slots_per_vect = FloatRegister::save_slots_per_register;
 123 
 124 #if COMPILER2_OR_JVMCI
 125   if (_save_vectors) {
 126     slots_per_vect = FloatRegister::slots_per_neon_register;
 127 
 128 #ifdef COMPILER2
 129     if (Matcher::supports_scalable_vector()) {
 130       slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
 131     }
 132 #endif
 133   }
 134 #endif
 135 
 136   int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
 137   return r0_offset + r->encoding() * wordSize;
 138 }
 139 
 140 int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate registers,
  // if any are present in the stack frame pushed by save_live_registers(). So
  // the offset depends on the total size of the saved predicate registers.
 144   return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
 145 }
 146 
 147 int RegisterSaver::total_sve_predicate_in_bytes() {
 148 #ifdef COMPILER2
 149   if (_save_vectors && Matcher::supports_scalable_vector()) {
 150     return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
 151            PRegister::number_of_registers;
 152   }
 153 #endif
 154   return 0;
 155 }
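
// For illustration, in the simplest configuration (no vectors, no SVE
// predicates saved): total_sve_predicate_in_bytes() is 0, so
// v0_offset_in_bytes() is 0, and, assuming the default of
// FloatRegister::save_slots_per_register == 2 (only the low 64 bits of each
// FP register are saved), the integer save area starts at
//   r0_offset = (2 * FloatRegister::number_of_registers) * BytesPerInt
// and reg_offset_in_bytes(r5), for example, is r0_offset + 5 * wordSize.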
 156 
 157 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
 158   bool use_sve = false;
 159   int sve_vector_size_in_bytes = 0;
 160   int sve_vector_size_in_slots = 0;
 161   int sve_predicate_size_in_slots = 0;
 162   int total_predicate_in_bytes = total_sve_predicate_in_bytes();
 163   int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;
 164 
 165 #ifdef COMPILER2
 166   use_sve = Matcher::supports_scalable_vector();
 167   if (use_sve) {
 168     sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 169     sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
 170     sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
 171   }
 172 #endif
 173 
 174 #if COMPILER2_OR_JVMCI
 175   if (_save_vectors) {
 176     int extra_save_slots_per_register = 0;
 177     // Save upper half of vector registers
 178     if (use_sve) {
 179       extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
 180     } else {
 181       extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
 182     }
 183     int extra_vector_bytes = extra_save_slots_per_register *
 184                              VMRegImpl::stack_slot_size *
 185                              FloatRegister::number_of_registers;
 186     additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
 187   }
 188 #else
 189   assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
 190 #endif
 191 
 192   int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
 193                                      reg_save_size * BytesPerInt, 16);
 194   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 195   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 196   // The caller will allocate additional_frame_words
 197   int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
 198   // CodeBlob frame size is in words.
 199   int frame_size_in_words = frame_size_in_bytes / wordSize;
 200   *total_frame_words = frame_size_in_words;
 201 
 202   // Save Integer and Float registers.
 203   __ enter();
 204   __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);
 205 
 206   // Set an oopmap for the call site.  This oopmap will map all
 207   // oop-registers and debug-info registers as callee-saved.  This
 208   // will allow deoptimization at this safepoint to find all possible
 209   // debug-info recordings, as well as let GC find all oops.
 210 
 211   OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
 212 
 213   for (int i = 0; i < Register::number_of_registers; i++) {
 214     Register r = as_Register(i);
 215     if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte slots.
      // Integer register slots are 8 bytes wide and sit above the save area of the 32 floating-point registers.
 218       int sp_offset = Register::max_slots_per_register * i +
 219                       FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
 220       oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
 221     }
 222   }
 223 
 224   for (int i = 0; i < FloatRegister::number_of_registers; i++) {
 225     FloatRegister r = as_FloatRegister(i);
 226     int sp_offset = 0;
 227     if (_save_vectors) {
 228       sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
 229                             (FloatRegister::slots_per_neon_register * i);
 230     } else {
 231       sp_offset = FloatRegister::save_slots_per_register * i;
 232     }
 233     oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
 234   }
 235 
 236   return oop_map;
 237 }
 238 
 239 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
 240 #ifdef COMPILER2
 241   __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
 242                    Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
 243 #else
 244 #if !INCLUDE_JVMCI
 245   assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
 246 #endif
 247   __ pop_CPU_state(_save_vectors);
 248 #endif
 249   __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
 250   __ authenticate_return_address();
 251 }
 252 
// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The minimum vector size supported by SVE is 8 bytes, and we also need to
// save predicate registers when the vector size is 8 bytes.
 257 bool SharedRuntime::is_wide_vector(int size) {
 258   return size > 8 || (UseSVE > 0 && size >= 8);
 259 }
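
// For example, under this rule:
//   is_wide_vector(16) -> true  (full NEON Q register, size > 8)
//   is_wide_vector(8)  -> true  when UseSVE > 0 (predicate registers must be saved)
//   is_wide_vector(8)  -> false when UseSVE == 0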
 260 
 261 // ---------------------------------------------------------------------------
 262 // Read the array of BasicTypes from a signature, and compute where the
 263 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 264 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
 265 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as frame sizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.
 271 
 272 // Note: the INPUTS in sig_bt are in units of Java argument words,
 273 // which are 64-bit.  The OUTPUTS are in 32-bit units.
 274 
 275 // The Java calling convention is a "shifted" version of the C ABI.
 276 // By skipping the first C ABI register we can call non-static jni
 277 // methods with small numbers of arguments without having to shuffle
 278 // the arguments at all. Since we control the java ABI we ought to at
 279 // least get some advantage out of it.
 280 
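// A minimal usage sketch, for illustration only (the signature below is
// hypothetical); register names refer to the tables inside the function:
//
//   BasicType sig_bt[] = { T_INT, T_LONG, T_VOID, T_OBJECT, T_FLOAT, T_DOUBLE, T_VOID };
//   VMRegPair regs[7];
//   int out_slots = SharedRuntime::java_calling_convention(sig_bt, regs, 7);
//   // T_INT -> j_rarg0, T_LONG -> j_rarg1, T_OBJECT -> j_rarg2,
//   // T_FLOAT -> j_farg0, T_DOUBLE -> j_farg1; out_slots == 0 because
//   // neither register class overflows onto the stack.
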
 281 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 282                                            VMRegPair *regs,
 283                                            int total_args_passed) {
 284 
 285   // Create the mapping between argument positions and
 286   // registers.
 287   static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
 288     j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
 289   };
 290   static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
 291     j_farg0, j_farg1, j_farg2, j_farg3,
 292     j_farg4, j_farg5, j_farg6, j_farg7
 293   };
 294 
 295 
 296   uint int_args = 0;
 297   uint fp_args = 0;
 298   uint stk_args = 0;
 299 
 300   for (int i = 0; i < total_args_passed; i++) {
 301     switch (sig_bt[i]) {
 302     case T_BOOLEAN:
 303     case T_CHAR:
 304     case T_BYTE:
 305     case T_SHORT:
 306     case T_INT:
 307       if (int_args < Argument::n_int_register_parameters_j) {
 308         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 309       } else {
 310         stk_args = align_up(stk_args, 2);
 311         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 312         stk_args += 1;
 313       }
 314       break;
 315     case T_VOID:
 316       // halves of T_LONG or T_DOUBLE
 317       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 318       regs[i].set_bad();
 319       break;
 320     case T_LONG:
 321       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 322       // fall through
 323     case T_OBJECT:
 324     case T_ARRAY:
 325     case T_ADDRESS:
 326       if (int_args < Argument::n_int_register_parameters_j) {
 327         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 328       } else {
 329         stk_args = align_up(stk_args, 2);
 330         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 331         stk_args += 2;
 332       }
 333       break;
 334     case T_FLOAT:
 335       if (fp_args < Argument::n_float_register_parameters_j) {
 336         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 337       } else {
 338         stk_args = align_up(stk_args, 2);
 339         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 340         stk_args += 1;
 341       }
 342       break;
 343     case T_DOUBLE:
 344       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 345       if (fp_args < Argument::n_float_register_parameters_j) {
 346         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 347       } else {
 348         stk_args = align_up(stk_args, 2);
 349         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 350         stk_args += 2;
 351       }
 352       break;
 353     default:
 354       ShouldNotReachHere();
 355       break;
 356     }
 357   }
 358 
 359   return stk_args;
 360 }
 361 
 362 
 363 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 364 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 365 
 366 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 367 
 368   // Create the mapping between argument positions and registers.
 369 
 370   static const Register INT_ArgReg[java_return_convention_max_int] = {
 371     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 372   };
 373 
 374   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 375     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 376   };
 377 
 378   uint int_args = 0;
 379   uint fp_args = 0;
 380 
 381   for (int i = 0; i < total_args_passed; i++) {
 382     switch (sig_bt[i]) {
 383     case T_BOOLEAN:
 384     case T_CHAR:
 385     case T_BYTE:
 386     case T_SHORT:
 387     case T_INT:
 388       if (int_args < SharedRuntime::java_return_convention_max_int) {
 389         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 390         int_args ++;
 391       } else {
 392         return -1;
 393       }
 394       break;
 395     case T_VOID:
 396       // halves of T_LONG or T_DOUBLE
 397       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 398       regs[i].set_bad();
 399       break;
 400     case T_LONG:
 401       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 402       // fall through
 403     case T_OBJECT:
 404     case T_ARRAY:
 405     case T_ADDRESS:
      // Should T_METADATA be added to java_calling_convention as well?
 407     case T_METADATA:
 408       if (int_args < SharedRuntime::java_return_convention_max_int) {
 409         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 410         int_args ++;
 411       } else {
 412         return -1;
 413       }
 414       break;
 415     case T_FLOAT:
 416       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 417         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 418         fp_args ++;
 419       } else {
 420         return -1;
 421       }
 422       break;
 423     case T_DOUBLE:
 424       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 425       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 426         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 427         fp_args ++;
 428       } else {
 429         return -1;
 430       }
 431       break;
 432     default:
 433       ShouldNotReachHere();
 434       break;
 435     }
 436   }
 437 
 438   return int_args + fp_args;
 439 }
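
// For illustration, with a hypothetical scalarized return described by
// sig_bt = { T_INT, T_FLOAT } this returns 2: the T_INT field lands in
// INT_ArgReg[0] (r0) and the T_FLOAT field in FP_ArgReg[0] (j_farg0).
// A return of -1 signals that the fields do not all fit in return registers.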
 440 
// Patch the caller's callsite with the entry to compiled code, if it exists.
 442 static void patch_callers_callsite(MacroAssembler *masm) {
 443   Label L;
 444   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 445   __ cbz(rscratch1, L);
 446 
 447   __ enter();
 448   __ push_CPU_state();
 449 
 450   // VM needs caller's callsite
 451   // VM needs target method
 452   // This needs to be a long call since we will relocate this adapter to
 453   // the codeBuffer and it may not reach
 454 
 455 #ifndef PRODUCT
 456   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 457 #endif
 458 
 459   __ mov(c_rarg0, rmethod);
 460   __ mov(c_rarg1, lr);
 461   __ authenticate_return_address(c_rarg1);
 462   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 463   __ blr(rscratch1);
 464 
 465   // Explicit isb required because fixup_callers_callsite may change the code
 466   // stream.
 467   __ safepoint_isb();
 468 
 469   __ pop_CPU_state();
 470   // restore sp
 471   __ leave();
 472   __ bind(L);
 473 }
 474 
 475 // For each inline type argument, sig includes the list of fields of
 476 // the inline type. This utility function computes the number of
 477 // arguments for the call if inline types are passed by reference (the
 478 // calling convention the interpreter expects).
 479 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 480   int total_args_passed = 0;
 481   if (InlineTypePassFieldsAsArgs) {
 482     for (int i = 0; i < sig_extended->length(); i++) {
 483       BasicType bt = sig_extended->at(i)._bt;
 484       if (bt == T_METADATA) {
 485         // In sig_extended, an inline type argument starts with:
 486         // T_METADATA, followed by the types of the fields of the
        // inline type and T_VOID to mark the end of the inline
        // type. Inline types are flattened so, for instance, in the
 489         // case of an inline type with an int field and an inline type
 490         // field that itself has 2 fields, an int and a long:
 491         // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
 492         // slot for the T_LONG) T_VOID (inner inline type) T_VOID
 493         // (outer inline type)
 494         total_args_passed++;
 495         int vt = 1;
 496         do {
 497           i++;
 498           BasicType bt = sig_extended->at(i)._bt;
 499           BasicType prev_bt = sig_extended->at(i-1)._bt;
 500           if (bt == T_METADATA) {
 501             vt++;
 502           } else if (bt == T_VOID &&
 503                      prev_bt != T_LONG &&
 504                      prev_bt != T_DOUBLE) {
 505             vt--;
 506           }
 507         } while (vt != 0);
 508       } else {
 509         total_args_passed++;
 510       }
 511     }
 512   } else {
 513     total_args_passed = sig_extended->length();
 514   }
 515   return total_args_passed;
 516 }
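
// For illustration, with InlineTypePassFieldsAsArgs the extended signature
// from the comment above,
//   T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID T_VOID T_VOID,
// counts as a single interpreter argument, so this function returns 1 for it;
// without InlineTypePassFieldsAsArgs it simply returns sig_extended->length().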
 517 
 518 
 519 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 520                                    BasicType bt,
 521                                    BasicType prev_bt,
 522                                    size_t size_in_bytes,
 523                                    const VMRegPair& reg_pair,
 524                                    const Address& to,
 525                                    Register tmp1,
 526                                    Register tmp2,
 527                                    Register tmp3,
 528                                    int extraspace,
 529                                    bool is_oop) {
 530   if (bt == T_VOID) {
 531     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 532     return;
 533   }
 534 
 535   // Say 4 args:
 536   // i   st_off
 537   // 0   32 T_LONG
 538   // 1   24 T_VOID
 539   // 2   16 T_OBJECT
 540   // 3    8 T_BOOL
 541   // -    0 return address
 542   //
  // However, to make things extra confusing: because we can fit a Java long/double
  // in a single slot on a 64-bit VM, and it would be silly to break them up, the
  // interpreter leaves one slot empty and only stores to a single slot. In this
  // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.
 547 
 548   bool wide = (size_in_bytes == wordSize);
 549   VMReg r_1 = reg_pair.first();
 550   VMReg r_2 = reg_pair.second();
 551   assert(r_2->is_valid() == wide, "invalid size");
 552   if (!r_1->is_valid()) {
 553     assert(!r_2->is_valid(), "");
 554     return;
 555   }
 556 
 557   if (!r_1->is_FloatRegister()) {
 558     Register val = r25;
 559     if (r_1->is_stack()) {
      // memory to memory: use r25 (the scratch registers are used by store_heap_oop)
 561       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 562       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 563     } else {
 564       val = r_1->as_Register();
 565     }
 566     assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
 567     if (is_oop) {
 568       // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep it valid.
 569       __ push(to.base(), sp);
 570       __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 571       __ pop(to.base(), sp);
 572     } else {
 573       __ store_sized_value(to, val, size_in_bytes);
 574     }
 575   } else {
 576     if (wide) {
 577       __ strd(r_1->as_FloatRegister(), to);
 578     } else {
      // only a float: use just part of the slot
 580       __ strs(r_1->as_FloatRegister(), to);
 581     }
 582   }
 583 }
 584 
 585 static void gen_c2i_adapter(MacroAssembler *masm,
 586                             const GrowableArray<SigEntry>* sig_extended,
 587                             const VMRegPair *regs,
 588                             bool requires_clinit_barrier,
 589                             address& c2i_no_clinit_check_entry,
 590                             Label& skip_fixup,
 591                             address start,
 592                             OopMapSet* oop_maps,
 593                             int& frame_complete,
 594                             int& frame_size_in_words,
 595                             bool alloc_inline_receiver) {
 596   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 597     Label L_skip_barrier;
 598 
 599     { // Bypass the barrier for non-static methods
 600       __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
 601       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 602       __ br(Assembler::EQ, L_skip_barrier); // non-static
 603     }
 604 
 605     __ load_method_holder(rscratch2, rmethod);
 606     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 607     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 608 
 609     __ bind(L_skip_barrier);
 610     c2i_no_clinit_check_entry = __ pc();
 611   }
 612 
 613   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 614   bs->c2i_entry_barrier(masm);
 615 
 616   // Before we get into the guts of the C2I adapter, see if we should be here
 617   // at all.  We've come from compiled code and are attempting to jump to the
 618   // interpreter, which means the caller made a static call to get here
 619   // (vcalls always get a compiled target if there is one).  Check for a
 620   // compiled target.  If there is one, we need to patch the caller's call.
 621   patch_callers_callsite(masm);
 622 
 623   __ bind(skip_fixup);
 624 
 625   // Name some registers to be used in the following code. We can use
 626   // anything except r0-r7 which are arguments in the Java calling
 627   // convention, rmethod (r12), and r19 which holds the outgoing sender
 628   // SP for the interpreter.
 629   Register buf_array = r10;   // Array of buffered inline types
 630   Register buf_oop = r11;     // Buffered inline type oop
 631   Register tmp1 = r15;
 632   Register tmp2 = r16;
 633   Register tmp3 = r17;
 634 
#ifdef ASSERT
 636   RegSet clobbered_gp_regs = MacroAssembler::call_clobbered_gp_registers();
 637   assert(clobbered_gp_regs.contains(buf_array), "buf_array must be saved explicitly if it's not a clobber");
 638   assert(clobbered_gp_regs.contains(buf_oop), "buf_oop must be saved explicitly if it's not a clobber");
 639   assert(clobbered_gp_regs.contains(tmp1), "tmp1 must be saved explicitly if it's not a clobber");
 640   assert(clobbered_gp_regs.contains(tmp2), "tmp2 must be saved explicitly if it's not a clobber");
 641   assert(clobbered_gp_regs.contains(tmp3), "tmp3 must be saved explicitly if it's not a clobber");
 642 #endif
 643 
 644   if (InlineTypePassFieldsAsArgs) {
 645     // Is there an inline type argument?
 646     bool has_inline_argument = false;
 647     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 648       has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
 649     }
 650     if (has_inline_argument) {
      // There is at least one inline type argument: we're coming from
      // compiled code, so we have no buffers to back the inline types.
      // Allocate the buffers here with a runtime call.
 654       RegisterSaver reg_save(true /* save_vectors */);
 655       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 656 
 657       frame_complete = __ offset();
 658       address the_pc = __ pc();
 659 
 660       Label retaddr;
 661       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 662 
 663       __ mov(c_rarg0, rthread);
 664       __ mov(c_rarg1, rmethod);
 665       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 666 
 667       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 668       __ blr(rscratch1);
 669       __ bind(retaddr);
 670 
 671       oop_maps->add_gc_map(__ pc() - start, map);
 672       __ reset_last_Java_frame(false);
 673 
 674       reg_save.restore_live_registers(masm);
 675 
 676       Label no_exception;
 677       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 678       __ cbz(rscratch1, no_exception);
 679 
 680       __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
 681       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 682       __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 683 
 684       __ bind(no_exception);
 685 
 686       // We get an array of objects from the runtime call
 687       __ get_vm_result_oop(buf_array, rthread);
 688       __ get_vm_result_metadata(rmethod, rthread); // TODO: required to keep the callee Method live?
 689     }
 690   }
 691 
 692   // Since all args are passed on the stack, total_args_passed *
 693   // Interpreter::stackElementSize is the space we need.
 694 
 695   int total_args_passed = compute_total_args_passed_int(sig_extended);
 696   int extraspace = total_args_passed * Interpreter::stackElementSize;
 697 
 698   // stack is aligned, keep it that way
 699   extraspace = align_up(extraspace, StackAlignmentInBytes);
 700 
 701   // set senderSP value
 702   __ mov(r19_sender_sp, sp);
 703 
 704   __ sub(sp, sp, extraspace);
 705 
 706   // Now write the args into the outgoing interpreter space
 707 
 708   // next_arg_comp is the next argument from the compiler point of
 709   // view (inline type fields are passed in registers/on the stack). In
 710   // sig_extended, an inline type argument starts with: T_METADATA,
 711   // followed by the types of the fields of the inline type and T_VOID
 712   // to mark the end of the inline type. ignored counts the number of
 713   // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
 714   // used to get the buffer for that argument from the pool of buffers
 715   // we allocated above and want to pass to the
 716   // interpreter. next_arg_int is the next argument from the
 717   // interpreter point of view (inline types are passed by reference).
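  // For illustration, take a single inline type argument with one int field,
  // i.e. sig_extended is T_METADATA T_INT T_VOID: the T_METADATA entry takes
  // the else-branch below, a buffer oop is loaded from buf_array, the T_INT
  // field (passed in regs[0]) is written into that buffer, the closing T_VOID
  // drops vt back to 0, and the buffer reference itself is then stored into
  // the interpreter's stack slot. At the end, ignored == 2 (the T_METADATA
  // plus the delimiter T_VOID) and next_arg_int == 1.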
 718   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 719        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 720     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 721     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 722     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 723     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 724     if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
 725       int next_off = st_off - Interpreter::stackElementSize;
 726       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 727       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 728       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 729       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 730                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 731       next_arg_int++;
 732 #ifdef ASSERT
 733       if (bt == T_LONG || bt == T_DOUBLE) {
 734         // Overwrite the unused slot with known junk
 735         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
 736         __ str(rscratch1, Address(sp, st_off));
 737       }
 738 #endif /* ASSERT */
 739     } else {
 740       ignored++;
 741       // get the buffer from the just allocated pool of buffers
 742       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
 743       __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
 744       next_vt_arg++; next_arg_int++;
 745       int vt = 1;
 746       // write fields we get from compiled code in registers/stack
 747       // slots to the buffer: we know we are done with that inline type
 748       // argument when we hit the T_VOID that acts as an end of inline
 749       // type delimiter for this inline type. Inline types are flattened
 750       // so we might encounter embedded inline types. Each entry in
 751       // sig_extended contains a field offset in the buffer.
 752       Label L_null;
 753       do {
 754         next_arg_comp++;
 755         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 756         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 757         if (bt == T_METADATA) {
 758           vt++;
 759           ignored++;
 760         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 761           vt--;
 762           ignored++;
 763         } else {
 764           int off = sig_extended->at(next_arg_comp)._offset;
 765           if (off == -1) {
 766             // Nullable inline type argument, emit null check
 767             VMReg reg = regs[next_arg_comp-ignored].first();
 768             Label L_notNull;
 769             if (reg->is_stack()) {
 770               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 771               __ ldrb(tmp1, Address(sp, ld_off));
 772               __ cbnz(tmp1, L_notNull);
 773             } else {
 774               __ cbnz(reg->as_Register(), L_notNull);
 775             }
 776             __ str(zr, Address(sp, st_off));
 777             __ b(L_null);
 778             __ bind(L_notNull);
 779             continue;
 780           }
 781           assert(off > 0, "offset in object should be positive");
 782           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 783           bool is_oop = is_reference_type(bt);
 784           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 785                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 786         }
 787       } while (vt != 0);
 788       // pass the buffer to the interpreter
 789       __ str(buf_oop, Address(sp, st_off));
 790       __ bind(L_null);
 791     }
 792   }
 793 
 794   __ mov(esp, sp); // Interp expects args on caller's expression stack
 795 
 796   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 797   __ br(rscratch1);
 798 }
 799 
 800 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 801 
 802 
 803   // Note: r19_sender_sp contains the senderSP on entry. We must
 804   // preserve it since we may do a i2c -> c2i transition if we lose a
 805   // race where compiled code goes non-entrant while we get args
 806   // ready.
 807 
 808   // Adapters are frameless.
 809 
 810   // An i2c adapter is frameless because the *caller* frame, which is
 811   // interpreted, routinely repairs its own esp (from
 812   // interpreter_frame_last_sp), even if a callee has modified the
 813   // stack pointer.  It also recalculates and aligns sp.
 814 
 815   // A c2i adapter is frameless because the *callee* frame, which is
 816   // interpreted, routinely repairs its caller's sp (from sender_sp,
 817   // which is set up via the senderSP register).
 818 
 819   // In other words, if *either* the caller or callee is interpreted, we can
 820   // get the stack pointer repaired after a call.
 821 
 822   // This is why c2i and i2c adapters cannot be indefinitely composed.
 823   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 824   // both caller and callee would be compiled methods, and neither would
 825   // clean up the stack pointer changes performed by the two adapters.
 826   // If this happens, control eventually transfers back to the compiled
 827   // caller, but with an uncorrected stack, causing delayed havoc.
 828 
 829   // Cut-out for having no stack args.
 830   int comp_words_on_stack = 0;
 831   if (comp_args_on_stack) {
 832      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 833      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 834      __ andr(sp, rscratch1, -16);
 835   }
 836 
 837   // Will jump to the compiled code just as if compiled code was doing it.
 838   // Pre-load the register-jump target early, to schedule it better.
 839   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 840 
 841 #if INCLUDE_JVMCI
 842   if (EnableJVMCI) {
 843     // check if this call should be routed towards a specific entry point
 844     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 845     Label no_alternative_target;
 846     __ cbz(rscratch2, no_alternative_target);
 847     __ mov(rscratch1, rscratch2);
 848     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 849     __ bind(no_alternative_target);
 850   }
 851 #endif // INCLUDE_JVMCI
 852 
 853   int total_args_passed = sig->length();
 854 
 855   // Now generate the shuffle code.
 856   for (int i = 0; i < total_args_passed; i++) {
 857     BasicType bt = sig->at(i)._bt;
 858     if (bt == T_VOID) {
 859       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 860       continue;
 861     }
 862 
 863     // Pick up 0, 1 or 2 words from SP+offset.
 864     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 865 
 866     // Load in argument order going down.
 867     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 868     // Point to interpreter value (vs. tag)
 869     int next_off = ld_off - Interpreter::stackElementSize;
 870     //
 871     //
 872     //
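    // For illustration, assuming Interpreter::stackElementSize == 8 and
    // total_args_passed == 3, the argument at i == 0 has ld_off == 16 and
    // next_off == 8; a T_LONG or T_DOUBLE is read from next_off (the lower
    // address holds the value), while a one-slot value is read from ld_off.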
 873     VMReg r_1 = regs[i].first();
 874     VMReg r_2 = regs[i].second();
 875     if (!r_1->is_valid()) {
 876       assert(!r_2->is_valid(), "");
 877       continue;
 878     }
 879     if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for the return address)
 881       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 882       if (!r_2->is_valid()) {
 883         // sign extend???
 884         __ ldrsw(rscratch2, Address(esp, ld_off));
 885         __ str(rscratch2, Address(sp, st_off));
 886       } else {
 887         //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative offsets, so the LSW is at the LOW address.
 896 
 897         // ld_off is MSW so get LSW
 898         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 899         __ ldr(rscratch2, Address(esp, offset));
 900         // st_off is LSW (i.e. reg.first())
 901          __ str(rscratch2, Address(sp, st_off));
 902        }
 903      } else if (r_1->is_Register()) {  // Register argument
 904        Register r = r_1->as_Register();
 905        if (r_2->is_valid()) {
 906          //
         // We are using two VMRegs. This can be either T_OBJECT,
         // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
         // two slots but only uses one for the T_LONG or T_DOUBLE case,
         // so we must adjust where to pick up the data to match the
         // interpreter.
 912 
 913         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 914 
 915          // this can be a misaligned move
 916          __ ldr(r, Address(esp, offset));
 917        } else {
 918          // sign extend and use a full word?
 919          __ ldrw(r, Address(esp, ld_off));
 920        }
 921      } else {
 922        if (!r_2->is_valid()) {
 923          __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 924        } else {
 925          __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 926        }
 927      }
 928    }
 929 
 930 
 931   __ mov(rscratch2, rscratch1);
 932   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 933   __ mov(rscratch1, rscratch2);
 934 
  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled", so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately, if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
 944 
 945   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 946   __ br(rscratch1);
 947 }
 948 
 949 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
 950   Register data = rscratch2;
 951   __ ic_check(1 /* end_alignment */);
 952   __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
 953 
 954   // Method might have been compiled since the call site was patched to
 955   // interpreted; if that is the case treat it as a miss so we can get
 956   // the call site corrected.
 957   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 958   __ cbz(rscratch1, skip_fixup);
 959   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 960 }
 961 
 962 // ---------------------------------------------------------------
 963 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
 964                                             int comp_args_on_stack,
 965                                             const GrowableArray<SigEntry>* sig,
 966                                             const VMRegPair* regs,
 967                                             const GrowableArray<SigEntry>* sig_cc,
 968                                             const VMRegPair* regs_cc,
 969                                             const GrowableArray<SigEntry>* sig_cc_ro,
 970                                             const VMRegPair* regs_cc_ro,
 971                                             address entry_address[AdapterBlob::ENTRY_COUNT],
 972                                             AdapterBlob*& new_adapter,
 973                                             bool allocate_code_blob) {
 974 
 975   entry_address[AdapterBlob::I2C] = __ pc();
 976   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
 977 
 978   // -------------------------------------------------------------------------
 979   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 980   // to the interpreter.  The args start out packed in the compiled layout.  They
 981   // need to be unpacked into the interpreter layout.  This will almost always
 982   // require some stack space.  We grow the current (compiled) stack, then repack
 983   // the args.  We  finally end in a jump to the generic interpreter entry point.
 984   // On exit from the interpreter, the interpreter will restore our SP (lest the
 985   // compiled code, which relies solely on SP and not FP, get sick).
 986 
 987   entry_address[AdapterBlob::C2I_Unverified] = __ pc();
 988   entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
 989   Label skip_fixup;
 990 
 991   gen_inline_cache_check(masm, skip_fixup);
 992 
 993   OopMapSet* oop_maps = new OopMapSet();
 994   int frame_complete = CodeOffsets::frame_never_safe;
 995   int frame_size_in_words = 0;
 996 
 997   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
 998   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
 999   entry_address[AdapterBlob::C2I_Inline_RO] = __ pc();
1000   if (regs_cc != regs_cc_ro) {
1001     // No class init barrier needed because method is guaranteed to be non-static
1002     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1003                     skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1004     skip_fixup.reset();
1005   }
1006 
1007   // Scalarized c2i adapter
1008   entry_address[AdapterBlob::C2I]        = __ pc();
1009   entry_address[AdapterBlob::C2I_Inline] = __ pc();
1010   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1011                   skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1012 
1013   // Non-scalarized c2i adapter
1014   if (regs != regs_cc) {
1015     entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
1016     Label inline_entry_skip_fixup;
1017     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1018 
1019     entry_address[AdapterBlob::C2I_Inline] = __ pc();
1020     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1021                     inline_entry_skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1022   }
1023 
1024   // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
1025   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1026   if (allocate_code_blob) {
1027     bool caller_must_gc_arguments = (regs != regs_cc);
1028     int entry_offset[AdapterHandlerEntry::ENTRIES_COUNT];
1029     assert(AdapterHandlerEntry::ENTRIES_COUNT == 7, "sanity");
1030     AdapterHandlerLibrary::address_to_offset(entry_address, entry_offset);
1031     new_adapter = AdapterBlob::create(masm->code(), entry_offset, frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1032   }
1033 }
1034 
1035 static int c_calling_convention_priv(const BasicType *sig_bt,
1036                                          VMRegPair *regs,
1037                                          int total_args_passed) {
1038 
1039 // We return the amount of VMRegImpl stack slots we need to reserve for all
1040 // the arguments NOT counting out_preserve_stack_slots.
1041 
1042     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1043       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
1044     };
1045     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1046       c_farg0, c_farg1, c_farg2, c_farg3,
1047       c_farg4, c_farg5, c_farg6, c_farg7
1048     };
1049 
1050     uint int_args = 0;
1051     uint fp_args = 0;
1052     uint stk_args = 0; // inc by 2 each time
1053 
1054     for (int i = 0; i < total_args_passed; i++) {
1055       switch (sig_bt[i]) {
1056       case T_BOOLEAN:
1057       case T_CHAR:
1058       case T_BYTE:
1059       case T_SHORT:
1060       case T_INT:
1061         if (int_args < Argument::n_int_register_parameters_c) {
1062           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1063         } else {
1064 #ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
1067           return -1;
1068 #endif
1069           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1070           stk_args += 2;
1071         }
1072         break;
1073       case T_LONG:
1074         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1075         // fall through
1076       case T_OBJECT:
1077       case T_ARRAY:
1078       case T_ADDRESS:
1079       case T_METADATA:
1080         if (int_args < Argument::n_int_register_parameters_c) {
1081           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1082         } else {
1083           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1084           stk_args += 2;
1085         }
1086         break;
1087       case T_FLOAT:
1088         if (fp_args < Argument::n_float_register_parameters_c) {
1089           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1090         } else {
1091 #ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
1094           return -1;
1095 #endif
1096           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1097           stk_args += 2;
1098         }
1099         break;
1100       case T_DOUBLE:
1101         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1102         if (fp_args < Argument::n_float_register_parameters_c) {
1103           regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1104         } else {
1105           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1106           stk_args += 2;
1107         }
1108         break;
1109       case T_VOID: // Halves of longs and doubles
1110         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1111         regs[i].set_bad();
1112         break;
1113       default:
1114         ShouldNotReachHere();
1115         break;
1116       }
1117     }
1118 
1119   return stk_args;
1120 }
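
// For illustration, with a hypothetical native signature described by
//   sig_bt = { T_ADDRESS, T_OBJECT, T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID }
// (roughly JNIEnv*, jobject, jint, jlong, jfloat, jdouble), the mapping above
// yields c_rarg0..c_rarg3 for the integer arguments and c_farg0, c_farg1 for
// the floating-point ones, with nothing passed on the stack (return value 0).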
1121 
1122 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1123                                              uint num_bits,
1124                                              uint total_args_passed) {
1125   // More than 8 argument inputs are not supported now.
1126   assert(total_args_passed <= Argument::n_float_register_parameters_c, "unsupported");
1127   assert(num_bits >= 64 && num_bits <= 2048 && is_power_of_2(num_bits), "unsupported");
1128 
1129   static const FloatRegister VEC_ArgReg[Argument::n_float_register_parameters_c] = {
1130     v0, v1, v2, v3, v4, v5, v6, v7
1131   };
1132 
  // On SVE, we use the same vector registers as the 128-bit vector registers on NEON.
1134   int next_reg_val = num_bits == 64 ? 1 : 3;
1135   for (uint i = 0; i < total_args_passed; i++) {
1136     VMReg vmreg = VEC_ArgReg[i]->as_VMReg();
1137     regs[i].set_pair(vmreg->next(next_reg_val), vmreg);
1138   }
1139   return 0;
1140 }
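
// For illustration, with num_bits == 128 each argument is described by the
// pair (v<i>, v<i>->next(3)), i.e. four 32-bit VMReg slots, so two 128-bit
// vector arguments land in v0 and v1; with num_bits == 64 only next(1) is
// used, covering a single 64-bit (D-register sized) value.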
1141 
1142 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1143                                          VMRegPair *regs,
1144                                          int total_args_passed)
1145 {
1146   int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
1147   guarantee(result >= 0, "Unsupported arguments configuration");
1148   return result;
1149 }
1150 
1151 
1152 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
1155   switch (ret_type) {
1156   case T_FLOAT:
1157     __ strs(v0, Address(rfp, -wordSize));
1158     break;
1159   case T_DOUBLE:
1160     __ strd(v0, Address(rfp, -wordSize));
1161     break;
1162   case T_VOID:  break;
1163   default: {
1164     __ str(r0, Address(rfp, -wordSize));
1165     }
1166   }
1167 }
1168 
1169 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
1172   switch (ret_type) {
1173   case T_FLOAT:
1174     __ ldrs(v0, Address(rfp, -wordSize));
1175     break;
1176   case T_DOUBLE:
1177     __ ldrd(v0, Address(rfp, -wordSize));
1178     break;
1179   case T_VOID:  break;
1180   default: {
1181     __ ldr(r0, Address(rfp, -wordSize));
1182     }
1183   }
1184 }
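
// Usage sketch, for illustration: the two routines above are paired around
// code that might clobber the return value. For a native method returning
// jdouble, save_native_result(masm, T_DOUBLE, 0) stores v0 at [rfp - wordSize]
// and the matching restore_native_result(masm, T_DOUBLE, 0) reloads it; the
// frame_slots argument is ignored on AArch64, as noted above.
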
1185 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1186   RegSet x;
1187   for ( int i = first_arg ; i < arg_count ; i++ ) {
1188     if (args[i].first()->is_Register()) {
1189       x = x + args[i].first()->as_Register();
1190     } else if (args[i].first()->is_FloatRegister()) {
1191       __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
1192     }
1193   }
1194   __ push(x, sp);
1195 }
1196 
1197 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1198   RegSet x;
1199   for ( int i = first_arg ; i < arg_count ; i++ ) {
1200     if (args[i].first()->is_Register()) {
1201       x = x + args[i].first()->as_Register();
1202     } else {
1203       ;
1204     }
1205   }
1206   __ pop(x, sp);
1207   for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1208     if (args[i].first()->is_Register()) {
1209       ;
1210     } else if (args[i].first()->is_FloatRegister()) {
1211       __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
1212     }
1213   }
1214 }
1215 
1216 static void verify_oop_args(MacroAssembler* masm,
1217                             const methodHandle& method,
1218                             const BasicType* sig_bt,
1219                             const VMRegPair* regs) {
1220   Register temp_reg = r19;  // not part of any compiled calling seq
1221   if (VerifyOops) {
1222     for (int i = 0; i < method->size_of_parameters(); i++) {
1223       if (sig_bt[i] == T_OBJECT ||
1224           sig_bt[i] == T_ARRAY) {
1225         VMReg r = regs[i].first();
1226         assert(r->is_valid(), "bad oop arg");
1227         if (r->is_stack()) {
1228           __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1229           __ verify_oop(temp_reg);
1230         } else {
1231           __ verify_oop(r->as_Register());
1232         }
1233       }
1234     }
1235   }
1236 }
1237 
1238 // on exit, sp points to the ContinuationEntry
1239 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
1240   assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
1241   assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
1242   assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
1243 
1244   stack_slots += (int)ContinuationEntry::size()/wordSize;
1245   __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata
1246 
1247   OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
1248 
1249   __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1250   __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
1251   __ mov(rscratch1, sp); // we can't use sp as the source in str
1252   __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1253 
1254   return map;
1255 }
1256 
1257 // on entry c_rarg1 points to the continuation
1258 //          sp points to ContinuationEntry
1259 //          c_rarg3 -- isVirtualThread
1260 static void fill_continuation_entry(MacroAssembler* masm) {
1261 #ifdef ASSERT
1262   __ movw(rscratch1, ContinuationEntry::cookie_value());
1263   __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
1264 #endif
1265 
1266   __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
1267   __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
1268   __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
1269   __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
1270   __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));
1271 
1272   __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1273   __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1274 
1275   __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
1276 }
1277 
1278 // on entry, sp points to the ContinuationEntry
1279 // on exit, rfp points to the spilled rfp in the entry frame
1280 static void continuation_enter_cleanup(MacroAssembler* masm) {
1281 #ifndef PRODUCT
1282   Label OK;
1283   __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1284   __ cmp(sp, rscratch1);
1285   __ br(Assembler::EQ, OK);
1286   __ stop("incorrect sp1");
1287   __ bind(OK);
1288 #endif
1289   __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1290   __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1291   __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
1292   __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
1293   __ add(rfp, sp, (int)ContinuationEntry::size());
1294 }
1295 
1296 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1297 // On entry: c_rarg1 -- the continuation object
1298 //           c_rarg2 -- isContinue
1299 //           c_rarg3 -- isVirtualThread
1300 static void gen_continuation_enter(MacroAssembler* masm,
1301                                  const methodHandle& method,
1302                                  const BasicType* sig_bt,
1303                                  const VMRegPair* regs,
1304                                  int& exception_offset,
1305                                  OopMapSet* oop_maps,
1306                                  int& frame_complete,
1307                                  int& stack_slots,
1308                                  int& interpreted_entry_offset,
1309                                  int& compiled_entry_offset) {
1310   //verify_oop_args(masm, method, sig_bt, regs);
1311   Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
1312 
1313   address start = __ pc();
1314 
1315   Label call_thaw, exit;
1316 
1317   // i2i entry used at interp_only_mode only
1318   interpreted_entry_offset = __ pc() - start;
1319   {
1320 
1321 #ifdef ASSERT
1322     Label is_interp_only;
1323     __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
1324     __ cbnzw(rscratch1, is_interp_only);
1325     __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1326     __ bind(is_interp_only);
1327 #endif
1328 
1329     // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1330     __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
1331     __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
1332     __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
1333     __ push_cont_fastpath(rthread);
1334 
1335     __ enter();
1336     stack_slots = 2; // will be adjusted in setup
1337     OopMap* map = continuation_enter_setup(masm, stack_slots);
1338     // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe.
1339     // That is okay: at the very worst we miss an async sample, and we are in interp_only_mode anyway.
1340 
1341     fill_continuation_entry(masm);
1342 
1343     __ cbnz(c_rarg2, call_thaw);
1344 
1345     const address tr_call = __ trampoline_call(resolve);
1346     if (tr_call == nullptr) {
1347       fatal("CodeCache is full at gen_continuation_enter");
1348     }
1349 
1350     oop_maps->add_gc_map(__ pc() - start, map);
1351     __ post_call_nop();
1352 
1353     __ b(exit);
1354 
1355     address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1356     if (stub == nullptr) {
1357       fatal("CodeCache is full at gen_continuation_enter");
1358     }
1359   }
1360 
1361   // compiled entry
1362   __ align(CodeEntryAlignment);
1363   compiled_entry_offset = __ pc() - start;
1364 
1365   __ enter();
1366   stack_slots = 2; // will be adjusted in setup
1367   OopMap* map = continuation_enter_setup(masm, stack_slots);
1368   frame_complete = __ pc() - start;
1369 
1370   fill_continuation_entry(masm);
1371 
1372   __ cbnz(c_rarg2, call_thaw);
1373 
1374   const address tr_call = __ trampoline_call(resolve);
1375   if (tr_call == nullptr) {
1376     fatal("CodeCache is full at gen_continuation_enter");
1377   }
1378 
1379   oop_maps->add_gc_map(__ pc() - start, map);
1380   __ post_call_nop();
1381 
1382   __ b(exit);
1383 
1384   __ bind(call_thaw);
1385 
1386   ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1387   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1388   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1389   ContinuationEntry::_return_pc_offset = __ pc() - start;
1390   __ post_call_nop();
1391 
1392   __ bind(exit);
1393   ContinuationEntry::_cleanup_offset = __ pc() - start;
1394   continuation_enter_cleanup(masm);
1395   __ leave();
1396   __ ret(lr);
1397 
1398   /// exception handling
1399 
1400   exception_offset = __ pc() - start;
1401   {
1402       __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1403 
1404       continuation_enter_cleanup(masm);
1405 
1406       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1407       __ authenticate_return_address(c_rarg1);
1408       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1409 
1410       // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1411 
1412       __ mov(r1, r0); // the exception handler
1413       __ mov(r0, r19); // restore return value containing the exception oop
1414       __ verify_oop(r0);
1415 
1416       __ leave();
1417       __ mov(r3, lr);
1418       __ br(r1); // the exception handler
1419   }
1420 
1421   address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1422   if (stub == nullptr) {
1423     fatal("CodeCache is full at gen_continuation_enter");
1424   }
1425 }
1426 
1427 static void gen_continuation_yield(MacroAssembler* masm,
1428                                    const methodHandle& method,
1429                                    const BasicType* sig_bt,
1430                                    const VMRegPair* regs,
1431                                    OopMapSet* oop_maps,
1432                                    int& frame_complete,
1433                                    int& stack_slots,
1434                                    int& compiled_entry_offset) {
1435     enum layout {
1436       rfp_off1,
1437       rfp_off2,
1438       lr_off,
1439       lr_off2,
1440       framesize // inclusive of return address
1441     };
1442     // assert(is_even(framesize/2), "sp not 16-byte aligned");
1443     stack_slots = framesize / VMRegImpl::slots_per_word;
1444     assert(stack_slots == 2, "recheck layout");
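     // Editorial note (not from the original comments): with the layout enum above,
     // framesize == 4 stack slots (two 32-bit VMReg slots each for the saved rfp and lr),
     // and with two slots per 64-bit word this gives stack_slots == 4 / 2 == 2,
     // which is exactly what the assert checks.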
1445 
1446     address start = __ pc();
1447 
1448     compiled_entry_offset = __ pc() - start;
1449     __ enter();
1450 
1451     __ mov(c_rarg1, sp);
1452 
1453     frame_complete = __ pc() - start;
1454     address the_pc = __ pc();
1455 
1456     __ post_call_nop(); // this must come exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup
1457 
1458     __ mov(c_rarg0, rthread);
1459     __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
1460     __ call_VM_leaf(Continuation::freeze_entry(), 2);
1461     __ reset_last_Java_frame(true);
1462 
1463     Label pinned;
1464 
1465     __ cbnz(r0, pinned);
1466 
1467     // We've succeeded, set sp to the ContinuationEntry
1468     __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1469     __ mov(sp, rscratch1);
1470     continuation_enter_cleanup(masm);
1471 
1472     __ bind(pinned); // pinned -- return to caller
1473 
1474     // handle pending exception thrown by freeze
1475     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1476     Label ok;
1477     __ cbz(rscratch1, ok);
1478     __ leave();
1479     __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1480     __ br(rscratch1);
1481     __ bind(ok);
1482 
1483     __ leave();
1484     __ ret(lr);
1485 
1486     OopMap* map = new OopMap(framesize, 1);
1487     oop_maps->add_gc_map(the_pc - start, map);
1488 }
1489 
1490 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1491   ::continuation_enter_cleanup(masm);
1492 }
1493 
1494 static void gen_special_dispatch(MacroAssembler* masm,
1495                                  const methodHandle& method,
1496                                  const BasicType* sig_bt,
1497                                  const VMRegPair* regs) {
1498   verify_oop_args(masm, method, sig_bt, regs);
1499   vmIntrinsics::ID iid = method->intrinsic_id();
1500 
1501   // Now write the args into the outgoing interpreter space
1502   bool     has_receiver   = false;
1503   Register receiver_reg   = noreg;
1504   int      member_arg_pos = -1;
1505   Register member_reg     = noreg;
1506   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1507   if (ref_kind != 0) {
1508     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1509     member_reg = r19;  // known to be free at this point
1510     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1511   } else if (iid == vmIntrinsics::_invokeBasic) {
1512     has_receiver = true;
1513   } else if (iid == vmIntrinsics::_linkToNative) {
1514     member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1515     member_reg = r19;  // known to be free at this point
1516   } else {
1517     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1518   }
1519 
1520   if (member_reg != noreg) {
1521     // Load the member_arg into register, if necessary.
1522     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1523     VMReg r = regs[member_arg_pos].first();
1524     if (r->is_stack()) {
1525       __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1526     } else {
1527       // no data motion is needed
1528       member_reg = r->as_Register();
1529     }
1530   }
1531 
1532   if (has_receiver) {
1533     // Make sure the receiver is loaded into a register.
1534     assert(method->size_of_parameters() > 0, "oob");
1535     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1536     VMReg r = regs[0].first();
1537     assert(r->is_valid(), "bad receiver arg");
1538     if (r->is_stack()) {
1539       // Porting note:  This assumes that compiled calling conventions always
1540       // pass the receiver oop in a register.  If this is not true on some
1541       // platform, pick a temp and load the receiver from stack.
1542       fatal("receiver always in a register");
1543       receiver_reg = r2;  // known to be free at this point
1544       __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1545     } else {
1546       // no data motion is needed
1547       receiver_reg = r->as_Register();
1548     }
1549   }
1550 
1551   // Figure out which address we are really jumping to:
1552   MethodHandles::generate_method_handle_dispatch(masm, iid,
1553                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1554 }
1555 
1556 // ---------------------------------------------------------------------------
1557 // Generate a native wrapper for a given method.  The method takes arguments
1558 // in the Java compiled code convention, marshals them to the native
1559 // convention (handlizes oops, etc), transitions to native, makes the call,
1560 // returns to java state (possibly blocking), unhandlizes any result and
1561 // returns.
1562 //
1563 // Critical native functions are a shorthand for the use of
1564 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1565 // functions.  The wrapper is expected to unpack the arguments before
1566 // passing them to the callee. Critical native functions leave the state _in_Java,
1567 // since they block out GC.
1568 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1569 // block and the check for pending exceptions, since it's impossible for them
1570 // to be thrown.
1571 //
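//
// Illustrative outline (editorial sketch, not taken verbatim from the code below):
// for a plain, non-intrinsic JNI method the generated wrapper roughly does:
//   1. ic_check, optional clinit barrier, stack bang, then set up the wrapper frame
//   2. shuffle Java args into the C convention, handlizing oops (and the class
//      mirror for static methods)
//   3. optionally lock, record last_Java_frame, switch to _thread_in_native
//   4. call the native function
//   5. switch to _thread_in_native_trans, poll for safepoint/suspend requests,
//      then return to _thread_in_Java (reguarding the stack if needed)
//   6. optionally unlock, resolve a returned jobject, reset the handle block,
//      and return, forwarding any pending exception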
1572 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1573                                                 const methodHandle& method,
1574                                                 int compile_id,
1575                                                 BasicType* in_sig_bt,
1576                                                 VMRegPair* in_regs,
1577                                                 BasicType ret_type) {
1578   if (method->is_continuation_native_intrinsic()) {
1579     int exception_offset = -1;
1580     OopMapSet* oop_maps = new OopMapSet();
1581     int frame_complete = -1;
1582     int stack_slots = -1;
1583     int interpreted_entry_offset = -1;
1584     int vep_offset = -1;
1585     if (method->is_continuation_enter_intrinsic()) {
1586       gen_continuation_enter(masm,
1587                              method,
1588                              in_sig_bt,
1589                              in_regs,
1590                              exception_offset,
1591                              oop_maps,
1592                              frame_complete,
1593                              stack_slots,
1594                              interpreted_entry_offset,
1595                              vep_offset);
1596     } else if (method->is_continuation_yield_intrinsic()) {
1597       gen_continuation_yield(masm,
1598                              method,
1599                              in_sig_bt,
1600                              in_regs,
1601                              oop_maps,
1602                              frame_complete,
1603                              stack_slots,
1604                              vep_offset);
1605     } else {
1606       guarantee(false, "Unknown Continuation native intrinsic");
1607     }
1608 
1609 #ifdef ASSERT
1610     if (method->is_continuation_enter_intrinsic()) {
1611       assert(interpreted_entry_offset != -1, "Must be set");
1612       assert(exception_offset != -1,         "Must be set");
1613     } else {
1614       assert(interpreted_entry_offset == -1, "Must be unset");
1615       assert(exception_offset == -1,         "Must be unset");
1616     }
1617     assert(frame_complete != -1,    "Must be set");
1618     assert(stack_slots != -1,       "Must be set");
1619     assert(vep_offset != -1,        "Must be set");
1620 #endif
1621 
1622     __ flush();
1623     nmethod* nm = nmethod::new_native_nmethod(method,
1624                                               compile_id,
1625                                               masm->code(),
1626                                               vep_offset,
1627                                               frame_complete,
1628                                               stack_slots,
1629                                               in_ByteSize(-1),
1630                                               in_ByteSize(-1),
1631                                               oop_maps,
1632                                               exception_offset);
1633     if (nm == nullptr) return nm;
1634     if (method->is_continuation_enter_intrinsic()) {
1635       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1636     } else if (method->is_continuation_yield_intrinsic()) {
1637       _cont_doYield_stub = nm;
1638     } else {
1639       guarantee(false, "Unknown Continuation native intrinsic");
1640     }
1641     return nm;
1642   }
1643 
1644   if (method->is_method_handle_intrinsic()) {
1645     vmIntrinsics::ID iid = method->intrinsic_id();
1646     intptr_t start = (intptr_t)__ pc();
1647     int vep_offset = ((intptr_t)__ pc()) - start;
1648 
1649     // First instruction must be a nop as it may need to be patched on deoptimisation
1650     __ nop();
1651     gen_special_dispatch(masm,
1652                          method,
1653                          in_sig_bt,
1654                          in_regs);
1655     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1656     __ flush();
1657     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1658     return nmethod::new_native_nmethod(method,
1659                                        compile_id,
1660                                        masm->code(),
1661                                        vep_offset,
1662                                        frame_complete,
1663                                        stack_slots / VMRegImpl::slots_per_word,
1664                                        in_ByteSize(-1),
1665                                        in_ByteSize(-1),
1666                                        nullptr);
1667   }
1668   address native_func = method->native_function();
1669   assert(native_func != nullptr, "must have function");
1670 
1671   // An OopMap for lock (and class if static)
1672   OopMapSet *oop_maps = new OopMapSet();
1673   intptr_t start = (intptr_t)__ pc();
1674 
1675   // We have received a description of where all the java args are located
1676   // on entry to the wrapper. We need to convert these args to where
1677   // the jni function will expect them. To figure out where they go
1678   // we convert the java signature to a C signature by inserting
1679   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1680 
1681   const int total_in_args = method->size_of_parameters();
1682   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1683 
1684   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1685   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1686 
1687   int argc = 0;
1688   out_sig_bt[argc++] = T_ADDRESS;
1689   if (method->is_static()) {
1690     out_sig_bt[argc++] = T_OBJECT;
1691   }
1692 
1693   for (int i = 0; i < total_in_args ; i++ ) {
1694     out_sig_bt[argc++] = in_sig_bt[i];
1695   }
1696 
1697   // Now figure out where the args must be stored and how much stack space
1698   // they require.
1699   int out_arg_slots;
1700   out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1701 
1702   if (out_arg_slots < 0) {
1703     return nullptr;
1704   }
1705 
1706   // Compute framesize for the wrapper.  We need to handlize all oops in
1707   // incoming registers
1708 
1709   // Calculate the total number of stack slots we will need.
1710 
1711   // First count the abi requirement plus all of the outgoing args
1712   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1713 
1714   // Now the space for the inbound oop handle area
1715   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1716 
1717   int oop_handle_offset = stack_slots;
1718   stack_slots += total_save_slots;
1719 
1720   // Now any space we need for handlizing a klass if static method
1721 
1722   int klass_slot_offset = 0;
1723   int klass_offset = -1;
1724   int lock_slot_offset = 0;
1725   bool is_static = false;
1726 
1727   if (method->is_static()) {
1728     klass_slot_offset = stack_slots;
1729     stack_slots += VMRegImpl::slots_per_word;
1730     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1731     is_static = true;
1732   }
1733 
1734   // Plus a lock if needed
1735 
1736   if (method->is_synchronized()) {
1737     lock_slot_offset = stack_slots;
1738     stack_slots += VMRegImpl::slots_per_word;
1739   }
1740 
1741   // Now a place (+2) to save return values or temp during shuffling
1742   // + 4 for return address (which we own) and saved rfp
1743   stack_slots += 6;
1744 
1745   // Ok The space we have allocated will look like:
1746   //
1747   //
1748   // FP-> |                     |
1749   //      |---------------------|
1750   //      | 2 slots for moves   |
1751   //      |---------------------|
1752   //      | lock box (if sync)  |
1753   //      |---------------------| <- lock_slot_offset
1754   //      | klass (if static)   |
1755   //      |---------------------| <- klass_slot_offset
1756   //      | oopHandle area      |
1757   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1758   //      | outbound memory     |
1759   //      | based arguments     |
1760   //      |                     |
1761   //      |---------------------|
1762   //      |                     |
1763   // SP-> | out_preserved_slots |
1764   //
1765   //
1766 
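       // Worked example (editorial; the concrete numbers are illustrative assumptions,
       // not taken from this file): for a static synchronized method with no
       // stack-passed outgoing args, assuming out_preserve_stack_slots() == 0 and
       // out_arg_slots == 0, we get 0 + 16 (oop handle area, 8 words) + 2 (klass)
       // + 2 (lock) + 6 (moves plus saved lr/rfp) = 26 slots. Assuming 16-byte stack
       // alignment (StackAlignmentInSlots == 4), align_up(26, 4) rounds this to
       // 28 slots == 112 bytes of frame.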
1767 
1768   // Now compute the actual number of stack words we need, rounding to keep the
1769   // stack properly aligned.
1770   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1771 
1772   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1773 
1774   // First thing make an ic check to see if we should even be here
1775 
1776   // We are free to use all registers as temps without saving them and
1777   // restoring them except rfp. rfp is the only callee save register
1778   // as far as the interpreter and the compiler(s) are concerned.
1779 
1780   const Register receiver = j_rarg0;
1781 
1782   Label exception_pending;
1783 
1784   assert_different_registers(receiver, rscratch1);
1785   __ verify_oop(receiver);
1786   __ ic_check(8 /* end_alignment */);
1787 
1788   // Verified entry point must be aligned
1789   int vep_offset = ((intptr_t)__ pc()) - start;
1790 
1791   // If we have to make this method not-entrant we'll overwrite its
1792   // first instruction with a jump.  For this action to be legal we
1793   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1794   // SVC, HVC, or SMC.  Make it a NOP.
1795   __ nop();
1796 
1797   if (method->needs_clinit_barrier()) {
1798     assert(VM_Version::supports_fast_class_init_checks(), "sanity");
1799     Label L_skip_barrier;
1800     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1801     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1802     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1803 
1804     __ bind(L_skip_barrier);
1805   }
1806 
1807   // Generate stack overflow check
1808   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1809 
1810   // Generate a new frame for the wrapper.
1811   __ enter();
1812   // -2 because return address is already present and so is saved rfp
1813   __ sub(sp, sp, stack_size - 2*wordSize);
1814 
1815   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1816   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1817 
1818   // Frame is now completed as far as size and linkage.
1819   int frame_complete = ((intptr_t)__ pc()) - start;
1820 
1821   // We use r20 as the oop handle for the receiver/klass
1822   // It is callee save so it survives the call to native
1823 
1824   const Register oop_handle_reg = r20;
1825 
1826   //
1827   // We immediately shuffle the arguments so that for any vm call we have to
1828   // make from here on out (sync slow path, jvmti, etc.) we will have
1829   // captured the oops from our caller and have a valid oopMap for
1830   // them.
1831 
1832   // -----------------
1833   // The Grand Shuffle
1834 
1835   // The Java calling convention is either equal (linux) or denser (win64) than the
1836   // c calling convention. However, because of the jni_env argument, the c calling
1837   // convention always has at least one more (and two for static) arguments than Java.
1838   // Therefore if we move the args from java -> c backwards then we will never have
1839   // a register->register conflict and we don't have to build a dependency graph
1840   // and figure out how to break any cycles.
1841   //
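  // Editorial intuition sketch (not part of the original comment): with the JNIEnv*
  // (and mirror) prefix, Java arg i becomes C arg i+1 (or i+2 for a static method),
  // so iterating from the last Java arg downwards writes every destination before
  // the value that previously occupied it could still be needed as a source.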
1842 
1843   // Record esp-based slot for receiver on stack for non-static methods
1844   int receiver_offset = -1;
1845 
1846   // This is a trick. We double the stack slots so we can claim
1847   // the oops in the caller's frame. Since we are sure to have
1848   // more args than the caller, doubling is enough to make
1849   // sure we can capture all the incoming oop args from the
1850   // caller.
1851   //
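  // Editorial note (illustrative, assumed numbers): if this frame needs, say, 28
  // slots, the map is sized for 56 so that incoming oop args still sitting in the
  // caller's frame can be described with slot indices beyond the end of our own frame.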
1852   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1853 
1854   // Mark location of rfp (someday)
1855   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1856 
1857 
1858   int float_args = 0;
1859   int int_args = 0;
1860 
1861 #ifdef ASSERT
1862   bool reg_destroyed[Register::number_of_registers];
1863   bool freg_destroyed[FloatRegister::number_of_registers];
1864   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1865     reg_destroyed[r] = false;
1866   }
1867   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1868     freg_destroyed[f] = false;
1869   }
1870 
1871 #endif /* ASSERT */
1872 
1873   // For JNI natives the incoming and outgoing registers are offset upwards.
1874   GrowableArray<int> arg_order(2 * total_in_args);
1875 
1876   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1877     arg_order.push(i);
1878     arg_order.push(c_arg);
1879   }
1880 
1881   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1882     int i = arg_order.at(ai);
1883     int c_arg = arg_order.at(ai + 1);
1884     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1885     assert(c_arg != -1 && i != -1, "wrong order");
1886 #ifdef ASSERT
1887     if (in_regs[i].first()->is_Register()) {
1888       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1889     } else if (in_regs[i].first()->is_FloatRegister()) {
1890       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1891     }
1892     if (out_regs[c_arg].first()->is_Register()) {
1893       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1894     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1895       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1896     }
1897 #endif /* ASSERT */
1898     switch (in_sig_bt[i]) {
1899       case T_ARRAY:
1900       case T_OBJECT:
1901         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1902                        ((i == 0) && (!is_static)),
1903                        &receiver_offset);
1904         int_args++;
1905         break;
1906       case T_VOID:
1907         break;
1908 
1909       case T_FLOAT:
1910         __ float_move(in_regs[i], out_regs[c_arg]);
1911         float_args++;
1912         break;
1913 
1914       case T_DOUBLE:
1915         assert( i + 1 < total_in_args &&
1916                 in_sig_bt[i + 1] == T_VOID &&
1917                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1918         __ double_move(in_regs[i], out_regs[c_arg]);
1919         float_args++;
1920         break;
1921 
1922       case T_LONG :
1923         __ long_move(in_regs[i], out_regs[c_arg]);
1924         int_args++;
1925         break;
1926 
1927       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1928 
1929       default:
1930         __ move32_64(in_regs[i], out_regs[c_arg]);
1931         int_args++;
1932     }
1933   }
1934 
1935   // point c_arg at the first arg that is already loaded in case we
1936   // need to spill before we call out
1937   int c_arg = total_c_args - total_in_args;
1938 
1939   // Pre-load a static method's oop into c_rarg1.
1940   if (method->is_static()) {
1941 
1942     //  load oop into a register
1943     __ movoop(c_rarg1,
1944               JNIHandles::make_local(method->method_holder()->java_mirror()));
1945 
1946     // Now handlize the static class mirror; it's known to be not-null.
1947     __ str(c_rarg1, Address(sp, klass_offset));
1948     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1949 
1950     // Now get the handle
1951     __ lea(c_rarg1, Address(sp, klass_offset));
1952     // and protect the arg if we must spill
1953     c_arg--;
1954   }
1955 
1956   // Change state to native (we save the return address in the thread, since it might not
1957   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1958   // points into the right code segment. It does not have to be the correct return pc.
1959   // We use the same pc/oopMap repeatedly when we call out.
1960 
1961   Label native_return;
1962   if (method->is_object_wait0()) {
1963     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
1964     __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1965   } else {
1966     intptr_t the_pc = (intptr_t) __ pc();
1967     oop_maps->add_gc_map(the_pc - start, map);
1968 
1969     __ set_last_Java_frame(sp, noreg, __ pc(), rscratch1);
1970   }
1971 
1972   Label dtrace_method_entry, dtrace_method_entry_done;
1973   if (DTraceMethodProbes) {
1974     __ b(dtrace_method_entry);
1975     __ bind(dtrace_method_entry_done);
1976   }
1977 
1978   // RedefineClasses() tracing support for obsolete method entry
1979   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1980     // protect the args we've loaded
1981     save_args(masm, total_c_args, c_arg, out_regs);
1982     __ mov_metadata(c_rarg1, method());
1983     __ call_VM_leaf(
1984       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1985       rthread, c_rarg1);
1986     restore_args(masm, total_c_args, c_arg, out_regs);
1987   }
1988 
1989   // Lock a synchronized method
1990 
1991   // Register definitions used by locking and unlocking
1992 
1993   const Register swap_reg = r0;
1994   const Register obj_reg  = r19;  // Will contain the oop
1995   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1996   const Register old_hdr  = r13;  // value of old header at unlock time
1997   const Register lock_tmp = r14;  // Temporary used by fast_lock/unlock
1998   const Register tmp = lr;
1999 
2000   Label slow_path_lock;
2001   Label lock_done;
2002 
2003   if (method->is_synchronized()) {
2004     // Get the handle (the 2nd argument)
2005     __ mov(oop_handle_reg, c_rarg1);
2006 
2007     // Get address of the box
2008 
2009     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2010 
2011     // Load the oop from the handle
2012     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2013 
2014     __ fast_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
2015 
2016     // Slow path will re-enter here
2017     __ bind(lock_done);
2018   }
2019 
2020 
2021   // Finally just about ready to make the JNI call
2022 
2023   // get JNIEnv* which is first argument to native
2024   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
2025 
2026   // Now set thread in native
2027   __ mov(rscratch1, _thread_in_native);
2028   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2029   __ stlrw(rscratch1, rscratch2);
2030 
2031   __ rt_call(native_func);
2032 
2033   // Verify or restore cpu control state after JNI call
2034   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
2035 
2036   // Unpack native results.
2037   switch (ret_type) {
2038   case T_BOOLEAN: __ c2bool(r0);                     break;
2039   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
2040   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
2041   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
2042   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
2043   case T_DOUBLE :
2044   case T_FLOAT  :
2045     // Result is in v0; we'll save as needed
2046     break;
2047   case T_ARRAY:                 // Really a handle
2048   case T_OBJECT:                // Really a handle
2049       break; // can't de-handlize until after safepoint check
2050   case T_VOID: break;
2051   case T_LONG: break;
2052   default       : ShouldNotReachHere();
2053   }
2054 
2055   Label safepoint_in_progress, safepoint_in_progress_done;
2056 
2057   // Switch thread to "native transition" state before reading the synchronization state.
2058   // This additional state is necessary because reading and testing the synchronization
2059   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2060   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2061   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2062   //     Thread A is resumed to finish this native method, but doesn't block here since it
2063   //     didn't see any synchronization in progress, and escapes.
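  // Editorial summary of the fix implemented below: publish _thread_in_native_trans,
  // force the store out with a barrier (unless UseSystemMemoryBarrier covers it), and
  // only then read the safepoint/suspend state, so the VM side is guaranteed to observe
  // the transition before this thread can escape.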
2064   __ mov(rscratch1, _thread_in_native_trans);
2065 
2066   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
2067 
2068   // Force this write out before the read below
2069   if (!UseSystemMemoryBarrier) {
2070     __ dmb(Assembler::ISH);
2071   }
2072 
2073   __ verify_sve_vector_length();
2074 
2075   // Check for safepoint operation in progress and/or pending suspend requests.
2076   {
2077     // No need for acquire as Java threads always disarm themselves.
2078     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
2079     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
2080     __ cbnzw(rscratch1, safepoint_in_progress);
2081     __ bind(safepoint_in_progress_done);
2082   }
2083 
2084   // change thread state
2085   __ mov(rscratch1, _thread_in_Java);
2086   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2087   __ stlrw(rscratch1, rscratch2);
2088 
2089   if (method->is_object_wait0()) {
2090     // Check preemption for Object.wait()
2091     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
2092     __ cbz(rscratch1, native_return);
2093     __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
2094     __ br(rscratch1);
2095     __ bind(native_return);
2096 
2097     intptr_t the_pc = (intptr_t) __ pc();
2098     oop_maps->add_gc_map(the_pc - start, map);
2099   }
2100 
2101   Label reguard;
2102   Label reguard_done;
2103   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
2104   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
2105   __ br(Assembler::EQ, reguard);
2106   __ bind(reguard_done);
2107 
2108   // native result, if any, is live
2109 
2110   // Unlock
2111   Label unlock_done;
2112   Label slow_path_unlock;
2113   if (method->is_synchronized()) {
2114 
2115     // Get locked oop from the handle we passed to jni
2116     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2117 
2118     // Must save r0 if it is live now because cmpxchg must use it
2119     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2120       save_native_result(masm, ret_type, stack_slots);
2121     }
2122 
2123     __ fast_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
2124 
2125     // slow path re-enters here
2126     __ bind(unlock_done);
2127     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2128       restore_native_result(masm, ret_type, stack_slots);
2129     }
2130   }
2131 
2132   Label dtrace_method_exit, dtrace_method_exit_done;
2133   if (DTraceMethodProbes) {
2134     __ b(dtrace_method_exit);
2135     __ bind(dtrace_method_exit_done);
2136   }
2137 
2138   __ reset_last_Java_frame(false);
2139 
2140   // Unbox oop result, e.g. JNIHandles::resolve result.
2141   if (is_reference_type(ret_type)) {
2142     __ resolve_jobject(r0, r1, r2);
2143   }
2144 
2145   if (CheckJNICalls) {
2146     // clear_pending_jni_exception_check
2147     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2148   }
2149 
2150   // reset handle block
2151   __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2152   __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
2153 
2154   __ leave();
2155 
2156   #if INCLUDE_JFR
2157   // We need to do a poll test after unwind in case the sampler
2158   // managed to sample the native frame after returning to Java.
2159   Label L_return;
2160   __ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
2161   address poll_test_pc = __ pc();
2162   __ relocate(relocInfo::poll_return_type);
2163   __ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), L_return);
2164   assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
2165     "polling page return stub not created yet");
2166   address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
2167   __ adr(rscratch1, InternalAddress(poll_test_pc));
2168   __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));
2169   __ far_jump(RuntimeAddress(stub));
2170   __ bind(L_return);
2171 #endif // INCLUDE_JFR
2172 
2173   // Any exception pending?
2174   __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2175   __ cbnz(rscratch1, exception_pending);
2176 
2177   // We're done
2178   __ ret(lr);
2179 
2180   // Unexpected paths are out of line and go here
2181 
2182   // forward the exception
2183   __ bind(exception_pending);
2184 
2185   // and forward the exception
2186   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2187 
2188   // Slow path locking & unlocking
2189   if (method->is_synchronized()) {
2190 
2191     __ block_comment("Slow path lock {");
2192     __ bind(slow_path_lock);
2193 
2194     // last_Java_frame is set up. No exceptions, so do a vanilla call, not call_VM
2195     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2196 
2197     // protect the args we've loaded
2198     save_args(masm, total_c_args, c_arg, out_regs);
2199 
2200     __ mov(c_rarg0, obj_reg);
2201     __ mov(c_rarg1, lock_reg);
2202     __ mov(c_rarg2, rthread);
2203 
2204     // Not a leaf but we have last_Java_frame setup as we want.
2205     // We don't want to unmount in case of contention since that would complicate preserving
2206     // the arguments that had already been marshalled into the native convention. So we force
2207     // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
2208     // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
2209     __ push_cont_fastpath();
2210     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2211     __ pop_cont_fastpath();
2212     restore_args(masm, total_c_args, c_arg, out_regs);
2213 
2214 #ifdef ASSERT
2215     { Label L;
2216       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2217       __ cbz(rscratch1, L);
2218       __ stop("no pending exception allowed on exit from monitorenter");
2219       __ bind(L);
2220     }
2221 #endif
2222     __ b(lock_done);
2223 
2224     __ block_comment("} Slow path lock");
2225 
2226     __ block_comment("Slow path unlock {");
2227     __ bind(slow_path_unlock);
2228 
2229     // If we haven't already saved the native result we must save it now, as the
2230     // floating-point registers are still exposed.
2231 
2232     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2233       save_native_result(masm, ret_type, stack_slots);
2234     }
2235 
2236     __ mov(c_rarg2, rthread);
2237     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2238     __ mov(c_rarg0, obj_reg);
2239 
2240     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2241     // NOTE that obj_reg == r19 currently
2242     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2243     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2244 
2245     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2246 
2247 #ifdef ASSERT
2248     {
2249       Label L;
2250       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2251       __ cbz(rscratch1, L);
2252       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2253       __ bind(L);
2254     }
2255 #endif /* ASSERT */
2256 
2257     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2258 
2259     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2260       restore_native_result(masm, ret_type, stack_slots);
2261     }
2262     __ b(unlock_done);
2263 
2264     __ block_comment("} Slow path unlock");
2265 
2266   } // synchronized
2267 
2268   // SLOW PATH Reguard the stack if needed
2269 
2270   __ bind(reguard);
2271   save_native_result(masm, ret_type, stack_slots);
2272   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2273   restore_native_result(masm, ret_type, stack_slots);
2274   // and continue
2275   __ b(reguard_done);
2276 
2277   // SLOW PATH safepoint
2278   {
2279     __ block_comment("safepoint {");
2280     __ bind(safepoint_in_progress);
2281 
2282     // Don't use call_VM as it will see a possible pending exception and forward it
2283     // and never return here, preventing us from clearing _last_native_pc down below.
2284     //
2285     save_native_result(masm, ret_type, stack_slots);
2286     __ mov(c_rarg0, rthread);
2287 #ifndef PRODUCT
2288   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2289 #endif
2290     __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2291     __ blr(rscratch1);
2292 
2293     // Restore any method result value
2294     restore_native_result(masm, ret_type, stack_slots);
2295 
2296     __ b(safepoint_in_progress_done);
2297     __ block_comment("} safepoint");
2298   }
2299 
2300   // SLOW PATH dtrace support
2301   if (DTraceMethodProbes) {
2302     {
2303       __ block_comment("dtrace entry {");
2304       __ bind(dtrace_method_entry);
2305 
2306       // We have all of the arguments set up at this point. We must not touch any
2307       // argument registers here (what if we save/restore them when there are no oops?).
2308 
2309       save_args(masm, total_c_args, c_arg, out_regs);
2310       __ mov_metadata(c_rarg1, method());
2311       __ call_VM_leaf(
2312         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2313         rthread, c_rarg1);
2314       restore_args(masm, total_c_args, c_arg, out_regs);
2315       __ b(dtrace_method_entry_done);
2316       __ block_comment("} dtrace entry");
2317     }
2318 
2319     {
2320       __ block_comment("dtrace exit {");
2321       __ bind(dtrace_method_exit);
2322       save_native_result(masm, ret_type, stack_slots);
2323       __ mov_metadata(c_rarg1, method());
2324       __ call_VM_leaf(
2325         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2326         rthread, c_rarg1);
2327       restore_native_result(masm, ret_type, stack_slots);
2328       __ b(dtrace_method_exit_done);
2329       __ block_comment("} dtrace exit");
2330     }
2331   }
2332 
2333   __ flush();
2334 
2335   nmethod *nm = nmethod::new_native_nmethod(method,
2336                                             compile_id,
2337                                             masm->code(),
2338                                             vep_offset,
2339                                             frame_complete,
2340                                             stack_slots / VMRegImpl::slots_per_word,
2341                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2342                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2343                                             oop_maps);
2344 
2345   return nm;
2346 }
2347 
2348 // this function returns the adjustment (in number of words) to a c2i adapter
2349 // activation for use during deoptimization
2350 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2351   assert(callee_locals >= callee_parameters,
2352           "test and remove; got more parms than locals");
2353   if (callee_locals < callee_parameters)
2354     return 0;                   // No adjustment for negative locals
2355   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2356   // diff is counted in stack words
2357   return align_up(diff, 2);
2358 }
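// Editorial worked example (assuming Interpreter::stackElementWords == 1 on this
// 64-bit port): callee_parameters == 2 and callee_locals == 5 gives
// diff == (5 - 2) * 1 == 3 words, and align_up(3, 2) == 4, so the caller's frame
// is extended by 4 words to make room for the extra locals.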
2359 
2360 
2361 //------------------------------generate_deopt_blob----------------------------
2362 void SharedRuntime::generate_deopt_blob() {
2363   // Allocate space for the code
2364   ResourceMark rm;
2365   // Setup code generation tools
2366   int pad = 0;
2367 #if INCLUDE_JVMCI
2368   if (EnableJVMCI) {
2369     pad += 512; // Increase the buffer size when compiling for JVMCI
2370   }
2371 #endif
2372   const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
2373   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id);
2374   if (blob != nullptr) {
2375     _deopt_blob = blob->as_deoptimization_blob();
2376     return;
2377   }
2378 
2379   CodeBuffer buffer(name, 2048+pad, 1024);
2380   MacroAssembler* masm = new MacroAssembler(&buffer);
2381   int frame_size_in_words;
2382   OopMap* map = nullptr;
2383   OopMapSet *oop_maps = new OopMapSet();
2384   RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2385 
2386   // -------------
2387   // This code enters when returning to a de-optimized nmethod.  A return
2388   // address has been pushed on the stack, and return values are in
2389   // registers.
2390   // If we are doing a normal deopt then we were called from the patched
2391   // nmethod from the point we returned to the nmethod. So the return
2392   // address on the stack is wrong by NativeCall::instruction_size
2393   // We will adjust the value so it looks like we have the original return
2394   // address on the stack (like when we eagerly deoptimized).
2395   // In the case of an exception pending when deoptimizing, we enter
2396   // with a return address on the stack that points after the call we patched
2397   // into the exception handler. We have the following register state from,
2398   // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2399   //    r0: exception oop
2400   //    r19: exception handler
2401   //    r3: throwing pc
2402   // So in this case we simply jam r3 into the useless return address and
2403   // the stack looks just like we want.
2404   //
2405   // At this point we need to de-opt.  We save the argument return
2406   // registers.  We call the first C routine, fetch_unroll_info().  This
2407   // routine captures the return values and returns a structure which
2408   // describes the current frame size and the sizes of all replacement frames.
2409   // The current frame is compiled code and may contain many inlined
2410   // functions, each with their own JVM state.  We pop the current frame, then
2411   // push all the new frames.  Then we call the C routine unpack_frames() to
2412   // populate these frames.  Finally unpack_frames() returns us the new target
2413   // address.  Notice that callee-save registers are BLOWN here; they have
2414   // already been captured in the vframeArray at the time the return PC was
2415   // patched.
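  // Editorial sketch (illustrative frame counts, not derived from this file): if the
  // deoptee carried two inlined JVM states, the UnrollBlock returned by
  // fetch_unroll_info() describes two interpreter frame sizes plus this blob's
  // self-frame; the loop further down pops the compiled frame and pushes those
  // skeletal interpreter frames before unpack_frames() fills them in.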
2416   address start = __ pc();
2417   Label cont;
2418 
2419   // Prolog for non exception case!
2420 
2421   // Save everything in sight.
2422   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2423 
2424   // Normal deoptimization.  Save exec mode for unpack_frames.
2425   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2426   __ b(cont);
2427 
2428   int reexecute_offset = __ pc() - start;
2429 #if INCLUDE_JVMCI && !defined(COMPILER1)
2430   if (UseJVMCICompiler) {
2431     // JVMCI does not use this kind of deoptimization
2432     __ should_not_reach_here();
2433   }
2434 #endif
2435 
2436   // Reexecute case
2437   // return address is the pc that describes what bci to re-execute at
2438 
2439   // No need to update map as each call to save_live_registers will produce identical oopmap
2440   (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2441 
2442   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2443   __ b(cont);
2444 
2445 #if INCLUDE_JVMCI
2446   Label after_fetch_unroll_info_call;
2447   int implicit_exception_uncommon_trap_offset = 0;
2448   int uncommon_trap_offset = 0;
2449 
2450   if (EnableJVMCI) {
2451     implicit_exception_uncommon_trap_offset = __ pc() - start;
2452 
2453     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2454     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2455 
2456     uncommon_trap_offset = __ pc() - start;
2457 
2458     // Save everything in sight.
2459     reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2460     // fetch_unroll_info needs to call last_java_frame()
2461     Label retaddr;
2462     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2463 
2464     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2465     __ movw(rscratch1, -1);
2466     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2467 
2468     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2469     __ mov(c_rarg0, rthread);
2470     __ movw(c_rarg2, rcpool); // exec mode
2471     __ lea(rscratch1,
2472            RuntimeAddress(CAST_FROM_FN_PTR(address,
2473                                            Deoptimization::uncommon_trap)));
2474     __ blr(rscratch1);
2475     __ bind(retaddr);
2476     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2477 
2478     __ reset_last_Java_frame(false);
2479 
2480     __ b(after_fetch_unroll_info_call);
2481   } // EnableJVMCI
2482 #endif // INCLUDE_JVMCI
2483 
2484   int exception_offset = __ pc() - start;
2485 
2486   // Prolog for exception case
2487 
2488   // all registers are dead at this entry point, except for r0, and
2489   // r3 which contain the exception oop and exception pc
2490   // respectively.  Set them in TLS and fall thru to the
2491   // unpack_with_exception_in_tls entry point.
2492 
2493   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2494   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2495 
2496   int exception_in_tls_offset = __ pc() - start;
2497 
2498   // new implementation because exception oop is now passed in JavaThread
2499 
2500   // Prolog for exception case
2501   // All registers must be preserved because they might be used by LinearScan
2502   // Exception oop and throwing PC are passed in JavaThread
2503   // tos: stack at point of call to method that threw the exception (i.e. only
2504   // args are on the stack, no return address)
2505 
2506   // The return address pushed by save_live_registers will be patched
2507   // later with the throwing pc. The correct value is not available
2508   // now because loading it from memory would destroy registers.
2509 
2510   // NB: The SP at this point must be the SP of the method that is
2511   // being deoptimized.  Deoptimization assumes that the frame created
2512   // here by save_live_registers is immediately below the method's SP.
2513   // This is a somewhat fragile mechanism.
2514 
2515   // Save everything in sight.
2516   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2517 
2518   // Now it is safe to overwrite any register
2519 
2520   // Deopt during an exception.  Save exec mode for unpack_frames.
2521   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2522 
2523   // load throwing pc from JavaThread and patch it as the return address
2524   // of the current frame. Then clear the field in JavaThread
2525   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2526   __ protect_return_address(r3);
2527   __ str(r3, Address(rfp, wordSize));
2528   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2529 
2530 #ifdef ASSERT
2531   // verify that there is really an exception oop in JavaThread
2532   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2533   __ verify_oop(r0);
2534 
2535   // verify that there is no pending exception
2536   Label no_pending_exception;
2537   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2538   __ cbz(rscratch1, no_pending_exception);
2539   __ stop("must not have pending exception here");
2540   __ bind(no_pending_exception);
2541 #endif
2542 
2543   __ bind(cont);
2544 
2545   // Call C code.  Need thread and this frame, but NOT official VM entry
2546   // crud.  We cannot block on this call, no GC can happen.
2547   //
2548   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2549 
2550   // fetch_unroll_info needs to call last_java_frame().
2551 
2552   Label retaddr;
2553   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2554 #ifdef ASSERT
2555   { Label L;
2556     __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2557     __ cbz(rscratch1, L);
2558     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2559     __ bind(L);
2560   }
2561 #endif // ASSERT
2562   __ mov(c_rarg0, rthread);
2563   __ mov(c_rarg1, rcpool);
2564   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2565   __ blr(rscratch1);
2566   __ bind(retaddr);
2567 
2568   // Need to have an oopmap that tells fetch_unroll_info where to
2569   // find any register it might need.
2570   oop_maps->add_gc_map(__ pc() - start, map);
2571 
2572   __ reset_last_Java_frame(false);
2573 
2574 #if INCLUDE_JVMCI
2575   if (EnableJVMCI) {
2576     __ bind(after_fetch_unroll_info_call);
2577   }
2578 #endif
2579 
2580   // Load UnrollBlock* into r5
2581   __ mov(r5, r0);
2582 
2583   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2584   Label noException;
2585   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2586   __ br(Assembler::NE, noException);
2587   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2588   // QQQ this is useless; it was null above
2589   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2590   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2591   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2592 
2593   __ verify_oop(r0);
2594 
2595   // Overwrite the result registers with the exception results.
2596   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2597   // I think this is useless
2598   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2599 
2600   __ bind(noException);
2601 
2602   // Only register save data is on the stack.
2603   // Now restore the result registers.  Everything else is either dead
2604   // or captured in the vframeArray.
2605 
2606   // Restore fp result register
2607   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2608   // Restore integer result register
2609   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2610 
2611   // Pop all of the register save area off the stack
2612   __ add(sp, sp, frame_size_in_words * wordSize);
2613 
2614   // All of the register save area has been popped off the stack. Only the
2615   // return address remains.
2616 
2617   // Pop all the frames we must move/replace.
2618   //
2619   // Frame picture (youngest to oldest)
2620   // 1: self-frame (no frame link)
2621   // 2: deopting frame  (no frame link)
2622   // 3: caller of deopting frame (could be compiled/interpreted).
2623   //
2624   // Note: by leaving the return address of self-frame on the stack
2625   // and using the size of frame 2 to adjust the stack
2626   // when we are done the return to frame 3 will still be on the stack.
2627 
2628   // Pop deoptimized frame
2629   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2630   __ sub(r2, r2, 2 * wordSize);
2631   __ add(sp, sp, r2);
2632   __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2633 
2634 #ifdef ASSERT
2635   // Compilers generate code that bang the stack by as much as the
2636   // interpreter would need. So this stack banging should never
2637   // trigger a fault. Verify that it does not on non product builds.
2638   __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2639   __ bang_stack_size(r19, r2);
2640 #endif
2641   // Load address of array of frame pcs into r2
2642   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2643 
2644   // Trash the old pc
2645   // __ addptr(sp, wordSize);  FIXME ????
2646 
2647   // Load address of array of frame sizes into r4
2648   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2649 
2650   // Load counter into r3
2651   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2652 
2653   // Now adjust the caller's stack to make up for the extra locals,
2654   // but record the original sp first so that we can save it in the skeletal
2655   // interpreter frame; the stack walking of interpreter_sender will then get
2656   // the unextended sp value and not the "real" sp value.
2657 
2658   const Register sender_sp = r6;
2659 
2660   __ mov(sender_sp, sp);
2661   __ ldrw(r19, Address(r5,
2662                        Deoptimization::UnrollBlock::
2663                        caller_adjustment_offset()));
2664   __ sub(sp, sp, r19);
2665 
2666   // Push interpreter frames in a loop
2667   __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2668   __ mov(rscratch2, rscratch1);
2669   Label loop;
2670   __ bind(loop);
2671   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2672   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2673   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2674   __ enter();                           // Save old & set new fp
2675   __ sub(sp, sp, r19);                  // Prolog
2676   // This value is corrected by layout_activation_impl
2677   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2678   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2679   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2680   __ sub(r3, r3, 1);                   // Decrement counter
2681   __ cbnz(r3, loop);
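       // At this point we have built a chain of skeletal interpreter frames, one
       // per entry in the frame_sizes/frame_pcs arrays. Each frame has a zeroed
       // last_sp slot (corrected later by layout_activation_impl) and records its
       // sender_sp so the partially-built stack stays walkable.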
2682 
2683   // Re-push self-frame
2684   __ ldr(lr, Address(r2));
2685   __ enter();
2686 
2687   // Allocate a full sized register save area.  We subtract 2 because
2688   // enter() just pushed 2 words
2689   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2690 
2691   // Restore frame locals after moving the frame
2692   __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2693   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2694 
2695   // Call C code.  Need thread but NOT official VM entry
2696   // crud.  We cannot block on this call, no GC can happen.  Call should
2697   // restore return values to their stack-slots with the new SP.
2698   //
2699   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2700 
2701   // Use rfp because the frames look interpreted now
2702   // Don't need the precise return PC here, just precise enough to point into this code blob.
2703   address the_pc = __ pc();
2704   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2705 
2706   __ mov(c_rarg0, rthread);
2707   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2708   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2709   __ blr(rscratch1);
2710 
2711   // Set an oopmap for the call site
2712   // Use the same PC we used for the last java frame
2713   oop_maps->add_gc_map(the_pc - start,
2714                        new OopMap( frame_size_in_words, 0 ));
2715 
2716   // Clear fp AND pc
2717   __ reset_last_Java_frame(true);
2718 
2719   // Collect return values
2720   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2721   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2722   // I think this is useless (throwing pc?)
2723   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2724 
2725   // Pop self-frame.
2726   __ leave();                           // Epilog
2727 
2728   // Jump to interpreter
2729   __ ret(lr);
2730 
2731   // Make sure all code is generated
2732   masm->flush();
2733 
2734   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2735   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2736 #if INCLUDE_JVMCI
2737   if (EnableJVMCI) {
2738     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2739     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2740   }
2741 #endif
2742 
2743   AOTCodeCache::store_code_blob(*_deopt_blob, AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id);
2744 }
2745 
2746 // Number of stack slots between incoming argument block and the start of
2747 // a new frame.  The PROLOG must add this many slots to the stack.  The
2748 // EPILOG must remove this many slots. aarch64 needs two words (i.e. four
2749 // 32-bit VMRegImpl stack slots) for the return address and fp, hence 4.
2750 // TODO: verify that this is correct.
2751 uint SharedRuntime::in_preserve_stack_slots() {
2752   return 4;
2753 }
2754 
2755 uint SharedRuntime::out_preserve_stack_slots() {
2756   return 0;
2757 }
2758 
2759 
2760 VMReg SharedRuntime::thread_register() {
2761   return rthread->as_VMReg();
2762 }
2763 
2764 //------------------------------generate_handler_blob------
2765 //
2766 // Generate a special Compile2Runtime blob that saves all registers
2767 // and sets up an oopmap.
2768 //
2769 SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
2770   assert(is_polling_page_id(id), "expected a polling page stub id");
2771 
2772   // Allocate space for the code.  Setup code generation tools.
2773   const char* name = SharedRuntime::stub_name(id);
2774   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2775   if (blob != nullptr) {
2776     return blob->as_safepoint_blob();
2777   }
2778 
2779   ResourceMark rm;
2780   OopMapSet *oop_maps = new OopMapSet();
2781   OopMap* map;
2782   CodeBuffer buffer(name, 2048, 1024);
2783   MacroAssembler* masm = new MacroAssembler(&buffer);
2784 
2785   address start   = __ pc();
2786   address call_pc = nullptr;
2787   int frame_size_in_words;
2788   bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
2789   RegisterSaver reg_save(id == StubId::shared_polling_page_vectors_safepoint_handler_id /* save_vectors */);
2790 
2791   // When the signal occurred, the LR was either signed and stored on the stack (in which
2792   // case it will be restored from the stack before being used) or unsigned and not stored
2793   // on the stack. Stripping ensures we get the right value.
2794   __ strip_return_address();
2795 
2796   // Save Integer and Float registers.
2797   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2798 
2799   // The following is basically a call_VM.  However, we need the precise
2800   // address of the call in order to generate an oopmap. Hence, we do all the
2801   // work ourselves.
2802 
2803   Label retaddr;
2804   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2805 
2806   // The return address must always be correct so that the frame constructor never
2807   // sees an invalid pc.
2808 
2809   if (!cause_return) {
2810     // overwrite the return address pushed by save_live_registers
2811     // Additionally, r20 is a callee-saved register so we can look at
2812     // it later to determine if someone changed the return address for
2813     // us!
2814     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2815     __ protect_return_address(r20);
2816     __ str(r20, Address(rfp, wordSize));
2817   }
2818 
2819   // Do the call
2820   __ mov(c_rarg0, rthread);
2821   __ lea(rscratch1, RuntimeAddress(call_ptr));
2822   __ blr(rscratch1);
2823   __ bind(retaddr);
2824 
2825   // Set an oopmap for the call site.  This oopmap will map all
2826   // oop-registers and debug-info registers as callee-saved.  This
2827   // will allow deoptimization at this safepoint to find all possible
2828   // debug-info recordings, as well as let GC find all oops.
2829 
2830   oop_maps->add_gc_map( __ pc() - start, map);
2831 
2832   Label noException;
2833 
2834   __ reset_last_Java_frame(false);
2835 
2836   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
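       // Acquire-style barrier (LoadLoad|LoadStore): order earlier loads before
       // the pending-exception check and the register restores that follow.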
2837 
2838   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2839   __ cbz(rscratch1, noException);
2840 
2841   // Exception pending
2842 
2843   reg_save.restore_live_registers(masm);
2844 
2845   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2846 
2847   // No exception case
2848   __ bind(noException);
2849 
2850   Label no_adjust, bail;
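       // If the poll trapped (cause_return == false) and the runtime left our
       // stashed return pc untouched, step it past the poll instruction so the
       // poll is not re-executed on return; if the runtime replaced the pc
       // (e.g. because the frame is being deoptimized), leave it alone.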
2851   if (!cause_return) {
2852     // If our stashed return pc was modified by the runtime we avoid touching it
2853     __ ldr(rscratch1, Address(rfp, wordSize));
2854     __ cmp(r20, rscratch1);
2855     __ br(Assembler::NE, no_adjust);
2856     __ authenticate_return_address(r20);
2857 
2858 #ifdef ASSERT
2859     // Verify the correct encoding of the poll we're about to skip.
2860     // See NativeInstruction::is_ldrw_to_zr()
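         // The two ubfx/cmpw pairs below check bits [31:22] (the ldrw opcode
         // used for the poll) and bits [4:0] (Rt, which must be 31, i.e. zr).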
2861     __ ldrw(rscratch1, Address(r20));
2862     __ ubfx(rscratch2, rscratch1, 22, 10);
2863     __ cmpw(rscratch2, 0b1011100101);
2864     __ br(Assembler::NE, bail);
2865     __ ubfx(rscratch2, rscratch1, 0, 5);
2866     __ cmpw(rscratch2, 0b11111);
2867     __ br(Assembler::NE, bail);
2868 #endif
2869     // Adjust return pc forward to step over the safepoint poll instruction
2870     __ add(r20, r20, NativeInstruction::instruction_size);
2871     __ protect_return_address(r20);
2872     __ str(r20, Address(rfp, wordSize));
2873   }
2874 
2875   __ bind(no_adjust);
2876   // Normal exit, restore registers and exit.
2877   reg_save.restore_live_registers(masm);
2878 
2879   __ ret(lr);
2880 
2881 #ifdef ASSERT
2882   __ bind(bail);
2883   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2884 #endif
2885 
2886   // Make sure all code is generated
2887   masm->flush();
2888 
2889   // Fill-out other meta info
2890   SafepointBlob* sp_blob = SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2891 
2892   AOTCodeCache::store_code_blob(*sp_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2893   return sp_blob;
2894 }
2895 
2896 //
2897 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2898 //
2899 // Generate a stub that calls into the VM to find out the proper destination
2900 // of a java call. All the argument registers are live at this point,
2901 // but since this is generic code we don't know what they are, so the
2902 // caller must do any GC of the args.
2903 //
2904 RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
2905   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2906   assert(is_resolve_id(id), "expected a resolve stub id");
2907 
2908   const char* name = SharedRuntime::stub_name(id);
2909   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2910   if (blob != nullptr) {
2911     return blob->as_runtime_stub();
2912   }
2913 
2914   // allocate space for the code
2915   ResourceMark rm;
2916   CodeBuffer buffer(name, 1000, 512);
2917   MacroAssembler* masm                = new MacroAssembler(&buffer);
2918 
2919   int frame_size_in_words;
2920   RegisterSaver reg_save(false /* save_vectors */);
2921 
2922   OopMapSet *oop_maps = new OopMapSet();
2923   OopMap* map = nullptr;
2924 
2925   int start = __ offset();
2926 
2927   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2928 
2929   int frame_complete = __ offset();
2930 
2931   {
2932     Label retaddr;
2933     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2934 
2935     __ mov(c_rarg0, rthread);
2936     __ lea(rscratch1, RuntimeAddress(destination));
2937 
2938     __ blr(rscratch1);
2939     __ bind(retaddr);
2940   }
2941 
2942   // Set an oopmap for the call site.
2943   // We need this not only for callee-saved registers, but also for volatile
2944   // registers that the compiler might be keeping live across a safepoint.
2945 
2946   oop_maps->add_gc_map( __ offset() - start, map);
2947 
2948   // r0 contains the address we are going to jump to assuming no exception got installed
2949 
2950   // clear last_Java_sp
2951   __ reset_last_Java_frame(false);
2952   // check for pending exceptions
2953   Label pending;
2954   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2955   __ cbnz(rscratch1, pending);
2956 
2957   // get the returned Method*
2958   __ get_vm_result_metadata(rmethod, rthread);
2959   __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
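       // restore_live_registers below will reload rmethod from this save slot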
2960 
2961   // r0 is where we want to jump: stash it in rscratch1's save slot, to be reloaded by restore_live_registers
2962   __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
2963   reg_save.restore_live_registers(masm);
2964 
2965   // We are back to the original state on entry and ready to go.
2966 
2967   __ br(rscratch1);
2968 
2969   // Pending exception after the safepoint
2970 
2971   __ bind(pending);
2972 
2973   reg_save.restore_live_registers(masm);
2974 
2975   // exception pending => remove activation and forward to exception handler
2976 
2977   __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
2978 
2979   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2980   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2981 
2982   // -------------
2983   // make sure all code is generated
2984   masm->flush();
2985 
2986   // return the blob
2987   // (new_runtime_stub takes the frame size in words, so frame_size_in_words is correct)
2988   RuntimeStub* rs_blob = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2989 
2990   AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2991   return rs_blob;
2992 }
2993 
2994 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
2995   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
2996   if (buf == nullptr) {
2997     return nullptr;
2998   }
2999   CodeBuffer buffer(buf);
3000   short buffer_locs[20];
3001   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3002                                          sizeof(buffer_locs)/sizeof(relocInfo));
3003 
3004   MacroAssembler _masm(&buffer);
3005   MacroAssembler* masm = &_masm;
3006 
3007   const Array<SigEntry>* sig_vk = vk->extended_sig();
3008   const Array<VMRegPair>* regs = vk->return_regs();
3009 
3010   int pack_fields_jobject_off = __ offset();
3011   // Resolve pre-allocated buffer from JNI handle.
3012   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3013   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3014   __ ldr(r0, Address(Rresult));
3015   __ resolve_jobject(r0 /* value */,
3016                      rthread /* thread */,
3017                      r12 /* tmp */);
3018   __ str(r0, Address(Rresult));
3019 
3020   int pack_fields_off = __ offset();
3021 
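       // Pack loop: copy each field value from its return register into the
       // buffered object in r0, skipping T_METADATA entries and treating T_VOID
       // as the upper half of a preceding long/double. Field registers start at
       // j == 1 (index 0 is reserved for the inline type oop itself).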
3022   int j = 1;
3023   for (int i = 0; i < sig_vk->length(); i++) {
3024     BasicType bt = sig_vk->at(i)._bt;
3025     if (bt == T_METADATA) {
3026       continue;
3027     }
3028     if (bt == T_VOID) {
3029       if (sig_vk->at(i-1)._bt == T_LONG ||
3030           sig_vk->at(i-1)._bt == T_DOUBLE) {
3031         j++;
3032       }
3033       continue;
3034     }
3035     int off = sig_vk->at(i)._offset;
3036     VMRegPair pair = regs->at(j);
3037     VMReg r_1 = pair.first();
3038     VMReg r_2 = pair.second();
3039     Address to(r0, off);
3040     if (bt == T_FLOAT) {
3041       __ strs(r_1->as_FloatRegister(), to);
3042     } else if (bt == T_DOUBLE) {
3043       __ strd(r_1->as_FloatRegister(), to);
3044     } else {
3045       Register val = r_1->as_Register();
3046       assert_different_registers(to.base(), val, r15, r16, r17);
3047       if (is_reference_type(bt)) {
3048         // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep r0 valid.
3049         __ mov(r17, r0);
3050         Address to_with_r17(r17, off);
3051         __ store_heap_oop(to_with_r17, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3052       } else {
3053         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3054       }
3055     }
3056     j++;
3057   }
3058   assert(j == regs->length(), "missed a field?");
3059   if (vk->supports_nullable_layouts()) {
3060     // Zero the null marker (setting it to 1 would be better but would require an additional register)
3061     __ strb(zr, Address(r0, vk->null_marker_offset()));
3062   }
3063   __ ret(lr);
3064 
3065   int unpack_fields_off = __ offset();
3066 
3067   Label skip;
3068   Label not_null;
3069   __ cbnz(r0, not_null);
3070 
3071   // Return value is null. Zero all registers because the runtime requires a canonical
3072   // representation of a flat null.
3073   j = 1;
3074   for (int i = 0; i < sig_vk->length(); i++) {
3075     BasicType bt = sig_vk->at(i)._bt;
3076     if (bt == T_METADATA) {
3077       continue;
3078     }
3079     if (bt == T_VOID) {
3080       if (sig_vk->at(i-1)._bt == T_LONG ||
3081           sig_vk->at(i-1)._bt == T_DOUBLE) {
3082         j++;
3083       }
3084       continue;
3085     }
3086 
3087     VMRegPair pair = regs->at(j);
3088     VMReg r_1 = pair.first();
3089     if (r_1->is_FloatRegister()) {
3090       __ mov(r_1->as_FloatRegister(), Assembler::T2S, 0);
3091     } else {
3092       __ mov(r_1->as_Register(), zr);
3093     }
3094     j++;
3095   }
3096   __ b(skip);
3097   __ bind(not_null);
3098 
3099   j = 1;
3100   for (int i = 0; i < sig_vk->length(); i++) {
3101     BasicType bt = sig_vk->at(i)._bt;
3102     if (bt == T_METADATA) {
3103       continue;
3104     }
3105     if (bt == T_VOID) {
3106       if (sig_vk->at(i-1)._bt == T_LONG ||
3107           sig_vk->at(i-1)._bt == T_DOUBLE) {
3108         j++;
3109       }
3110       continue;
3111     }
3112     int off = sig_vk->at(i)._offset;
3113     assert(off > 0, "offset in object should be positive");
3114     VMRegPair pair = regs->at(j);
3115     VMReg r_1 = pair.first();
3116     VMReg r_2 = pair.second();
3117     Address from(r0, off);
3118     if (bt == T_FLOAT) {
3119       __ ldrs(r_1->as_FloatRegister(), from);
3120     } else if (bt == T_DOUBLE) {
3121       __ ldrd(r_1->as_FloatRegister(), from);
3122     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3123       assert_different_registers(r0, r_1->as_Register());
3124       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3125     } else {
3126       assert(is_java_primitive(bt), "unexpected basic type");
3127       assert_different_registers(r0, r_1->as_Register());
3128       size_t size_in_bytes = type2aelembytes(bt);
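           // char and boolean are zero-extended; the other primitives are sign-extended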
3129       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3130     }
3131     j++;
3132   }
3133   assert(j == regs->length(), "missed a field?");
3134 
3135   __ bind(skip);
3136 
3137   __ ret(lr);
3138 
3139   __ flush();
3140 
3141   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3142 }
3143 
3144 // Continuation point for throwing of implicit exceptions that are
3145 // not handled in the current activation. Fabricates an exception
3146 // oop and initiates normal exception dispatching in this
3147 // frame. Since we need to preserve callee-saved values (currently
3148 // only for C2, but done for C1 as well) we need a callee-saved oop
3149 // map and therefore have to make these stubs into RuntimeStubs
3150 // rather than BufferBlobs.  If the compiler needs all registers to
3151 // be preserved between the fault point and the exception handler
3152 // then it must assume responsibility for that in
3153 // AbstractCompiler::continuation_for_implicit_null_exception or
3154 // continuation_for_implicit_division_by_zero_exception. All other
3155 // implicit exceptions (e.g., NullPointerException or
3156 // AbstractMethodError on entry) are either at call sites or
3157 // otherwise assume that stack unwinding will be initiated, so
3158 // caller saved registers were assumed volatile in the compiler.
3159 
3160 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
3161   assert(is_throw_id(id), "expected a throw stub id");
3162 
3163   const char* name = SharedRuntime::stub_name(id);
3164 
3165   // Information about frame layout at time of blocking runtime call.
3166   // Note that we only have to preserve callee-saved registers since
3167   // the compilers are responsible for supplying a continuation point
3168   // if they expect all registers to be preserved.
3169   // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
3170   enum layout {
3171     rfp_off = 0,
3172     rfp_off2,
3173     return_off,
3174     return_off2,
3175     framesize // inclusive of return address
3176   };
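       // Each enum value is one 32-bit VMRegImpl slot: rfp and the return address
       // take two slots apiece, so framesize == 4 slots == 2 words.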
3177 
3178   int insts_size = 512;
3179   int locs_size  = 64;
3180 
3181   const char* timer_msg = "SharedRuntime generate_throw_exception";
3182   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
3183 
3184   CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id));
3185   if (blob != nullptr) {
3186     return blob->as_runtime_stub();
3187   }
3188 
3189   ResourceMark rm;
3190   CodeBuffer code(name, insts_size, locs_size);
3191   OopMapSet* oop_maps  = new OopMapSet();
3192   MacroAssembler* masm = new MacroAssembler(&code);
3193 
3194   address start = __ pc();
3195 
3196   // This is an inlined and slightly modified version of call_VM
3197   // which has the ability to fetch the return PC out of
3198   // thread-local storage and also sets up last_Java_sp slightly
3199   // differently from the real call_VM.
3200 
3201   __ enter(); // Save FP and LR before call
3202 
3203   assert(is_even(framesize/2), "sp not 16-byte aligned");
3204 
3205   // lr and fp are already in place
3206   __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog
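       // framesize is in 32-bit slots; enter() already pushed fp and lr (4 slots),
       // so this allocates whatever remains below rfp (nothing extra for this
       // particular layout).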
3207 
3208   int frame_complete = __ pc() - start;
3209 
3210   // Set up last_Java_sp and last_Java_fp
3211   address the_pc = __ pc();
3212   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
3213 
3214   __ mov(c_rarg0, rthread);
3215   BLOCK_COMMENT("call runtime_entry");
3216   __ lea(rscratch1, RuntimeAddress(runtime_entry));
3217   __ blr(rscratch1);
3218 
3219   // Generate oop map
3220   OopMap* map = new OopMap(framesize, 0);
3221 
3222   oop_maps->add_gc_map(the_pc - start, map);
3223 
3224   __ reset_last_Java_frame(true);
3225 
3226   // Reinitialize the ptrue predicate register, in case the external runtime
3227   // call clobbers ptrue reg, as we may return to SVE compiled code.
3228   __ reinitialize_ptrue();
3229 
3230   __ leave();
3231 
3232   // check for pending exceptions
3233 #ifdef ASSERT
3234   Label L;
3235   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
3236   __ cbnz(rscratch1, L);
3237   __ should_not_reach_here();
3238   __ bind(L);
3239 #endif // ASSERT
3240   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3241 
3242   // codeBlob framesize is in words (not VMRegImpl::slot_size)
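       // (the shift by LogBytesPerWord - LogBytesPerInt, i.e. 1, converts 32-bit slots to 64-bit words)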
3243   RuntimeStub* stub =
3244     RuntimeStub::new_runtime_stub(name,
3245                                   &code,
3246                                   frame_complete,
3247                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3248                                   oop_maps, false);
3249   AOTCodeCache::store_code_blob(*stub, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
3250 
3251   return stub;
3252 }
3253 
3254 #if INCLUDE_JFR
3255 
3256 static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
3257   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
3258   __ mov(c_rarg0, thread);
3259 }
3260 
3261 // The handle is dereferenced through a load barrier.
3262 static void jfr_epilogue(MacroAssembler* masm) {
3263   __ reset_last_Java_frame(true);
3264 }
3265 
3266 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
3267 // It returns a jobject handle to the event writer.
3268 // The handle is dereferenced and the return value is the event writer oop.
3269 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3270   enum layout {
3271     rbp_off,
3272     rbpH_off,
3273     return_off,
3274     return_off2,
3275     framesize // inclusive of return address
3276   };
3277 
3278   int insts_size = 1024;
3279   int locs_size = 64;
3280   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id);
3281   CodeBuffer code(name, insts_size, locs_size);
3282   OopMapSet* oop_maps = new OopMapSet();
3283   MacroAssembler* masm = new MacroAssembler(&code);
3284 
3285   address start = __ pc();
3286   __ enter();
3287   int frame_complete = __ pc() - start;
3288   address the_pc = __ pc();
3289   jfr_prologue(the_pc, masm, rthread);
3290   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
3291   jfr_epilogue(masm);
3292   __ resolve_global_jobject(r0, rscratch1, rscratch2);
3293   __ leave();
3294   __ ret(lr);
3295 
3296   OopMap* map = new OopMap(framesize, 1); // rfp
3297   oop_maps->add_gc_map(the_pc - start, map);
3298 
3299   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3300     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3301                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3302                                   oop_maps, false);
3303   return stub;
3304 }
3305 
3306 // For c2: call to return a leased buffer.
3307 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
3308   enum layout {
3309     rbp_off,
3310     rbpH_off,
3311     return_off,
3312     return_off2,
3313     framesize // inclusive of return address
3314   };
3315 
3316   int insts_size = 1024;
3317   int locs_size = 64;
3318 
3319   const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id);
3320   CodeBuffer code(name, insts_size, locs_size);
3321   OopMapSet* oop_maps = new OopMapSet();
3322   MacroAssembler* masm = new MacroAssembler(&code);
3323 
3324   address start = __ pc();
3325   __ enter();
3326   int frame_complete = __ pc() - start;
3327   address the_pc = __ pc();
3328   jfr_prologue(the_pc, masm, rthread);
3329   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
3330   jfr_epilogue(masm);
3331 
3332   __ leave();
3333   __ ret(lr);
3334 
3335   OopMap* map = new OopMap(framesize, 1); // rfp
3336   oop_maps->add_gc_map(the_pc - start, map);
3337 
3338   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3339     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3340                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3341                                   oop_maps, false);
3342   return stub;
3343 }
3344 
3345 #endif // INCLUDE_JFR