1 /*
   2  * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/debugInfoRec.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSetAssembler.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "interpreter/interp_masm.hpp"
  40 #include "logging/log.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "nativeInst_aarch64.hpp"
  43 #include "oops/compiledICHolder.hpp"
  44 #include "oops/klass.inline.hpp"
  45 #include "oops/method.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/continuation.hpp"
  48 #include "runtime/continuationEntry.inline.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/jniHandles.hpp"
  51 #include "runtime/safepointMechanism.hpp"
  52 #include "runtime/sharedRuntime.hpp"
  53 #include "runtime/signature.hpp"
  54 #include "runtime/stubRoutines.hpp"
  55 #include "runtime/vframeArray.hpp"
  56 #include "utilities/align.hpp"
  57 #include "utilities/formatBuffer.hpp"
  58 #include "vmreg_aarch64.inline.hpp"
  59 #ifdef COMPILER1
  60 #include "c1/c1_Runtime1.hpp"
  61 #endif
  62 #ifdef COMPILER2
  63 #include "adfiles/ad_aarch64.hpp"
  64 #include "opto/runtime.hpp"
  65 #endif
  66 #if INCLUDE_JVMCI
  67 #include "jvmci/jvmciJavaClasses.hpp"
  68 #endif
  69 
  70 #define __ masm->
  71 
  72 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  73 
  74 class SimpleRuntimeFrame {
  75 
  76   public:
  77 
  78   // Most of the runtime stubs have this simple frame layout.
  79   // This class exists to make the layout shared in one place.
  80   // Offsets are for compiler stack slots, which are jints.
  81   enum layout {
  82     // The frame sender code expects that rbp will be in the "natural" place and
  83     // will override any oopMap setting for it. We must therefore force the layout
  84     // so that it agrees with the frame sender code.
  85     // we don't expect any arg reg save area so aarch64 asserts that
  86     // frame::arg_reg_save_area_bytes == 0
  87     rfp_off = 0,
  88     rfp_off2,
  89     return_off, return_off2,
  90     framesize
  91   };
  92 };
  93 
  94 // FIXME -- this is used by C1
  95 class RegisterSaver {
  96   const bool _save_vectors;
  97  public:
  98   RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}
  99 
 100   OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
 101   void restore_live_registers(MacroAssembler* masm);
 102 
 103   // Offsets into the register save area
 104   // Used by deoptimization when it is managing result register
 105   // values on its own
 106 
 107   int reg_offset_in_bytes(Register r);
 108   int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
 109   int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
 110   int v0_offset_in_bytes();
 111 
 112   // Total stack size in bytes for saving sve predicate registers.
 113   int total_sve_predicate_in_bytes();
 114 
 115   // Capture info about frame layout
 116   // Note this is only correct when not saving full vectors.
 117   enum layout {
 118                 fpu_state_off = 0,
 119                 fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
 120                 // The frame sender code expects that rfp will be in
 121                 // the "natural" place and will override any oopMap
 122                 // setting for it. We must therefore force the layout
 123                 // so that it agrees with the frame sender code.
 124                 r0_off = fpu_state_off + FPUStateSizeInWords,
 125                 rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
 126                 return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
 127                 reg_save_size = return_off + Register::max_slots_per_register};
 128 
 129 };
 130 
 131 int RegisterSaver::reg_offset_in_bytes(Register r) {
 132   // The integer registers are located above the floating point
 133   // registers in the stack frame pushed by save_live_registers() so the
 134   // offset depends on whether we are saving full vectors, and whether
 135   // those vectors are NEON or SVE.
 136 
 137   int slots_per_vect = FloatRegister::save_slots_per_register;
 138 
 139 #if COMPILER2_OR_JVMCI
 140   if (_save_vectors) {
 141     slots_per_vect = FloatRegister::slots_per_neon_register;
 142 
 143 #ifdef COMPILER2
 144     if (Matcher::supports_scalable_vector()) {
 145       slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
 146     }
 147 #endif
 148   }
 149 #endif
 150 
 151   int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
 152   return r0_offset + r->encoding() * wordSize;
 153 }
 154 
 155 int RegisterSaver::v0_offset_in_bytes() {
 156   // The floating point registers are located above the predicate registers if
 157   // they are present in the stack frame pushed by save_live_registers(). So the
 158   // offset depends on the saved total predicate vectors in the stack frame.
 159   return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
 160 }
 161 
 162 int RegisterSaver::total_sve_predicate_in_bytes() {
 163 #ifdef COMPILER2
 164   if (_save_vectors && Matcher::supports_scalable_vector()) {
 165     return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
 166            PRegister::number_of_registers;
 167   }
 168 #endif
 169   return 0;
 170 }
 171 
 172 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
 173   bool use_sve = false;
 174   int sve_vector_size_in_bytes = 0;
 175   int sve_vector_size_in_slots = 0;
 176   int sve_predicate_size_in_slots = 0;
 177   int total_predicate_in_bytes = total_sve_predicate_in_bytes();
 178   int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;
 179 
 180 #ifdef COMPILER2
 181   use_sve = Matcher::supports_scalable_vector();
 182   if (use_sve) {
 183     sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 184     sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
 185     sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
 186   }
 187 #endif
 188 
 189 #if COMPILER2_OR_JVMCI
 190   if (_save_vectors) {
 191     int extra_save_slots_per_register = 0;
 192     // Save upper half of vector registers
 193     if (use_sve) {
 194       extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
 195     } else {
 196       extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
 197     }
 198     int extra_vector_bytes = extra_save_slots_per_register *
 199                              VMRegImpl::stack_slot_size *
 200                              FloatRegister::number_of_registers;
 201     additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
 202   }
 203 #else
 204   assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
 205 #endif
 206 
 207   int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
 208                                      reg_save_size * BytesPerInt, 16);
 209   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 210   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 211   // The caller will allocate additional_frame_words
 212   int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
 213   // CodeBlob frame size is in words.
 214   int frame_size_in_words = frame_size_in_bytes / wordSize;
 215   *total_frame_words = frame_size_in_words;
 216 
 217   // Save Integer and Float registers.
 218   __ enter();
 219   __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);
 220 
 221   // Set an oopmap for the call site.  This oopmap will map all
 222   // oop-registers and debug-info registers as callee-saved.  This
 223   // will allow deoptimization at this safepoint to find all possible
 224   // debug-info recordings, as well as let GC find all oops.
 225 
 226   OopMapSet *oop_maps = new OopMapSet();
 227   OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
 228 
 229   for (int i = 0; i < Register::number_of_registers; i++) {
 230     Register r = as_Register(i);
 231     if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
 232       // SP offsets are in 4-byte words.
 233       // Register slots are 8 bytes wide, 32 floating-point registers.
 234       int sp_offset = Register::max_slots_per_register * i +
 235                       FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
 236       oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
 237     }
 238   }
 239 
 240   for (int i = 0; i < FloatRegister::number_of_registers; i++) {
 241     FloatRegister r = as_FloatRegister(i);
 242     int sp_offset = 0;
 243     if (_save_vectors) {
 244       sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
 245                             (FloatRegister::slots_per_neon_register * i);
 246     } else {
 247       sp_offset = FloatRegister::save_slots_per_register * i;
 248     }
 249     oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
 250   }
 251 
 252   return oop_map;
 253 }
 254 
 255 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
 256 #ifdef COMPILER2
 257   __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
 258                    Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
 259 #else
 260 #if !INCLUDE_JVMCI
 261   assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
 262 #endif
 263   __ pop_CPU_state(_save_vectors);
 264 #endif
 265   __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
 266   __ authenticate_return_address();
 267 }
 268 
 269 // Is vector's size (in bytes) bigger than a size saved by default?
 270 // 8 bytes vector registers are saved by default on AArch64.
 271 // The SVE supported min vector size is 8 bytes and we need to save
 272 // predicate registers when the vector size is 8 bytes as well.
 273 bool SharedRuntime::is_wide_vector(int size) {
 274   return size > 8 || (UseSVE > 0 && size >= 8);
 275 }
 276 
 277 // ---------------------------------------------------------------------------
 278 // Read the array of BasicTypes from a signature, and compute where the
 279 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 280 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
 281 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 282 // as framesizes are fixed.
 283 // VMRegImpl::stack0 refers to the first slot 0(sp).
 284 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.
 285 // Register up to Register::number_of_registers are the 64-bit
 286 // integer registers.
 287 
 288 // Note: the INPUTS in sig_bt are in units of Java argument words,
 289 // which are 64-bit.  The OUTPUTS are in 32-bit units.
 290 
 291 // The Java calling convention is a "shifted" version of the C ABI.
 292 // By skipping the first C ABI register we can call non-static jni
 293 // methods with small numbers of arguments without having to shuffle
 294 // the arguments at all. Since we control the java ABI we ought to at
 295 // least get some advantage out of it.
 296 
 297 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 298                                            VMRegPair *regs,
 299                                            int total_args_passed) {
 300 
 301   // Create the mapping between argument positions and
 302   // registers.
 303   static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
 304     j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
 305   };
 306   static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
 307     j_farg0, j_farg1, j_farg2, j_farg3,
 308     j_farg4, j_farg5, j_farg6, j_farg7
 309   };
 310 
 311 
 312   uint int_args = 0;
 313   uint fp_args = 0;
 314   uint stk_args = 0; // inc by 2 each time
 315 
 316   for (int i = 0; i < total_args_passed; i++) {
 317     switch (sig_bt[i]) {
 318     case T_BOOLEAN:
 319     case T_CHAR:
 320     case T_BYTE:
 321     case T_SHORT:
 322     case T_INT:
 323       if (int_args < Argument::n_int_register_parameters_j) {
 324         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 325       } else {
 326         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 327         stk_args += 2;
 328       }
 329       break;
 330     case T_VOID:
 331       // halves of T_LONG or T_DOUBLE
 332       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 333       regs[i].set_bad();
 334       break;
 335     case T_LONG:
 336       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 337       // fall through
 338     case T_OBJECT:
 339     case T_ARRAY:
 340     case T_ADDRESS:
 341     case T_PRIMITIVE_OBJECT:
 342       if (int_args < Argument::n_int_register_parameters_j) {
 343         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 344       } else {
 345         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 346         stk_args += 2;
 347       }
 348       break;
 349     case T_FLOAT:
 350       if (fp_args < Argument::n_float_register_parameters_j) {
 351         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 352       } else {
 353         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 354         stk_args += 2;
 355       }
 356       break;
 357     case T_DOUBLE:
 358       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 359       if (fp_args < Argument::n_float_register_parameters_j) {
 360         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 361       } else {
 362         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 363         stk_args += 2;
 364       }
 365       break;
 366     default:
 367       ShouldNotReachHere();
 368       break;
 369     }
 370   }
 371 
 372   return align_up(stk_args, 2);
 373 }
 374 
 375 
 376 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 377 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 378 
 379 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 380 
 381   // Create the mapping between argument positions and registers.
 382 
 383   static const Register INT_ArgReg[java_return_convention_max_int] = {
 384     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 385   };
 386 
 387   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 388     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 389   };
 390 
 391   uint int_args = 0;
 392   uint fp_args = 0;
 393 
 394   for (int i = 0; i < total_args_passed; i++) {
 395     switch (sig_bt[i]) {
 396     case T_BOOLEAN:
 397     case T_CHAR:
 398     case T_BYTE:
 399     case T_SHORT:
 400     case T_INT:
 401       if (int_args < SharedRuntime::java_return_convention_max_int) {
 402         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 403         int_args ++;
 404       } else {
 405         return -1;
 406       }
 407       break;
 408     case T_VOID:
 409       // halves of T_LONG or T_DOUBLE
 410       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 411       regs[i].set_bad();
 412       break;
 413     case T_LONG:
 414       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 415       // fall through
 416     case T_OBJECT:
 417     case T_ARRAY:
 418     case T_ADDRESS:
 419       // Should T_METADATA be added to java_calling_convention as well ?
 420     case T_METADATA:
 421     case T_PRIMITIVE_OBJECT:
 422       if (int_args < SharedRuntime::java_return_convention_max_int) {
 423         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 424         int_args ++;
 425       } else {
 426         return -1;
 427       }
 428       break;
 429     case T_FLOAT:
 430       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 431         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 432         fp_args ++;
 433       } else {
 434         return -1;
 435       }
 436       break;
 437     case T_DOUBLE:
 438       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 439       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 440         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 441         fp_args ++;
 442       } else {
 443         return -1;
 444       }
 445       break;
 446     default:
 447       ShouldNotReachHere();
 448       break;
 449     }
 450   }
 451 
 452   return int_args + fp_args;
 453 }
 454 
 455 // Patch the callers callsite with entry to compiled code if it exists.
 456 static void patch_callers_callsite(MacroAssembler *masm) {
 457   Label L;
 458   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 459   __ cbz(rscratch1, L);
 460 
 461   __ enter();
 462   __ push_CPU_state();
 463 
 464   // VM needs caller's callsite
 465   // VM needs target method
 466   // This needs to be a long call since we will relocate this adapter to
 467   // the codeBuffer and it may not reach
 468 
 469 #ifndef PRODUCT
 470   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 471 #endif
 472 
 473   __ mov(c_rarg0, rmethod);
 474   __ mov(c_rarg1, lr);
 475   __ authenticate_return_address(c_rarg1, rscratch1);
 476   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 477   __ blr(rscratch1);
 478 
 479   // Explicit isb required because fixup_callers_callsite may change the code
 480   // stream.
 481   __ safepoint_isb();
 482 
 483   __ pop_CPU_state();
 484   // restore sp
 485   __ leave();
 486   __ bind(L);
 487 }
 488 
 489 // For each inline type argument, sig includes the list of fields of
 490 // the inline type. This utility function computes the number of
 491 // arguments for the call if inline types are passed by reference (the
 492 // calling convention the interpreter expects).
 493 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 494   int total_args_passed = 0;
 495   if (InlineTypePassFieldsAsArgs) {
 496      for (int i = 0; i < sig_extended->length(); i++) {
 497        BasicType bt = sig_extended->at(i)._bt;
 498        if (bt == T_PRIMITIVE_OBJECT) {
 499          // In sig_extended, an inline type argument starts with:
 500          // T_PRIMITIVE_OBJECT, followed by the types of the fields of the
 501          // inline type and T_VOID to mark the end of the value
 502          // type. Inline types are flattened so, for instance, in the
 503          // case of an inline type with an int field and an inline type
 504          // field that itself has 2 fields, an int and a long:
 505          // T_PRIMITIVE_OBJECT T_INT T_PRIMITIVE_OBJECT T_INT T_LONG T_VOID (second
 506          // slot for the T_LONG) T_VOID (inner T_PRIMITIVE_OBJECT) T_VOID
 507          // (outer T_PRIMITIVE_OBJECT)
 508          total_args_passed++;
 509          int vt = 1;
 510          do {
 511            i++;
 512            BasicType bt = sig_extended->at(i)._bt;
 513            BasicType prev_bt = sig_extended->at(i-1)._bt;
 514            if (bt == T_PRIMITIVE_OBJECT) {
 515              vt++;
 516            } else if (bt == T_VOID &&
 517                       prev_bt != T_LONG &&
 518                       prev_bt != T_DOUBLE) {
 519              vt--;
 520            }
 521          } while (vt != 0);
 522        } else {
 523          total_args_passed++;
 524        }
 525      }
 526   } else {
 527     total_args_passed = sig_extended->length();
 528   }
 529 
 530   return total_args_passed;
 531 }
 532 
 533 
 534 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 535                                    BasicType bt,
 536                                    BasicType prev_bt,
 537                                    size_t size_in_bytes,
 538                                    const VMRegPair& reg_pair,
 539                                    const Address& to,
 540                                    Register tmp1,
 541                                    Register tmp2,
 542                                    Register tmp3,
 543                                    int extraspace,
 544                                    bool is_oop) {
 545   assert(bt != T_PRIMITIVE_OBJECT || !InlineTypePassFieldsAsArgs, "no inline type here");
 546   if (bt == T_VOID) {
 547     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 548     return;
 549   }
 550 
 551   // Say 4 args:
 552   // i   st_off
 553   // 0   32 T_LONG
 554   // 1   24 T_VOID
 555   // 2   16 T_OBJECT
 556   // 3    8 T_BOOL
 557   // -    0 return address
 558   //
 559   // However to make thing extra confusing. Because we can fit a Java long/double in
 560   // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter
 561   // leaves one slot empty and only stores to a single slot. In this case the
 562   // slot that is occupied is the T_VOID slot. See I said it was confusing.
 563 
 564   bool wide = (size_in_bytes == wordSize);
 565   VMReg r_1 = reg_pair.first();
 566   VMReg r_2 = reg_pair.second();
 567   assert(r_2->is_valid() == wide, "invalid size");
 568   if (!r_1->is_valid()) {
 569     assert(!r_2->is_valid(), "");
 570     return;
 571   }
 572 
 573   if (!r_1->is_FloatRegister()) {
 574     Register val = r25;
 575     if (r_1->is_stack()) {
 576       // memory to memory use r25 (scratch registers is used by store_heap_oop)
 577       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 578       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 579     } else {
 580       val = r_1->as_Register();
 581     }
 582     assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
 583     if (is_oop) {
 584       __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 585     } else {
 586       __ store_sized_value(to, val, size_in_bytes);
 587     }
 588   } else {
 589     if (wide) {
 590       __ strd(r_1->as_FloatRegister(), to);
 591     } else {
 592       // only a float use just part of the slot
 593       __ strs(r_1->as_FloatRegister(), to);
 594     }
 595   }
 596 }
 597 
 598 static void gen_c2i_adapter(MacroAssembler *masm,
 599                             const GrowableArray<SigEntry>* sig_extended,
 600                             const VMRegPair *regs,
 601                             bool requires_clinit_barrier,
 602                             address& c2i_no_clinit_check_entry,
 603                             Label& skip_fixup,
 604                             address start,
 605                             OopMapSet* oop_maps,
 606                             int& frame_complete,
 607                             int& frame_size_in_words,
 608                             bool alloc_inline_receiver) {
 609   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 610     Label L_skip_barrier;
 611 
 612     { // Bypass the barrier for non-static methods
 613       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 614       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 615       __ br(Assembler::EQ, L_skip_barrier); // non-static
 616     }
 617 
 618     __ load_method_holder(rscratch2, rmethod);
 619     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 620     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 621 
 622     __ bind(L_skip_barrier);
 623     c2i_no_clinit_check_entry = __ pc();
 624   }
 625 
 626   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 627   bs->c2i_entry_barrier(masm);
 628 
 629   // Before we get into the guts of the C2I adapter, see if we should be here
 630   // at all.  We've come from compiled code and are attempting to jump to the
 631   // interpreter, which means the caller made a static call to get here
 632   // (vcalls always get a compiled target if there is one).  Check for a
 633   // compiled target.  If there is one, we need to patch the caller's call.
 634   patch_callers_callsite(masm);
 635 
 636   __ bind(skip_fixup);
 637 
 638   // Name some registers to be used in the following code. We can use
 639   // anything except r0-r7 which are arguments in the Java calling
 640   // convention, rmethod (r12), and r13 which holds the outgoing sender
 641   // SP for the interpreter.
 642   Register buf_array = r10;   // Array of buffered inline types
 643   Register buf_oop = r11;     // Buffered inline type oop
 644   Register tmp1 = r15;
 645   Register tmp2 = r16;
 646   Register tmp3 = r17;
 647 
 648   if (InlineTypePassFieldsAsArgs) {
 649     // Is there an inline type argument?
 650     bool has_inline_argument = false;
 651     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 652       has_inline_argument = (sig_extended->at(i)._bt == T_PRIMITIVE_OBJECT);
 653     }
 654     if (has_inline_argument) {
 655       // There is at least an inline type argument: we're coming from
 656       // compiled code so we have no buffers to back the inline types
 657       // Allocate the buffers here with a runtime call.
 658       RegisterSaver reg_save(false /* save_vectors */);
 659       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 660 
 661       frame_complete = __ offset();
 662       address the_pc = __ pc();
 663 
 664       Label retaddr;
 665       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 666 
 667       __ mov(c_rarg0, rthread);
 668       __ mov(c_rarg1, rmethod);
 669       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 670 
 671       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 672       __ blr(rscratch1);
 673       __ bind(retaddr);
 674 
 675       oop_maps->add_gc_map(__ pc() - start, map);
 676       __ reset_last_Java_frame(false);
 677 
 678       reg_save.restore_live_registers(masm);
 679 
 680       Label no_exception;
 681       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 682       __ cbz(rscratch1, no_exception);
 683 
 684       __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
 685       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 686       __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));
 687 
 688       __ bind(no_exception);
 689 
 690       // We get an array of objects from the runtime call
 691       __ get_vm_result(buf_array, rthread);
 692       __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
 693     }
 694   }
 695 
 696   // Since all args are passed on the stack, total_args_passed *
 697   // Interpreter::stackElementSize is the space we need.
 698 
 699   int total_args_passed = compute_total_args_passed_int(sig_extended);
 700   int extraspace = total_args_passed * Interpreter::stackElementSize;
 701 
 702   // stack is aligned, keep it that way
 703   extraspace = align_up(extraspace, StackAlignmentInBytes);
 704 
 705   // set senderSP value
 706   __ mov(r19_sender_sp, sp);
 707 
 708   __ sub(sp, sp, extraspace);
 709 
 710   // Now write the args into the outgoing interpreter space
 711 
 712   // next_arg_comp is the next argument from the compiler point of
 713   // view (inline type fields are passed in registers/on the stack). In
 714   // sig_extended, an inline type argument starts with: T_PRIMITIVE_OBJECT,
 715   // followed by the types of the fields of the inline type and T_VOID
 716   // to mark the end of the inline type. ignored counts the number of
 717   // T_PRIMITIVE_OBJECT/T_VOID. next_vt_arg is the next inline type argument:
 718   // used to get the buffer for that argument from the pool of buffers
 719   // we allocated above and want to pass to the
 720   // interpreter. next_arg_int is the next argument from the
 721   // interpreter point of view (inline types are passed by reference).
 722   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 723        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 724     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 725     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 726     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 727     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 728     if (!InlineTypePassFieldsAsArgs || bt != T_PRIMITIVE_OBJECT) {
 729       int next_off = st_off - Interpreter::stackElementSize;
 730       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 731       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 732       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 733       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 734                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 735       next_arg_int++;
 736 #ifdef ASSERT
 737       if (bt == T_LONG || bt == T_DOUBLE) {
 738         // Overwrite the unused slot with known junk
 739         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
 740         __ str(rscratch1, Address(sp, st_off));
 741       }
 742 #endif /* ASSERT */
 743     } else {
 744       ignored++;
 745       // get the buffer from the just allocated pool of buffers
 746       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_PRIMITIVE_OBJECT);
 747       __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
 748       next_vt_arg++; next_arg_int++;
 749       int vt = 1;
 750       // write fields we get from compiled code in registers/stack
 751       // slots to the buffer: we know we are done with that inline type
 752       // argument when we hit the T_VOID that acts as an end of inline
 753       // type delimiter for this inline type. Inline types are flattened
 754       // so we might encounter embedded inline types. Each entry in
 755       // sig_extended contains a field offset in the buffer.
 756       Label L_null;
 757       do {
 758         next_arg_comp++;
 759         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 760         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 761         if (bt == T_PRIMITIVE_OBJECT) {
 762           vt++;
 763           ignored++;
 764         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 765           vt--;
 766           ignored++;
 767         } else {
 768           int off = sig_extended->at(next_arg_comp)._offset;
 769           if (off == -1) {
 770             // Nullable inline type argument, emit null check
 771             VMReg reg = regs[next_arg_comp-ignored].first();
 772             Label L_notNull;
 773             if (reg->is_stack()) {
 774               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 775               __ ldr(tmp1, Address(sp, ld_off));
 776               __ cbnz(tmp1, L_notNull);
 777             } else {
 778               __ cbnz(reg->as_Register(), L_notNull);
 779             }
 780             __ str(zr, Address(sp, st_off));
 781             __ b(L_null);
 782             __ bind(L_notNull);
 783             continue;
 784           }
 785           assert(off > 0, "offset in object should be positive");
 786           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 787           bool is_oop = is_reference_type(bt);
 788           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 789                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 790         }
 791       } while (vt != 0);
 792       // pass the buffer to the interpreter
 793       __ str(buf_oop, Address(sp, st_off));
 794       __ bind(L_null);
 795     }
 796   }
 797 
 798   __ mov(esp, sp); // Interp expects args on caller's expression stack
 799 
 800   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 801   __ br(rscratch1);
 802 }
 803 
 804 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 805 
 806 
 807   // Note: r19_sender_sp contains the senderSP on entry. We must
 808   // preserve it since we may do a i2c -> c2i transition if we lose a
 809   // race where compiled code goes non-entrant while we get args
 810   // ready.
 811 
 812   // Adapters are frameless.
 813 
 814   // An i2c adapter is frameless because the *caller* frame, which is
 815   // interpreted, routinely repairs its own esp (from
 816   // interpreter_frame_last_sp), even if a callee has modified the
 817   // stack pointer.  It also recalculates and aligns sp.
 818 
 819   // A c2i adapter is frameless because the *callee* frame, which is
 820   // interpreted, routinely repairs its caller's sp (from sender_sp,
 821   // which is set up via the senderSP register).
 822 
 823   // In other words, if *either* the caller or callee is interpreted, we can
 824   // get the stack pointer repaired after a call.
 825 
 826   // This is why c2i and i2c adapters cannot be indefinitely composed.
 827   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 828   // both caller and callee would be compiled methods, and neither would
 829   // clean up the stack pointer changes performed by the two adapters.
 830   // If this happens, control eventually transfers back to the compiled
 831   // caller, but with an uncorrected stack, causing delayed havoc.
 832 
 833   if (VerifyAdapterCalls &&
 834       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
 835 #if 0
 836     // So, let's test for cascading c2i/i2c adapters right now.
 837     //  assert(Interpreter::contains($return_addr) ||
 838     //         StubRoutines::contains($return_addr),
 839     //         "i2c adapter must return to an interpreter frame");
 840     __ block_comment("verify_i2c { ");
 841     Label L_ok;
 842     if (Interpreter::code() != NULL)
 843       range_check(masm, rax, r11,
 844                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 845                   L_ok);
 846     if (StubRoutines::code1() != NULL)
 847       range_check(masm, rax, r11,
 848                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 849                   L_ok);
 850     if (StubRoutines::code2() != NULL)
 851       range_check(masm, rax, r11,
 852                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 853                   L_ok);
 854     const char* msg = "i2c adapter must return to an interpreter frame";
 855     __ block_comment(msg);
 856     __ stop(msg);
 857     __ bind(L_ok);
 858     __ block_comment("} verify_i2ce ");
 859 #endif
 860   }
 861 
 862   // Cut-out for having no stack args.
 863   int comp_words_on_stack = 0;
 864   if (comp_args_on_stack) {
 865      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 866      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 867      __ andr(sp, rscratch1, -16);
 868   }
 869 
 870   // Will jump to the compiled code just as if compiled code was doing it.
 871   // Pre-load the register-jump target early, to schedule it better.
 872   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 873 
 874 #if INCLUDE_JVMCI
 875   if (EnableJVMCI) {
 876     // check if this call should be routed towards a specific entry point
 877     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 878     Label no_alternative_target;
 879     __ cbz(rscratch2, no_alternative_target);
 880     __ mov(rscratch1, rscratch2);
 881     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 882     __ bind(no_alternative_target);
 883   }
 884 #endif // INCLUDE_JVMCI
 885 
 886   int total_args_passed = sig->length();
 887 
 888   // Now generate the shuffle code.
 889   for (int i = 0; i < total_args_passed; i++) {
 890     BasicType bt = sig->at(i)._bt;
 891 
 892     assert(bt != T_PRIMITIVE_OBJECT, "i2c adapter doesn't unpack inline typ args");
 893     if (bt == T_VOID) {
 894       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 895       continue;
 896     }
 897 
 898     // Pick up 0, 1 or 2 words from SP+offset.
 899     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 900 
 901     // Load in argument order going down.
 902     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 903     // Point to interpreter value (vs. tag)
 904     int next_off = ld_off - Interpreter::stackElementSize;
 905     //
 906     //
 907     //
 908     VMReg r_1 = regs[i].first();
 909     VMReg r_2 = regs[i].second();
 910     if (!r_1->is_valid()) {
 911       assert(!r_2->is_valid(), "");
 912       continue;
 913     }
 914     if (r_1->is_stack()) {
 915       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 916       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 917       if (!r_2->is_valid()) {
 918         // sign extend???
 919         __ ldrsw(rscratch2, Address(esp, ld_off));
 920         __ str(rscratch2, Address(sp, st_off));
 921       } else {
 922         //
 923         // We are using two optoregs. This can be either T_OBJECT,
 924         // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
 925         // two slots but only uses one for thr T_LONG or T_DOUBLE case
 926         // So we must adjust where to pick up the data to match the
 927         // interpreter.
 928         //
 929         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 930         // are accessed as negative so LSW is at LOW address
 931 
 932         // ld_off is MSW so get LSW
 933         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 934         __ ldr(rscratch2, Address(esp, offset));
 935         // st_off is LSW (i.e. reg.first())
 936          __ str(rscratch2, Address(sp, st_off));
 937        }
 938      } else if (r_1->is_Register()) {  // Register argument
 939        Register r = r_1->as_Register();
 940        if (r_2->is_valid()) {
 941          //
 942          // We are using two VMRegs. This can be either T_OBJECT,
 943          // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
 944          // two slots but only uses one for thr T_LONG or T_DOUBLE case
 945          // So we must adjust where to pick up the data to match the
 946          // interpreter.
 947 
 948         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 949 
 950          // this can be a misaligned move
 951          __ ldr(r, Address(esp, offset));
 952        } else {
 953          // sign extend and use a full word?
 954          __ ldrw(r, Address(esp, ld_off));
 955        }
 956      } else {
 957        if (!r_2->is_valid()) {
 958          __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 959        } else {
 960          __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 961        }
 962      }
 963    }
 964 
 965 
 966   __ mov(rscratch2, rscratch1);
 967   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 968   __ mov(rscratch1, rscratch2);
 969 
 970   // 6243940 We might end up in handle_wrong_method if
 971   // the callee is deoptimized as we race thru here. If that
 972   // happens we don't want to take a safepoint because the
 973   // caller frame will look interpreted and arguments are now
 974   // "compiled" so it is much better to make this transition
 975   // invisible to the stack walking code. Unfortunately if
 976   // we try and find the callee by normal means a safepoint
 977   // is possible. So we stash the desired callee in the thread
 978   // and the vm will find there should this case occur.
 979 
 980   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 981   __ br(rscratch1);
 982 }
 983 
 984 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
 985 
 986   Label ok;
 987 
 988   Register holder = rscratch2;
 989   Register receiver = j_rarg0;
 990   Register tmp = r10;  // A call-clobbered register not used for arg passing
 991 
 992   // -------------------------------------------------------------------------
 993   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 994   // to the interpreter.  The args start out packed in the compiled layout.  They
 995   // need to be unpacked into the interpreter layout.  This will almost always
 996   // require some stack space.  We grow the current (compiled) stack, then repack
 997   // the args.  We  finally end in a jump to the generic interpreter entry point.
 998   // On exit from the interpreter, the interpreter will restore our SP (lest the
 999   // compiled code, which relies solely on SP and not FP, get sick).
1000 
1001   {
1002     __ block_comment("c2i_unverified_entry {");
1003     __ load_klass(rscratch1, receiver);
1004     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
1005     __ cmp(rscratch1, tmp);
1006     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
1007     __ br(Assembler::EQ, ok);
1008     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1009 
1010     __ bind(ok);
1011     // Method might have been compiled since the call site was patched to
1012     // interpreted; if that is the case treat it as a miss so we can get
1013     // the call site corrected.
1014     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
1015     __ cbz(rscratch1, skip_fixup);
1016     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1017     __ block_comment("} c2i_unverified_entry");
1018   }
1019 }
1020 
1021 
1022 // ---------------------------------------------------------------
1023 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1024                                                             int comp_args_on_stack,
1025                                                             const GrowableArray<SigEntry>* sig,
1026                                                             const VMRegPair* regs,
1027                                                             const GrowableArray<SigEntry>* sig_cc,
1028                                                             const VMRegPair* regs_cc,
1029                                                             const GrowableArray<SigEntry>* sig_cc_ro,
1030                                                             const VMRegPair* regs_cc_ro,
1031                                                             AdapterFingerPrint* fingerprint,
1032                                                             AdapterBlob*& new_adapter,
1033                                                             bool allocate_code_blob) {
1034 
1035   address i2c_entry = __ pc();
1036   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1037 
1038   address c2i_unverified_entry        = __ pc();
1039   address c2i_unverified_inline_entry = __ pc();
1040   Label skip_fixup;
1041 
1042   gen_inline_cache_check(masm, skip_fixup);
1043 
1044   OopMapSet* oop_maps = new OopMapSet();
1045   int frame_complete = CodeOffsets::frame_never_safe;
1046   int frame_size_in_words = 0;
1047 
1048   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1049   address c2i_no_clinit_check_entry = NULL;
1050   address c2i_inline_ro_entry = __ pc();
1051   if (regs_cc != regs_cc_ro) {
1052     // No class init barrier needed because method is guaranteed to be non-static
1053     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1054                     skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1055     skip_fixup.reset();
1056   }
1057 
1058   // Scalarized c2i adapter
1059   address c2i_entry        = __ pc();
1060   address c2i_inline_entry = __ pc();
1061   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1062                   skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1063 
1064   // Non-scalarized c2i adapter
1065   if (regs != regs_cc) {
1066     c2i_unverified_inline_entry = __ pc();
1067     Label inline_entry_skip_fixup;
1068     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1069 
1070     c2i_inline_entry = __ pc();
1071     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1072                     inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1073   }
1074 
1075   __ flush();
1076 
1077   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1078   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1079   if (allocate_code_blob) {
1080     bool caller_must_gc_arguments = (regs != regs_cc);
1081     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1082   }
1083 
1084   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1085 }
1086 
1087 static int c_calling_convention_priv(const BasicType *sig_bt,
1088                                          VMRegPair *regs,
1089                                          VMRegPair *regs2,
1090                                          int total_args_passed) {
1091   assert(regs2 == NULL, "not needed on AArch64");
1092 
1093 // We return the amount of VMRegImpl stack slots we need to reserve for all
1094 // the arguments NOT counting out_preserve_stack_slots.
1095 
1096     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1097       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
1098     };
1099     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1100       c_farg0, c_farg1, c_farg2, c_farg3,
1101       c_farg4, c_farg5, c_farg6, c_farg7
1102     };
1103 
1104     uint int_args = 0;
1105     uint fp_args = 0;
1106     uint stk_args = 0; // inc by 2 each time
1107 
1108     for (int i = 0; i < total_args_passed; i++) {
1109       switch (sig_bt[i]) {
1110       case T_BOOLEAN:
1111       case T_CHAR:
1112       case T_BYTE:
1113       case T_SHORT:
1114       case T_INT:
1115         if (int_args < Argument::n_int_register_parameters_c) {
1116           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1117         } else {
1118 #ifdef __APPLE__
1119           // Less-than word types are stored one after another.
1120           // The code is unable to handle this so bailout.
1121           return -1;
1122 #endif
1123           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1124           stk_args += 2;
1125         }
1126         break;
1127       case T_LONG:
1128         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1129         // fall through
1130       case T_OBJECT:
1131       case T_ARRAY:
1132       case T_PRIMITIVE_OBJECT:
1133       case T_ADDRESS:
1134       case T_METADATA:
1135         if (int_args < Argument::n_int_register_parameters_c) {
1136           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1137         } else {
1138           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1139           stk_args += 2;
1140         }
1141         break;
1142       case T_FLOAT:
1143         if (fp_args < Argument::n_float_register_parameters_c) {
1144           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1145         } else {
1146 #ifdef __APPLE__
1147           // Less-than word types are stored one after another.
1148           // The code is unable to handle this so bailout.
1149           return -1;
1150 #endif
1151           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1152           stk_args += 2;
1153         }
1154         break;
1155       case T_DOUBLE:
1156         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1157         if (fp_args < Argument::n_float_register_parameters_c) {
1158           regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1159         } else {
1160           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1161           stk_args += 2;
1162         }
1163         break;
1164       case T_VOID: // Halves of longs and doubles
1165         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1166         regs[i].set_bad();
1167         break;
1168       default:
1169         ShouldNotReachHere();
1170         break;
1171       }
1172     }
1173 
1174   return stk_args;
1175 }
1176 
1177 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1178                                              uint num_bits,
1179                                              uint total_args_passed) {
1180   Unimplemented();
1181   return 0;
1182 }
1183 
1184 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1185                                          VMRegPair *regs,
1186                                          VMRegPair *regs2,
1187                                          int total_args_passed)
1188 {
1189   int result = c_calling_convention_priv(sig_bt, regs, regs2, total_args_passed);
1190   guarantee(result >= 0, "Unsupported arguments configuration");
1191   return result;
1192 }
1193 
1194 
1195 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1196   // We always ignore the frame_slots arg and just use the space just below frame pointer
1197   // which by this time is free to use
1198   switch (ret_type) {
1199   case T_FLOAT:
1200     __ strs(v0, Address(rfp, -wordSize));
1201     break;
1202   case T_DOUBLE:
1203     __ strd(v0, Address(rfp, -wordSize));
1204     break;
1205   case T_VOID:  break;
1206   default: {
1207     __ str(r0, Address(rfp, -wordSize));
1208     }
1209   }
1210 }
1211 
1212 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1213   // We always ignore the frame_slots arg and just use the space just below frame pointer
1214   // which by this time is free to use
1215   switch (ret_type) {
1216   case T_FLOAT:
1217     __ ldrs(v0, Address(rfp, -wordSize));
1218     break;
1219   case T_DOUBLE:
1220     __ ldrd(v0, Address(rfp, -wordSize));
1221     break;
1222   case T_VOID:  break;
1223   default: {
1224     __ ldr(r0, Address(rfp, -wordSize));
1225     }
1226   }
1227 }
1228 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1229   RegSet x;
1230   for ( int i = first_arg ; i < arg_count ; i++ ) {
1231     if (args[i].first()->is_Register()) {
1232       x = x + args[i].first()->as_Register();
1233     } else if (args[i].first()->is_FloatRegister()) {
1234       __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
1235     }
1236   }
1237   __ push(x, sp);
1238 }
1239 
1240 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1241   RegSet x;
1242   for ( int i = first_arg ; i < arg_count ; i++ ) {
1243     if (args[i].first()->is_Register()) {
1244       x = x + args[i].first()->as_Register();
1245     } else {
1246       ;
1247     }
1248   }
1249   __ pop(x, sp);
1250   for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1251     if (args[i].first()->is_Register()) {
1252       ;
1253     } else if (args[i].first()->is_FloatRegister()) {
1254       __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
1255     }
1256   }
1257 }
1258 
1259 static void verify_oop_args(MacroAssembler* masm,
1260                             const methodHandle& method,
1261                             const BasicType* sig_bt,
1262                             const VMRegPair* regs) {
1263   Register temp_reg = r19;  // not part of any compiled calling seq
1264   if (VerifyOops) {
1265     for (int i = 0; i < method->size_of_parameters(); i++) {
1266       if (sig_bt[i] == T_OBJECT ||
1267           sig_bt[i] == T_ARRAY) {
1268         VMReg r = regs[i].first();
1269         assert(r->is_valid(), "bad oop arg");
1270         if (r->is_stack()) {
1271           __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1272           __ verify_oop(temp_reg);
1273         } else {
1274           __ verify_oop(r->as_Register());
1275         }
1276       }
1277     }
1278   }
1279 }
1280 
1281 // on exit, sp points to the ContinuationEntry
1282 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
1283   assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
1284   assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
1285   assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
1286 
1287   stack_slots += (int)ContinuationEntry::size()/wordSize;
1288   __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata
1289 
1290   OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
1291 
1292   __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1293   __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
1294   __ mov(rscratch1, sp); // we can't use sp as the source in str
1295   __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1296 
1297   return map;
1298 }
1299 
1300 // on entry c_rarg1 points to the continuation
1301 //          sp points to ContinuationEntry
1302 //          c_rarg3 -- isVirtualThread
1303 static void fill_continuation_entry(MacroAssembler* masm) {
1304 #ifdef ASSERT
1305   __ movw(rscratch1, ContinuationEntry::cookie_value());
1306   __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
1307 #endif
1308 
1309   __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
1310   __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
1311   __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
1312   __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
1313   __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));
1314 
1315   __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1316   __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1317   __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
1318   __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
1319 
1320   __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
1321   __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
1322 }
1323 
1324 // on entry, sp points to the ContinuationEntry
1325 // on exit, rfp points to the spilled rfp in the entry frame
1326 static void continuation_enter_cleanup(MacroAssembler* masm) {
1327 #ifndef PRODUCT
1328   Label OK;
1329   __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1330   __ cmp(sp, rscratch1);
1331   __ br(Assembler::EQ, OK);
1332   __ stop("incorrect sp1");
1333   __ bind(OK);
1334 #endif
1335 
1336   __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
1337   __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
1338   __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
1339   __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
1340 
1341   __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
1342   __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
1343   __ add(rfp, sp, (int)ContinuationEntry::size());
1344 }
1345 
1346 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1347 // On entry: c_rarg1 -- the continuation object
1348 //           c_rarg2 -- isContinue
1349 //           c_rarg3 -- isVirtualThread
1350 static void gen_continuation_enter(MacroAssembler* masm,
1351                                  const methodHandle& method,
1352                                  const BasicType* sig_bt,
1353                                  const VMRegPair* regs,
1354                                  int& exception_offset,
1355                                  OopMapSet*oop_maps,
1356                                  int& frame_complete,
1357                                  int& stack_slots,
1358                                  int& interpreted_entry_offset,
1359                                  int& compiled_entry_offset) {
1360   //verify_oop_args(masm, method, sig_bt, regs);
1361   Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
1362 
1363   address start = __ pc();
1364 
1365   Label call_thaw, exit;
1366 
1367   // i2i entry used at interp_only_mode only
1368   interpreted_entry_offset = __ pc() - start;
1369   {
1370 
1371 #ifdef ASSERT
1372     Label is_interp_only;
1373     __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
1374     __ cbnzw(rscratch1, is_interp_only);
1375     __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1376     __ bind(is_interp_only);
1377 #endif
1378 
1379     // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1380     __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
1381     __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
1382     __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
1383     __ push_cont_fastpath(rthread);
1384 
1385     __ enter();
1386     stack_slots = 2; // will be adjusted in setup
1387     OopMap* map = continuation_enter_setup(masm, stack_slots);
1388     // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
1389     // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.
1390 
1391     fill_continuation_entry(masm);
1392 
1393     __ cbnz(c_rarg2, call_thaw);
1394 
1395     const address tr_call = __ trampoline_call(resolve);
1396     if (tr_call == nullptr) {
1397       fatal("CodeCache is full at gen_continuation_enter");
1398     }
1399 
1400     oop_maps->add_gc_map(__ pc() - start, map);
1401     __ post_call_nop();
1402 
1403     __ b(exit);
1404 
1405     CodeBuffer* cbuf = masm->code_section()->outer();
1406     address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
1407     if (stub == nullptr) {
1408       fatal("CodeCache is full at gen_continuation_enter");
1409     }
1410   }
1411 
1412   // compiled entry
1413   __ align(CodeEntryAlignment);
1414   compiled_entry_offset = __ pc() - start;
1415 
1416   __ enter();
1417   stack_slots = 2; // will be adjusted in setup
1418   OopMap* map = continuation_enter_setup(masm, stack_slots);
1419   frame_complete = __ pc() - start;
1420 
1421   fill_continuation_entry(masm);
1422 
1423   __ cbnz(c_rarg2, call_thaw);
1424 
1425   const address tr_call = __ trampoline_call(resolve);
1426   if (tr_call == nullptr) {
1427     fatal("CodeCache is full at gen_continuation_enter");
1428   }
1429 
1430   oop_maps->add_gc_map(__ pc() - start, map);
1431   __ post_call_nop();
1432 
1433   __ b(exit);
1434 
1435   __ bind(call_thaw);
1436 
1437   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1438   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1439   ContinuationEntry::_return_pc_offset = __ pc() - start;
1440   __ post_call_nop();
1441 
1442   __ bind(exit);
1443   continuation_enter_cleanup(masm);
1444   __ leave();
1445   __ ret(lr);
1446 
1447   /// exception handling
1448 
1449   exception_offset = __ pc() - start;
1450   {
1451       __ mov(r19, r0); // save return value contaning the exception oop in callee-saved R19
1452 
1453       continuation_enter_cleanup(masm);
1454 
1455       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1456       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1457 
1458       // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1459 
1460       __ mov(r1, r0); // the exception handler
1461       __ mov(r0, r19); // restore return value contaning the exception oop
1462       __ verify_oop(r0);
1463 
1464       __ leave();
1465       __ mov(r3, lr);
1466       __ br(r1); // the exception handler
1467   }
1468 
1469   CodeBuffer* cbuf = masm->code_section()->outer();
1470   address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
1471   if (stub == nullptr) {
1472     fatal("CodeCache is full at gen_continuation_enter");
1473   }
1474 }
1475 
1476 static void gen_continuation_yield(MacroAssembler* masm,
1477                                    const methodHandle& method,
1478                                    const BasicType* sig_bt,
1479                                    const VMRegPair* regs,
1480                                    OopMapSet* oop_maps,
1481                                    int& frame_complete,
1482                                    int& stack_slots,
1483                                    int& compiled_entry_offset) {
1484     enum layout {
1485       rfp_off1,
1486       rfp_off2,
1487       lr_off,
1488       lr_off2,
1489       framesize // inclusive of return address
1490     };
1491     // assert(is_even(framesize/2), "sp not 16-byte aligned");
1492     stack_slots = framesize /  VMRegImpl::slots_per_word;
1493     assert(stack_slots == 2, "recheck layout");
1494 
1495     address start = __ pc();
1496 
1497     compiled_entry_offset = __ pc() - start;
1498     __ enter();
1499 
1500     __ mov(c_rarg1, sp);
1501 
1502     frame_complete = __ pc() - start;
1503     address the_pc = __ pc();
1504 
1505     __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup
1506 
1507     __ mov(c_rarg0, rthread);
1508     __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
1509     __ call_VM_leaf(Continuation::freeze_entry(), 2);
1510     __ reset_last_Java_frame(true);
1511 
1512     Label pinned;
1513 
1514     __ cbnz(r0, pinned);
1515 
1516     // We've succeeded, set sp to the ContinuationEntry
1517     __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1518     __ mov(sp, rscratch1);
1519     continuation_enter_cleanup(masm);
1520 
1521     __ bind(pinned); // pinned -- return to caller
1522 
1523     __ leave();
1524     __ ret(lr);
1525 
1526     OopMap* map = new OopMap(framesize, 1);
1527     oop_maps->add_gc_map(the_pc - start, map);
1528 }
1529 
1530 static void gen_special_dispatch(MacroAssembler* masm,
1531                                  const methodHandle& method,
1532                                  const BasicType* sig_bt,
1533                                  const VMRegPair* regs) {
1534   verify_oop_args(masm, method, sig_bt, regs);
1535   vmIntrinsics::ID iid = method->intrinsic_id();
1536 
1537   // Now write the args into the outgoing interpreter space
1538   bool     has_receiver   = false;
1539   Register receiver_reg   = noreg;
1540   int      member_arg_pos = -1;
1541   Register member_reg     = noreg;
1542   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1543   if (ref_kind != 0) {
1544     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1545     member_reg = r19;  // known to be free at this point
1546     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1547   } else if (iid == vmIntrinsics::_invokeBasic) {
1548     has_receiver = true;
1549   } else if (iid == vmIntrinsics::_linkToNative) {
1550     member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1551     member_reg = r19;  // known to be free at this point
1552   } else {
1553     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1554   }
1555 
1556   if (member_reg != noreg) {
1557     // Load the member_arg into register, if necessary.
1558     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1559     VMReg r = regs[member_arg_pos].first();
1560     if (r->is_stack()) {
1561       __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1562     } else {
1563       // no data motion is needed
1564       member_reg = r->as_Register();
1565     }
1566   }
1567 
1568   if (has_receiver) {
1569     // Make sure the receiver is loaded into a register.
1570     assert(method->size_of_parameters() > 0, "oob");
1571     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1572     VMReg r = regs[0].first();
1573     assert(r->is_valid(), "bad receiver arg");
1574     if (r->is_stack()) {
1575       // Porting note:  This assumes that compiled calling conventions always
1576       // pass the receiver oop in a register.  If this is not true on some
1577       // platform, pick a temp and load the receiver from stack.
1578       fatal("receiver always in a register");
1579       receiver_reg = r2;  // known to be free at this point
1580       __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1581     } else {
1582       // no data motion is needed
1583       receiver_reg = r->as_Register();
1584     }
1585   }
1586 
1587   // Figure out which address we are really jumping to:
1588   MethodHandles::generate_method_handle_dispatch(masm, iid,
1589                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1590 }
1591 
1592 // ---------------------------------------------------------------------------
1593 // Generate a native wrapper for a given method.  The method takes arguments
1594 // in the Java compiled code convention, marshals them to the native
1595 // convention (handlizes oops, etc), transitions to native, makes the call,
1596 // returns to java state (possibly blocking), unhandlizes any result and
1597 // returns.
1598 //
1599 // Critical native functions are a shorthand for the use of
1600 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1601 // functions.  The wrapper is expected to unpack the arguments before
1602 // passing them to the callee. Critical native functions leave the state _in_Java,
1603 // since they block out GC.
1604 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1605 // block and the check for pending exceptions it's impossible for them
1606 // to be thrown.
1607 //
1608 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1609                                                 const methodHandle& method,
1610                                                 int compile_id,
1611                                                 BasicType* in_sig_bt,
1612                                                 VMRegPair* in_regs,
1613                                                 BasicType ret_type) {
1614   if (method->is_continuation_native_intrinsic()) {
1615     int exception_offset = -1;
1616     OopMapSet* oop_maps = new OopMapSet();
1617     int frame_complete = -1;
1618     int stack_slots = -1;
1619     int interpreted_entry_offset = -1;
1620     int vep_offset = -1;
1621     if (method->is_continuation_enter_intrinsic()) {
1622       gen_continuation_enter(masm,
1623                              method,
1624                              in_sig_bt,
1625                              in_regs,
1626                              exception_offset,
1627                              oop_maps,
1628                              frame_complete,
1629                              stack_slots,
1630                              interpreted_entry_offset,
1631                              vep_offset);
1632     } else if (method->is_continuation_yield_intrinsic()) {
1633       gen_continuation_yield(masm,
1634                              method,
1635                              in_sig_bt,
1636                              in_regs,
1637                              oop_maps,
1638                              frame_complete,
1639                              stack_slots,
1640                              vep_offset);
1641     } else {
1642       guarantee(false, "Unknown Continuation native intrinsic");
1643     }
1644 
1645 #ifdef ASSERT
1646     if (method->is_continuation_enter_intrinsic()) {
1647       assert(interpreted_entry_offset != -1, "Must be set");
1648       assert(exception_offset != -1,         "Must be set");
1649     } else {
1650       assert(interpreted_entry_offset == -1, "Must be unset");
1651       assert(exception_offset == -1,         "Must be unset");
1652     }
1653     assert(frame_complete != -1,    "Must be set");
1654     assert(stack_slots != -1,       "Must be set");
1655     assert(vep_offset != -1,        "Must be set");
1656 #endif
1657 
1658     __ flush();
1659     nmethod* nm = nmethod::new_native_nmethod(method,
1660                                               compile_id,
1661                                               masm->code(),
1662                                               vep_offset,
1663                                               frame_complete,
1664                                               stack_slots,
1665                                               in_ByteSize(-1),
1666                                               in_ByteSize(-1),
1667                                               oop_maps,
1668                                               exception_offset);
1669     if (method->is_continuation_enter_intrinsic()) {
1670       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1671     } else if (method->is_continuation_yield_intrinsic()) {
1672       _cont_doYield_stub = nm;
1673     } else {
1674       guarantee(false, "Unknown Continuation native intrinsic");
1675     }
1676     return nm;
1677   }
1678 
1679   if (method->is_method_handle_intrinsic()) {
1680     vmIntrinsics::ID iid = method->intrinsic_id();
1681     intptr_t start = (intptr_t)__ pc();
1682     int vep_offset = ((intptr_t)__ pc()) - start;
1683 
1684     // First instruction must be a nop as it may need to be patched on deoptimisation
1685     __ nop();
1686     gen_special_dispatch(masm,
1687                          method,
1688                          in_sig_bt,
1689                          in_regs);
1690     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1691     __ flush();
1692     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1693     return nmethod::new_native_nmethod(method,
1694                                        compile_id,
1695                                        masm->code(),
1696                                        vep_offset,
1697                                        frame_complete,
1698                                        stack_slots / VMRegImpl::slots_per_word,
1699                                        in_ByteSize(-1),
1700                                        in_ByteSize(-1),
1701                                        (OopMapSet*)NULL);
1702   }
1703   address native_func = method->native_function();
1704   assert(native_func != NULL, "must have function");
1705 
1706   // An OopMap for lock (and class if static)
1707   OopMapSet *oop_maps = new OopMapSet();
1708   intptr_t start = (intptr_t)__ pc();
1709 
1710   // We have received a description of where all the java arg are located
1711   // on entry to the wrapper. We need to convert these args to where
1712   // the jni function will expect them. To figure out where they go
1713   // we convert the java signature to a C signature by inserting
1714   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1715 
1716   const int total_in_args = method->size_of_parameters();
1717   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1718 
1719   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1720   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1721   BasicType* in_elem_bt = NULL;
1722 
1723   int argc = 0;
1724   out_sig_bt[argc++] = T_ADDRESS;
1725   if (method->is_static()) {
1726     out_sig_bt[argc++] = T_OBJECT;
1727   }
1728 
1729   for (int i = 0; i < total_in_args ; i++ ) {
1730     out_sig_bt[argc++] = in_sig_bt[i];
1731   }
1732 
1733   // Now figure out where the args must be stored and how much stack space
1734   // they require.
1735   int out_arg_slots;
1736   out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, NULL, total_c_args);
1737 
1738   if (out_arg_slots < 0) {
1739     return NULL;
1740   }
1741 
1742   // Compute framesize for the wrapper.  We need to handlize all oops in
1743   // incoming registers
1744 
1745   // Calculate the total number of stack slots we will need.
1746 
1747   // First count the abi requirement plus all of the outgoing args
1748   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1749 
1750   // Now the space for the inbound oop handle area
1751   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1752 
1753   int oop_handle_offset = stack_slots;
1754   stack_slots += total_save_slots;
1755 
1756   // Now any space we need for handlizing a klass if static method
1757 
1758   int klass_slot_offset = 0;
1759   int klass_offset = -1;
1760   int lock_slot_offset = 0;
1761   bool is_static = false;
1762 
1763   if (method->is_static()) {
1764     klass_slot_offset = stack_slots;
1765     stack_slots += VMRegImpl::slots_per_word;
1766     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1767     is_static = true;
1768   }
1769 
1770   // Plus a lock if needed
1771 
1772   if (method->is_synchronized()) {
1773     lock_slot_offset = stack_slots;
1774     stack_slots += VMRegImpl::slots_per_word;
1775   }
1776 
1777   // Now a place (+2) to save return values or temp during shuffling
1778   // + 4 for return address (which we own) and saved rfp
1779   stack_slots += 6;
1780 
1781   // Ok The space we have allocated will look like:
1782   //
1783   //
1784   // FP-> |                     |
1785   //      |---------------------|
1786   //      | 2 slots for moves   |
1787   //      |---------------------|
1788   //      | lock box (if sync)  |
1789   //      |---------------------| <- lock_slot_offset
1790   //      | klass (if static)   |
1791   //      |---------------------| <- klass_slot_offset
1792   //      | oopHandle area      |
1793   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1794   //      | outbound memory     |
1795   //      | based arguments     |
1796   //      |                     |
1797   //      |---------------------|
1798   //      |                     |
1799   // SP-> | out_preserved_slots |
1800   //
1801   //
1802 
1803 
1804   // Now compute actual number of stack words we need rounding to make
1805   // stack properly aligned.
1806   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1807 
1808   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1809 
1810   // First thing make an ic check to see if we should even be here
1811 
1812   // We are free to use all registers as temps without saving them and
1813   // restoring them except rfp. rfp is the only callee save register
1814   // as far as the interpreter and the compiler(s) are concerned.
1815 
1816 
1817   const Register ic_reg = rscratch2;
1818   const Register receiver = j_rarg0;
1819 
1820   Label hit;
1821   Label exception_pending;
1822 
1823   assert_different_registers(ic_reg, receiver, rscratch1);
1824   __ verify_oop(receiver);
1825   __ cmp_klass(receiver, ic_reg, rscratch1);
1826   __ br(Assembler::EQ, hit);
1827 
1828   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1829 
1830   // Verified entry point must be aligned
1831   __ align(8);
1832 
1833   __ bind(hit);
1834 
1835   int vep_offset = ((intptr_t)__ pc()) - start;
1836 
1837   // If we have to make this method not-entrant we'll overwrite its
1838   // first instruction with a jump.  For this action to be legal we
1839   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1840   // SVC, HVC, or SMC.  Make it a NOP.
1841   __ nop();
1842 
1843   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1844     Label L_skip_barrier;
1845     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1846     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1847     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1848 
1849     __ bind(L_skip_barrier);
1850   }
1851 
1852   // Generate stack overflow check
1853   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1854 
1855   // Generate a new frame for the wrapper.
1856   __ enter();
1857   // -2 because return address is already present and so is saved rfp
1858   __ sub(sp, sp, stack_size - 2*wordSize);
1859 
1860   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1861   bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
1862 
1863   // Frame is now completed as far as size and linkage.
1864   int frame_complete = ((intptr_t)__ pc()) - start;
1865 
1866   // We use r20 as the oop handle for the receiver/klass
1867   // It is callee save so it survives the call to native
1868 
1869   const Register oop_handle_reg = r20;
1870 
1871   //
1872   // We immediately shuffle the arguments so that any vm call we have to
1873   // make from here on out (sync slow path, jvmti, etc.) we will have
1874   // captured the oops from our caller and have a valid oopMap for
1875   // them.
1876 
1877   // -----------------
1878   // The Grand Shuffle
1879 
1880   // The Java calling convention is either equal (linux) or denser (win64) than the
1881   // c calling convention. However the because of the jni_env argument the c calling
1882   // convention always has at least one more (and two for static) arguments than Java.
1883   // Therefore if we move the args from java -> c backwards then we will never have
1884   // a register->register conflict and we don't have to build a dependency graph
1885   // and figure out how to break any cycles.
1886   //
1887 
1888   // Record esp-based slot for receiver on stack for non-static methods
1889   int receiver_offset = -1;
1890 
1891   // This is a trick. We double the stack slots so we can claim
1892   // the oops in the caller's frame. Since we are sure to have
1893   // more args than the caller doubling is enough to make
1894   // sure we can capture all the incoming oop args from the
1895   // caller.
1896   //
1897   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1898 
1899   // Mark location of rfp (someday)
1900   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1901 
1902 
1903   int float_args = 0;
1904   int int_args = 0;
1905 
1906 #ifdef ASSERT
1907   bool reg_destroyed[Register::number_of_registers];
1908   bool freg_destroyed[FloatRegister::number_of_registers];
1909   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1910     reg_destroyed[r] = false;
1911   }
1912   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1913     freg_destroyed[f] = false;
1914   }
1915 
1916 #endif /* ASSERT */
1917 
1918   // For JNI natives the incoming and outgoing registers are offset upwards.
1919   GrowableArray<int> arg_order(2 * total_in_args);
1920   VMRegPair tmp_vmreg;
1921   tmp_vmreg.set2(r19->as_VMReg());
1922 
1923   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1924     arg_order.push(i);
1925     arg_order.push(c_arg);
1926   }
1927 
1928   int temploc = -1;
1929   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1930     int i = arg_order.at(ai);
1931     int c_arg = arg_order.at(ai + 1);
1932     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1933     assert(c_arg != -1 && i != -1, "wrong order");
1934 #ifdef ASSERT
1935     if (in_regs[i].first()->is_Register()) {
1936       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1937     } else if (in_regs[i].first()->is_FloatRegister()) {
1938       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1939     }
1940     if (out_regs[c_arg].first()->is_Register()) {
1941       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1942     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1943       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1944     }
1945 #endif /* ASSERT */
1946     switch (in_sig_bt[i]) {
1947       case T_ARRAY:
1948       case T_PRIMITIVE_OBJECT:
1949       case T_OBJECT:
1950         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1951                        ((i == 0) && (!is_static)),
1952                        &receiver_offset);
1953         int_args++;
1954         break;
1955       case T_VOID:
1956         break;
1957 
1958       case T_FLOAT:
1959         __ float_move(in_regs[i], out_regs[c_arg]);
1960         float_args++;
1961         break;
1962 
1963       case T_DOUBLE:
1964         assert( i + 1 < total_in_args &&
1965                 in_sig_bt[i + 1] == T_VOID &&
1966                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1967         __ double_move(in_regs[i], out_regs[c_arg]);
1968         float_args++;
1969         break;
1970 
1971       case T_LONG :
1972         __ long_move(in_regs[i], out_regs[c_arg]);
1973         int_args++;
1974         break;
1975 
1976       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1977 
1978       default:
1979         __ move32_64(in_regs[i], out_regs[c_arg]);
1980         int_args++;
1981     }
1982   }
1983 
1984   // point c_arg at the first arg that is already loaded in case we
1985   // need to spill before we call out
1986   int c_arg = total_c_args - total_in_args;
1987 
1988   // Pre-load a static method's oop into c_rarg1.
1989   if (method->is_static()) {
1990 
1991     //  load oop into a register
1992     __ movoop(c_rarg1,
1993               JNIHandles::make_local(method->method_holder()->java_mirror()));
1994 
1995     // Now handlize the static class mirror it's known not-null.
1996     __ str(c_rarg1, Address(sp, klass_offset));
1997     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1998 
1999     // Now get the handle
2000     __ lea(c_rarg1, Address(sp, klass_offset));
2001     // and protect the arg if we must spill
2002     c_arg--;
2003   }
2004 
2005   // Change state to native (we save the return address in the thread, since it might not
2006   // be pushed on the stack when we do a stack traversal).
2007   // We use the same pc/oopMap repeatedly when we call out
2008 
2009   Label native_return;
2010   __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
2011 
2012   Label dtrace_method_entry, dtrace_method_entry_done;
2013   {
2014     uint64_t offset;
2015     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2016     __ ldrb(rscratch1, Address(rscratch1, offset));
2017     __ cbnzw(rscratch1, dtrace_method_entry);
2018     __ bind(dtrace_method_entry_done);
2019   }
2020 
2021   // RedefineClasses() tracing support for obsolete method entry
2022   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2023     // protect the args we've loaded
2024     save_args(masm, total_c_args, c_arg, out_regs);
2025     __ mov_metadata(c_rarg1, method());
2026     __ call_VM_leaf(
2027       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2028       rthread, c_rarg1);
2029     restore_args(masm, total_c_args, c_arg, out_regs);
2030   }
2031 
2032   // Lock a synchronized method
2033 
2034   // Register definitions used by locking and unlocking
2035 
2036   const Register swap_reg = r0;
2037   const Register obj_reg  = r19;  // Will contain the oop
2038   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2039   const Register old_hdr  = r13;  // value of old header at unlock time
2040   const Register tmp = lr;
2041 
2042   Label slow_path_lock;
2043   Label lock_done;
2044 
2045   if (method->is_synchronized()) {
2046     Label count;
2047     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2048 
2049     // Get the handle (the 2nd argument)
2050     __ mov(oop_handle_reg, c_rarg1);
2051 
2052     // Get address of the box
2053 
2054     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2055 
2056     // Load the oop from the handle
2057     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2058 
2059     if (!UseHeavyMonitors) {
2060       // Load (object->mark() | 1) into swap_reg %r0
2061       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2062       __ orr(swap_reg, rscratch1, 1);
2063       if (EnableValhalla) {
2064         // Mask inline_type bit such that we go to the slow path if object is an inline type
2065         __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
2066       }
2067 
2068       // Save (object->mark() | 1) into BasicLock's displaced header
2069       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2070 
2071       // src -> dest iff dest == r0 else r0 <- dest
2072       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
2073 
2074       // Hmm should this move to the slow path code area???
2075 
2076       // Test if the oopMark is an obvious stack pointer, i.e.,
2077       //  1) (mark & 3) == 0, and
2078       //  2) sp <= mark < mark + os::pagesize()
2079       // These 3 tests can be done by evaluating the following
2080       // expression: ((mark - sp) & (3 - os::vm_page_size())),
2081       // assuming both stack pointer and pagesize have their
2082       // least significant 2 bits clear.
2083       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
2084 
2085       __ sub(swap_reg, sp, swap_reg);
2086       __ neg(swap_reg, swap_reg);
2087       __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
2088 
2089       // Save the test result, for recursive case, the result is zero
2090       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2091       __ br(Assembler::NE, slow_path_lock);
2092     } else {
2093       __ b(slow_path_lock);
2094     }
2095     __ bind(count);
2096     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
2097 
2098     // Slow path will re-enter here
2099     __ bind(lock_done);
2100   }
2101 
2102 
2103   // Finally just about ready to make the JNI call
2104 
2105   // get JNIEnv* which is first argument to native
2106   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
2107 
2108   // Now set thread in native
2109   __ mov(rscratch1, _thread_in_native);
2110   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2111   __ stlrw(rscratch1, rscratch2);
2112 
2113   __ rt_call(native_func);
2114 
2115   __ bind(native_return);
2116 
2117   intptr_t return_pc = (intptr_t) __ pc();
2118   oop_maps->add_gc_map(return_pc - start, map);
2119 
2120   // Unpack native results.
2121   switch (ret_type) {
2122   case T_BOOLEAN: __ c2bool(r0);                     break;
2123   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
2124   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
2125   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
2126   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
2127   case T_DOUBLE :
2128   case T_FLOAT  :
2129     // Result is in v0 we'll save as needed
2130     break;
2131   case T_ARRAY:                 // Really a handle
2132   case T_PRIMITIVE_OBJECT:           // Really a handle
2133   case T_OBJECT:                // Really a handle
2134       break; // can't de-handlize until after safepoint check
2135   case T_VOID: break;
2136   case T_LONG: break;
2137   default       : ShouldNotReachHere();
2138   }
2139 
2140   Label safepoint_in_progress, safepoint_in_progress_done;
2141   Label after_transition;
2142 
2143   // Switch thread to "native transition" state before reading the synchronization state.
2144   // This additional state is necessary because reading and testing the synchronization
2145   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2146   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2147   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2148   //     Thread A is resumed to finish this native method, but doesn't block here since it
2149   //     didn't see any synchronization is progress, and escapes.
2150   __ mov(rscratch1, _thread_in_native_trans);
2151 
2152   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
2153 
2154   // Force this write out before the read below
2155   if (!UseSystemMemoryBarrier) {
2156     __ dmb(Assembler::ISH);
2157   }
2158 
2159   __ verify_sve_vector_length();
2160 
2161   // Check for safepoint operation in progress and/or pending suspend requests.
2162   {
2163     // We need an acquire here to ensure that any subsequent load of the
2164     // global SafepointSynchronize::_state flag is ordered after this load
2165     // of the thread-local polling word.  We don't want this poll to
2166     // return false (i.e. not safepointing) and a later poll of the global
2167     // SafepointSynchronize::_state spuriously to return true.
2168     //
2169     // This is to avoid a race when we're in a native->Java transition
2170     // racing the code which wakes up from a safepoint.
2171 
2172     __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
2173     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
2174     __ cbnzw(rscratch1, safepoint_in_progress);
2175     __ bind(safepoint_in_progress_done);
2176   }
2177 
2178   // change thread state
2179   __ mov(rscratch1, _thread_in_Java);
2180   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2181   __ stlrw(rscratch1, rscratch2);
2182   __ bind(after_transition);
2183 
2184   Label reguard;
2185   Label reguard_done;
2186   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
2187   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
2188   __ br(Assembler::EQ, reguard);
2189   __ bind(reguard_done);
2190 
2191   // native result if any is live
2192 
2193   // Unlock
2194   Label unlock_done;
2195   Label slow_path_unlock;
2196   if (method->is_synchronized()) {
2197 
2198     // Get locked oop from the handle we passed to jni
2199     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2200 
2201     Label done, not_recursive;
2202 
2203     if (!UseHeavyMonitors) {
2204       // Simple recursive lock?
2205       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2206       __ cbnz(rscratch1, not_recursive);
2207       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
2208       __ b(done);
2209     }
2210 
2211     __ bind(not_recursive);
2212 
2213     // Must save r0 if if it is live now because cmpxchg must use it
2214     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2215       save_native_result(masm, ret_type, stack_slots);
2216     }
2217 
2218     if (!UseHeavyMonitors) {
2219       // get address of the stack lock
2220       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2221       //  get old displaced header
2222       __ ldr(old_hdr, Address(r0, 0));
2223 
2224       // Atomic swap old header if oop still contains the stack lock
2225       Label count;
2226       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
2227       __ bind(count);
2228       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
2229     } else {
2230       __ b(slow_path_unlock);
2231     }
2232 
2233     // slow path re-enters here
2234     __ bind(unlock_done);
2235     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2236       restore_native_result(masm, ret_type, stack_slots);
2237     }
2238 
2239     __ bind(done);
2240   }
2241 
2242   Label dtrace_method_exit, dtrace_method_exit_done;
2243   {
2244     uint64_t offset;
2245     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2246     __ ldrb(rscratch1, Address(rscratch1, offset));
2247     __ cbnzw(rscratch1, dtrace_method_exit);
2248     __ bind(dtrace_method_exit_done);
2249   }
2250 
2251   __ reset_last_Java_frame(false);
2252 
2253   // Unbox oop result, e.g. JNIHandles::resolve result.
2254   if (is_reference_type(ret_type)) {
2255     __ resolve_jobject(r0, r1, r2);
2256   }
2257 
2258   if (CheckJNICalls) {
2259     // clear_pending_jni_exception_check
2260     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2261   }
2262 
2263   // reset handle block
2264   __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2265   __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
2266 
2267   __ leave();
2268 
2269   // Any exception pending?
2270   __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2271   __ cbnz(rscratch1, exception_pending);
2272 
2273   // We're done
2274   __ ret(lr);
2275 
2276   // Unexpected paths are out of line and go here
2277 
2278   // forward the exception
2279   __ bind(exception_pending);
2280 
2281   // and forward the exception
2282   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2283 
2284   // Slow path locking & unlocking
2285   if (method->is_synchronized()) {
2286 
2287     __ block_comment("Slow path lock {");
2288     __ bind(slow_path_lock);
2289 
2290     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2291     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2292 
2293     // protect the args we've loaded
2294     save_args(masm, total_c_args, c_arg, out_regs);
2295 
2296     __ mov(c_rarg0, obj_reg);
2297     __ mov(c_rarg1, lock_reg);
2298     __ mov(c_rarg2, rthread);
2299 
2300     // Not a leaf but we have last_Java_frame setup as we want
2301     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2302     restore_args(masm, total_c_args, c_arg, out_regs);
2303 
2304 #ifdef ASSERT
2305     { Label L;
2306       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2307       __ cbz(rscratch1, L);
2308       __ stop("no pending exception allowed on exit from monitorenter");
2309       __ bind(L);
2310     }
2311 #endif
2312     __ b(lock_done);
2313 
2314     __ block_comment("} Slow path lock");
2315 
2316     __ block_comment("Slow path unlock {");
2317     __ bind(slow_path_unlock);
2318 
2319     // If we haven't already saved the native result we must save it now as xmm registers
2320     // are still exposed.
2321 
2322     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2323       save_native_result(masm, ret_type, stack_slots);
2324     }
2325 
2326     __ mov(c_rarg2, rthread);
2327     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2328     __ mov(c_rarg0, obj_reg);
2329 
2330     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2331     // NOTE that obj_reg == r19 currently
2332     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2333     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2334 
2335     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2336 
2337 #ifdef ASSERT
2338     {
2339       Label L;
2340       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2341       __ cbz(rscratch1, L);
2342       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2343       __ bind(L);
2344     }
2345 #endif /* ASSERT */
2346 
2347     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2348 
2349     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2350       restore_native_result(masm, ret_type, stack_slots);
2351     }
2352     __ b(unlock_done);
2353 
2354     __ block_comment("} Slow path unlock");
2355 
2356   } // synchronized
2357 
2358   // SLOW PATH Reguard the stack if needed
2359 
2360   __ bind(reguard);
2361   save_native_result(masm, ret_type, stack_slots);
2362   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2363   restore_native_result(masm, ret_type, stack_slots);
2364   // and continue
2365   __ b(reguard_done);
2366 
2367   // SLOW PATH safepoint
2368   {
2369     __ block_comment("safepoint {");
2370     __ bind(safepoint_in_progress);
2371 
2372     // Don't use call_VM as it will see a possible pending exception and forward it
2373     // and never return here preventing us from clearing _last_native_pc down below.
2374     //
2375     save_native_result(masm, ret_type, stack_slots);
2376     __ mov(c_rarg0, rthread);
2377 #ifndef PRODUCT
2378   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2379 #endif
2380     __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2381     __ blr(rscratch1);
2382 
2383     // Restore any method result value
2384     restore_native_result(masm, ret_type, stack_slots);
2385 
2386     __ b(safepoint_in_progress_done);
2387     __ block_comment("} safepoint");
2388   }
2389 
2390   // SLOW PATH dtrace support
2391   {
2392     __ block_comment("dtrace entry {");
2393     __ bind(dtrace_method_entry);
2394 
2395     // We have all of the arguments setup at this point. We must not touch any register
2396     // argument registers at this point (what if we save/restore them there are no oop?
2397 
2398     save_args(masm, total_c_args, c_arg, out_regs);
2399     __ mov_metadata(c_rarg1, method());
2400     __ call_VM_leaf(
2401       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2402       rthread, c_rarg1);
2403     restore_args(masm, total_c_args, c_arg, out_regs);
2404     __ b(dtrace_method_entry_done);
2405     __ block_comment("} dtrace entry");
2406   }
2407 
2408   {
2409     __ block_comment("dtrace exit {");
2410     __ bind(dtrace_method_exit);
2411     save_native_result(masm, ret_type, stack_slots);
2412     __ mov_metadata(c_rarg1, method());
2413     __ call_VM_leaf(
2414          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2415          rthread, c_rarg1);
2416     restore_native_result(masm, ret_type, stack_slots);
2417     __ b(dtrace_method_exit_done);
2418     __ block_comment("} dtrace exit");
2419   }
2420 
2421 
2422   __ flush();
2423 
2424   nmethod *nm = nmethod::new_native_nmethod(method,
2425                                             compile_id,
2426                                             masm->code(),
2427                                             vep_offset,
2428                                             frame_complete,
2429                                             stack_slots / VMRegImpl::slots_per_word,
2430                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2431                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2432                                             oop_maps);
2433 
2434   return nm;
2435 }
2436 
2437 // this function returns the adjust size (in number of words) to a c2i adapter
2438 // activation for use during deoptimization
2439 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2440   assert(callee_locals >= callee_parameters,
2441           "test and remove; got more parms than locals");
2442   if (callee_locals < callee_parameters)
2443     return 0;                   // No adjustment for negative locals
2444   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2445   // diff is counted in stack words
2446   return align_up(diff, 2);
2447 }
2448 
2449 
2450 //------------------------------generate_deopt_blob----------------------------
2451 void SharedRuntime::generate_deopt_blob() {
2452   // Allocate space for the code
2453   ResourceMark rm;
2454   // Setup code generation tools
2455   int pad = 0;
2456 #if INCLUDE_JVMCI
2457   if (EnableJVMCI) {
2458     pad += 512; // Increase the buffer size when compiling for JVMCI
2459   }
2460 #endif
2461   CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2462   MacroAssembler* masm = new MacroAssembler(&buffer);
2463   int frame_size_in_words;
2464   OopMap* map = NULL;
2465   OopMapSet *oop_maps = new OopMapSet();
2466   RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2467 
2468   // -------------
2469   // This code enters when returning to a de-optimized nmethod.  A return
2470   // address has been pushed on the stack, and return values are in
2471   // registers.
2472   // If we are doing a normal deopt then we were called from the patched
2473   // nmethod from the point we returned to the nmethod. So the return
2474   // address on the stack is wrong by NativeCall::instruction_size
2475   // We will adjust the value so it looks like we have the original return
2476   // address on the stack (like when we eagerly deoptimized).
2477   // In the case of an exception pending when deoptimizing, we enter
2478   // with a return address on the stack that points after the call we patched
2479   // into the exception handler. We have the following register state from,
2480   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2481   //    r0: exception oop
2482   //    r19: exception handler
2483   //    r3: throwing pc
2484   // So in this case we simply jam r3 into the useless return address and
2485   // the stack looks just like we want.
2486   //
2487   // At this point we need to de-opt.  We save the argument return
2488   // registers.  We call the first C routine, fetch_unroll_info().  This
2489   // routine captures the return values and returns a structure which
2490   // describes the current frame size and the sizes of all replacement frames.
2491   // The current frame is compiled code and may contain many inlined
2492   // functions, each with their own JVM state.  We pop the current frame, then
2493   // push all the new frames.  Then we call the C routine unpack_frames() to
2494   // populate these frames.  Finally unpack_frames() returns us the new target
2495   // address.  Notice that callee-save registers are BLOWN here; they have
2496   // already been captured in the vframeArray at the time the return PC was
2497   // patched.
2498   address start = __ pc();
2499   Label cont;
2500 
2501   // Prolog for non exception case!
2502 
2503   // Save everything in sight.
2504   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2505 
2506   // Normal deoptimization.  Save exec mode for unpack_frames.
2507   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2508   __ b(cont);
2509 
2510   int reexecute_offset = __ pc() - start;
2511 #if INCLUDE_JVMCI && !defined(COMPILER1)
2512   if (EnableJVMCI && UseJVMCICompiler) {
2513     // JVMCI does not use this kind of deoptimization
2514     __ should_not_reach_here();
2515   }
2516 #endif
2517 
2518   // Reexecute case
2519   // return address is the pc describes what bci to do re-execute at
2520 
2521   // No need to update map as each call to save_live_registers will produce identical oopmap
2522   (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2523 
2524   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2525   __ b(cont);
2526 
2527 #if INCLUDE_JVMCI
2528   Label after_fetch_unroll_info_call;
2529   int implicit_exception_uncommon_trap_offset = 0;
2530   int uncommon_trap_offset = 0;
2531 
2532   if (EnableJVMCI) {
2533     implicit_exception_uncommon_trap_offset = __ pc() - start;
2534 
2535     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2536     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2537 
2538     uncommon_trap_offset = __ pc() - start;
2539 
2540     // Save everything in sight.
2541     reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2542     // fetch_unroll_info needs to call last_java_frame()
2543     Label retaddr;
2544     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2545 
2546     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2547     __ movw(rscratch1, -1);
2548     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2549 
2550     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2551     __ mov(c_rarg0, rthread);
2552     __ movw(c_rarg2, rcpool); // exec mode
2553     __ lea(rscratch1,
2554            RuntimeAddress(CAST_FROM_FN_PTR(address,
2555                                            Deoptimization::uncommon_trap)));
2556     __ blr(rscratch1);
2557     __ bind(retaddr);
2558     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2559 
2560     __ reset_last_Java_frame(false);
2561 
2562     __ b(after_fetch_unroll_info_call);
2563   } // EnableJVMCI
2564 #endif // INCLUDE_JVMCI
2565 
2566   int exception_offset = __ pc() - start;
2567 
2568   // Prolog for exception case
2569 
2570   // all registers are dead at this entry point, except for r0, and
2571   // r3 which contain the exception oop and exception pc
2572   // respectively.  Set them in TLS and fall thru to the
2573   // unpack_with_exception_in_tls entry point.
2574 
2575   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2576   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2577 
2578   int exception_in_tls_offset = __ pc() - start;
2579 
2580   // new implementation because exception oop is now passed in JavaThread
2581 
2582   // Prolog for exception case
2583   // All registers must be preserved because they might be used by LinearScan
2584   // Exceptiop oop and throwing PC are passed in JavaThread
2585   // tos: stack at point of call to method that threw the exception (i.e. only
2586   // args are on the stack, no return address)
2587 
2588   // The return address pushed by save_live_registers will be patched
2589   // later with the throwing pc. The correct value is not available
2590   // now because loading it from memory would destroy registers.
2591 
2592   // NB: The SP at this point must be the SP of the method that is
2593   // being deoptimized.  Deoptimization assumes that the frame created
2594   // here by save_live_registers is immediately below the method's SP.
2595   // This is a somewhat fragile mechanism.
2596 
2597   // Save everything in sight.
2598   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2599 
2600   // Now it is safe to overwrite any register
2601 
2602   // Deopt during an exception.  Save exec mode for unpack_frames.
2603   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2604 
2605   // load throwing pc from JavaThread and patch it as the return address
2606   // of the current frame. Then clear the field in JavaThread
2607   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2608   __ protect_return_address(r3, rscratch1);
2609   __ str(r3, Address(rfp, wordSize));
2610   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2611 
2612 #ifdef ASSERT
2613   // verify that there is really an exception oop in JavaThread
2614   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2615   __ verify_oop(r0);
2616 
2617   // verify that there is no pending exception
2618   Label no_pending_exception;
2619   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2620   __ cbz(rscratch1, no_pending_exception);
2621   __ stop("must not have pending exception here");
2622   __ bind(no_pending_exception);
2623 #endif
2624 
2625   __ bind(cont);
2626 
2627   // Call C code.  Need thread and this frame, but NOT official VM entry
2628   // crud.  We cannot block on this call, no GC can happen.
2629   //
2630   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2631 
2632   // fetch_unroll_info needs to call last_java_frame().
2633 
2634   Label retaddr;
2635   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2636 #ifdef ASSERT
2637   { Label L;
2638     __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2639     __ cbz(rscratch1, L);
2640     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2641     __ bind(L);
2642   }
2643 #endif // ASSERT
2644   __ mov(c_rarg0, rthread);
2645   __ mov(c_rarg1, rcpool);
2646   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2647   __ blr(rscratch1);
2648   __ bind(retaddr);
2649 
2650   // Need to have an oopmap that tells fetch_unroll_info where to
2651   // find any register it might need.
2652   oop_maps->add_gc_map(__ pc() - start, map);
2653 
2654   __ reset_last_Java_frame(false);
2655 
2656 #if INCLUDE_JVMCI
2657   if (EnableJVMCI) {
2658     __ bind(after_fetch_unroll_info_call);
2659   }
2660 #endif
2661 
2662   // Load UnrollBlock* into r5
2663   __ mov(r5, r0);
2664 
2665   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2666    Label noException;
2667   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2668   __ br(Assembler::NE, noException);
2669   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2670   // QQQ this is useless it was NULL above
2671   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2672   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2673   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2674 
2675   __ verify_oop(r0);
2676 
2677   // Overwrite the result registers with the exception results.
2678   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2679   // I think this is useless
2680   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2681 
2682   __ bind(noException);
2683 
2684   // Only register save data is on the stack.
2685   // Now restore the result registers.  Everything else is either dead
2686   // or captured in the vframeArray.
2687 
2688   // Restore fp result register
2689   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2690   // Restore integer result register
2691   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2692 
2693   // Pop all of the register save area off the stack
2694   __ add(sp, sp, frame_size_in_words * wordSize);
2695 
2696   // All of the register save area has been popped of the stack. Only the
2697   // return address remains.
2698 
2699   // Pop all the frames we must move/replace.
2700   //
2701   // Frame picture (youngest to oldest)
2702   // 1: self-frame (no frame link)
2703   // 2: deopting frame  (no frame link)
2704   // 3: caller of deopting frame (could be compiled/interpreted).
2705   //
2706   // Note: by leaving the return address of self-frame on the stack
2707   // and using the size of frame 2 to adjust the stack
2708   // when we are done the return to frame 3 will still be on the stack.
2709 
2710   // Pop deoptimized frame
2711   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2712   __ sub(r2, r2, 2 * wordSize);
2713   __ add(sp, sp, r2);
2714   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2715   __ authenticate_return_address();
2716   // LR should now be the return address to the caller (3)
2717 
2718 #ifdef ASSERT
2719   // Compilers generate code that bang the stack by as much as the
2720   // interpreter would need. So this stack banging should never
2721   // trigger a fault. Verify that it does not on non product builds.
2722   __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2723   __ bang_stack_size(r19, r2);
2724 #endif
2725   // Load address of array of frame pcs into r2
2726   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2727 
2728   // Trash the old pc
2729   // __ addptr(sp, wordSize);  FIXME ????
2730 
2731   // Load address of array of frame sizes into r4
2732   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2733 
2734   // Load counter into r3
2735   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2736 
2737   // Now adjust the caller's stack to make up for the extra locals
2738   // but record the original sp so that we can save it in the skeletal interpreter
2739   // frame and the stack walking of interpreter_sender will get the unextended sp
2740   // value and not the "real" sp value.
2741 
2742   const Register sender_sp = r6;
2743 
2744   __ mov(sender_sp, sp);
2745   __ ldrw(r19, Address(r5,
2746                        Deoptimization::UnrollBlock::
2747                        caller_adjustment_offset_in_bytes()));
2748   __ sub(sp, sp, r19);
2749 
2750   // Push interpreter frames in a loop
2751   __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2752   __ mov(rscratch2, rscratch1);
2753   Label loop;
2754   __ bind(loop);
2755   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2756   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2757   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2758   __ enter();                           // Save old & set new fp
2759   __ sub(sp, sp, r19);                  // Prolog
2760   // This value is corrected by layout_activation_impl
2761   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2762   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2763   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2764   __ sub(r3, r3, 1);                   // Decrement counter
2765   __ cbnz(r3, loop);
2766 
2767     // Re-push self-frame
2768   __ ldr(lr, Address(r2));
2769   __ enter();
2770 
2771   // Allocate a full sized register save area.  We subtract 2 because
2772   // enter() just pushed 2 words
2773   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2774 
2775   // Restore frame locals after moving the frame
2776   __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2777   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2778 
2779   // Call C code.  Need thread but NOT official VM entry
2780   // crud.  We cannot block on this call, no GC can happen.  Call should
2781   // restore return values to their stack-slots with the new SP.
2782   //
2783   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2784 
2785   // Use rfp because the frames look interpreted now
2786   // Don't need the precise return PC here, just precise enough to point into this code blob.
2787   address the_pc = __ pc();
2788   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2789 
2790   __ mov(c_rarg0, rthread);
2791   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2792   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2793   __ blr(rscratch1);
2794 
2795   // Set an oopmap for the call site
2796   // Use the same PC we used for the last java frame
2797   oop_maps->add_gc_map(the_pc - start,
2798                        new OopMap( frame_size_in_words, 0 ));
2799 
2800   // Clear fp AND pc
2801   __ reset_last_Java_frame(true);
2802 
2803   // Collect return values
2804   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2805   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2806   // I think this is useless (throwing pc?)
2807   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2808 
2809   // Pop self-frame.
2810   __ leave();                           // Epilog
2811 
2812   // Jump to interpreter
2813   __ ret(lr);
2814 
2815   // Make sure all code is generated
2816   masm->flush();
2817 
2818   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2819   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2820 #if INCLUDE_JVMCI
2821   if (EnableJVMCI) {
2822     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2823     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2824   }
2825 #endif
2826 }
2827 
2828 // Number of stack slots between incoming argument block and the start of
2829 // a new frame.  The PROLOG must add this many slots to the stack.  The
2830 // EPILOG must remove this many slots. aarch64 needs two slots for
2831 // return address and fp.
2832 // TODO think this is correct but check
2833 uint SharedRuntime::in_preserve_stack_slots() {
2834   return 4;
2835 }
2836 
2837 uint SharedRuntime::out_preserve_stack_slots() {
2838   return 0;
2839 }
2840 
2841 #ifdef COMPILER2
2842 //------------------------------generate_uncommon_trap_blob--------------------
2843 void SharedRuntime::generate_uncommon_trap_blob() {
2844   // Allocate space for the code
2845   ResourceMark rm;
2846   // Setup code generation tools
2847   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2848   MacroAssembler* masm = new MacroAssembler(&buffer);
2849 
2850   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2851 
2852   address start = __ pc();
2853 
2854   // Push self-frame.  We get here with a return address in LR
2855   // and sp should be 16 byte aligned
2856   // push rfp and retaddr by hand
2857   __ protect_return_address();
2858   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2859   // we don't expect an arg reg save area
2860 #ifndef PRODUCT
2861   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2862 #endif
2863   // compiler left unloaded_class_index in j_rarg0 move to where the
2864   // runtime expects it.
2865   if (c_rarg1 != j_rarg0) {
2866     __ movw(c_rarg1, j_rarg0);
2867   }
2868 
2869   // we need to set the past SP to the stack pointer of the stub frame
2870   // and the pc to the address where this runtime call will return
2871   // although actually any pc in this code blob will do).
2872   Label retaddr;
2873   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2874 
2875   // Call C code.  Need thread but NOT official VM entry
2876   // crud.  We cannot block on this call, no GC can happen.  Call should
2877   // capture callee-saved registers as well as return values.
2878   // Thread is in rdi already.
2879   //
2880   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2881   //
2882   // n.b. 2 gp args, 0 fp args, integral return type
2883 
2884   __ mov(c_rarg0, rthread);
2885   __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2886   __ lea(rscratch1,
2887          RuntimeAddress(CAST_FROM_FN_PTR(address,
2888                                          Deoptimization::uncommon_trap)));
2889   __ blr(rscratch1);
2890   __ bind(retaddr);
2891 
2892   // Set an oopmap for the call site
2893   OopMapSet* oop_maps = new OopMapSet();
2894   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2895 
2896   // location of rfp is known implicitly by the frame sender code
2897 
2898   oop_maps->add_gc_map(__ pc() - start, map);
2899 
2900   __ reset_last_Java_frame(false);
2901 
2902   // move UnrollBlock* into r4
2903   __ mov(r4, r0);
2904 
2905 #ifdef ASSERT
2906   { Label L;
2907     __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2908     __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2909     __ br(Assembler::EQ, L);
2910     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2911     __ bind(L);
2912   }
2913 #endif
2914 
2915   // Pop all the frames we must move/replace.
2916   //
2917   // Frame picture (youngest to oldest)
2918   // 1: self-frame (no frame link)
2919   // 2: deopting frame  (no frame link)
2920   // 3: caller of deopting frame (could be compiled/interpreted).
2921 
2922   // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
2923   __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2924 
2925   // Pop deoptimized frame (int)
2926   __ ldrw(r2, Address(r4,
2927                       Deoptimization::UnrollBlock::
2928                       size_of_deoptimized_frame_offset_in_bytes()));
2929   __ sub(r2, r2, 2 * wordSize);
2930   __ add(sp, sp, r2);
2931   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2932   __ authenticate_return_address();
2933   // LR should now be the return address to the caller (3) frame
2934 
2935 #ifdef ASSERT
2936   // Compilers generate code that bang the stack by as much as the
2937   // interpreter would need. So this stack banging should never
2938   // trigger a fault. Verify that it does not on non product builds.
2939   __ ldrw(r1, Address(r4,
2940                       Deoptimization::UnrollBlock::
2941                       total_frame_sizes_offset_in_bytes()));
2942   __ bang_stack_size(r1, r2);
2943 #endif
2944 
2945   // Load address of array of frame pcs into r2 (address*)
2946   __ ldr(r2, Address(r4,
2947                      Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2948 
2949   // Load address of array of frame sizes into r5 (intptr_t*)
2950   __ ldr(r5, Address(r4,
2951                      Deoptimization::UnrollBlock::
2952                      frame_sizes_offset_in_bytes()));
2953 
2954   // Counter
2955   __ ldrw(r3, Address(r4,
2956                       Deoptimization::UnrollBlock::
2957                       number_of_frames_offset_in_bytes())); // (int)
2958 
2959   // Now adjust the caller's stack to make up for the extra locals but
2960   // record the original sp so that we can save it in the skeletal
2961   // interpreter frame and the stack walking of interpreter_sender
2962   // will get the unextended sp value and not the "real" sp value.
2963 
2964   const Register sender_sp = r8;
2965 
2966   __ mov(sender_sp, sp);
2967   __ ldrw(r1, Address(r4,
2968                       Deoptimization::UnrollBlock::
2969                       caller_adjustment_offset_in_bytes())); // (int)
2970   __ sub(sp, sp, r1);
2971 
2972   // Push interpreter frames in a loop
2973   Label loop;
2974   __ bind(loop);
2975   __ ldr(r1, Address(r5, 0));       // Load frame size
2976   __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
2977   __ ldr(lr, Address(r2, 0));       // Save return address
2978   __ enter();                       // and old rfp & set new rfp
2979   __ sub(sp, sp, r1);               // Prolog
2980   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2981   // This value is corrected by layout_activation_impl
2982   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2983   __ mov(sender_sp, sp);          // Pass sender_sp to next frame
2984   __ add(r5, r5, wordSize);       // Bump array pointer (sizes)
2985   __ add(r2, r2, wordSize);       // Bump array pointer (pcs)
2986   __ subsw(r3, r3, 1);            // Decrement counter
2987   __ br(Assembler::GT, loop);
2988   __ ldr(lr, Address(r2, 0));     // save final return address
2989   // Re-push self-frame
2990   __ enter();                     // & old rfp & set new rfp
2991 
2992   // Use rfp because the frames look interpreted now
2993   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2994   // Don't need the precise return PC here, just precise enough to point into this code blob.
2995   address the_pc = __ pc();
2996   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2997 
2998   // Call C code.  Need thread but NOT official VM entry
2999   // crud.  We cannot block on this call, no GC can happen.  Call should
3000   // restore return values to their stack-slots with the new SP.
3001   // Thread is in rdi already.
3002   //
3003   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3004   //
3005   // n.b. 2 gp args, 0 fp args, integral return type
3006 
3007   // sp should already be aligned
3008   __ mov(c_rarg0, rthread);
3009   __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
3010   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3011   __ blr(rscratch1);
3012 
3013   // Set an oopmap for the call site
3014   // Use the same PC we used for the last java frame
3015   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3016 
3017   // Clear fp AND pc
3018   __ reset_last_Java_frame(true);
3019 
3020   // Pop self-frame.
3021   __ leave();                 // Epilog
3022 
3023   // Jump to interpreter
3024   __ ret(lr);
3025 
3026   // Make sure all code is generated
3027   masm->flush();
3028 
3029   _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
3030                                                  SimpleRuntimeFrame::framesize >> 1);
3031 }
3032 #endif // COMPILER2
3033 
3034 
3035 //------------------------------generate_handler_blob------
3036 //
3037 // Generate a special Compile2Runtime blob that saves all registers,
3038 // and setup oopmap.
3039 //
3040 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3041   ResourceMark rm;
3042   OopMapSet *oop_maps = new OopMapSet();
3043   OopMap* map;
3044 
3045   // Allocate space for the code.  Setup code generation tools.
3046   CodeBuffer buffer("handler_blob", 2048, 1024);
3047   MacroAssembler* masm = new MacroAssembler(&buffer);
3048 
3049   address start   = __ pc();
3050   address call_pc = NULL;
3051   int frame_size_in_words;
3052   bool cause_return = (poll_type == POLL_AT_RETURN);
3053   RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
3054 
3055   // When the signal occurred, the LR was either signed and stored on the stack (in which
3056   // case it will be restored from the stack before being used) or unsigned and not stored
3057   // on the stack. Stipping ensures we get the right value.
3058   __ strip_return_address();
3059 
3060   // Save Integer and Float registers.
3061   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
3062 
3063   // The following is basically a call_VM.  However, we need the precise
3064   // address of the call in order to generate an oopmap. Hence, we do all the
3065   // work ourselves.
3066 
3067   Label retaddr;
3068   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
3069 
3070   // The return address must always be correct so that frame constructor never
3071   // sees an invalid pc.
3072 
3073   if (!cause_return) {
3074     // overwrite the return address pushed by save_live_registers
3075     // Additionally, r20 is a callee-saved register so we can look at
3076     // it later to determine if someone changed the return address for
3077     // us!
3078     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
3079     __ protect_return_address(r20, rscratch1);
3080     __ str(r20, Address(rfp, wordSize));
3081   }
3082 
3083   // Do the call
3084   __ mov(c_rarg0, rthread);
3085   __ lea(rscratch1, RuntimeAddress(call_ptr));
3086   __ blr(rscratch1);
3087   __ bind(retaddr);
3088 
3089   // Set an oopmap for the call site.  This oopmap will map all
3090   // oop-registers and debug-info registers as callee-saved.  This
3091   // will allow deoptimization at this safepoint to find all possible
3092   // debug-info recordings, as well as let GC find all oops.
3093 
3094   oop_maps->add_gc_map( __ pc() - start, map);
3095 
3096   Label noException;
3097 
3098   __ reset_last_Java_frame(false);
3099 
3100   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
3101 
3102   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
3103   __ cbz(rscratch1, noException);
3104 
3105   // Exception pending
3106 
3107   reg_save.restore_live_registers(masm);
3108 
3109   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3110 
3111   // No exception case
3112   __ bind(noException);
3113 
3114   Label no_adjust, bail;
3115   if (!cause_return) {
3116     // If our stashed return pc was modified by the runtime we avoid touching it
3117     __ ldr(rscratch1, Address(rfp, wordSize));
3118     __ cmp(r20, rscratch1);
3119     __ br(Assembler::NE, no_adjust);
3120     __ authenticate_return_address(r20, rscratch1);
3121 
3122 #ifdef ASSERT
3123     // Verify the correct encoding of the poll we're about to skip.
3124     // See NativeInstruction::is_ldrw_to_zr()
3125     __ ldrw(rscratch1, Address(r20));
3126     __ ubfx(rscratch2, rscratch1, 22, 10);
3127     __ cmpw(rscratch2, 0b1011100101);
3128     __ br(Assembler::NE, bail);
3129     __ ubfx(rscratch2, rscratch1, 0, 5);
3130     __ cmpw(rscratch2, 0b11111);
3131     __ br(Assembler::NE, bail);
3132 #endif
3133     // Adjust return pc forward to step over the safepoint poll instruction
3134     __ add(r20, r20, NativeInstruction::instruction_size);
3135     __ protect_return_address(r20, rscratch1);
3136     __ str(r20, Address(rfp, wordSize));
3137   }
3138 
3139   __ bind(no_adjust);
3140   // Normal exit, restore registers and exit.
3141   reg_save.restore_live_registers(masm);
3142 
3143   __ ret(lr);
3144 
3145 #ifdef ASSERT
3146   __ bind(bail);
3147   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3148 #endif
3149 
3150   // Make sure all code is generated
3151   masm->flush();
3152 
3153   // Fill-out other meta info
3154   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3155 }
3156 
3157 //
3158 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
3159 //
3160 // Generate a stub that calls into vm to find out the proper destination
3161 // of a java call. All the argument registers are live at this point
3162 // but since this is generic code we don't know what they are and the caller
3163 // must do any gc of the args.
3164 //
3165 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3166   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3167 
3168   // allocate space for the code
3169   ResourceMark rm;
3170 
3171   CodeBuffer buffer(name, 1000, 512);
3172   MacroAssembler* masm                = new MacroAssembler(&buffer);
3173 
3174   int frame_size_in_words;
3175   RegisterSaver reg_save(false /* save_vectors */);
3176 
3177   OopMapSet *oop_maps = new OopMapSet();
3178   OopMap* map = NULL;
3179 
3180   int start = __ offset();
3181 
3182   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
3183 
3184   int frame_complete = __ offset();
3185 
3186   {
3187     Label retaddr;
3188     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
3189 
3190     __ mov(c_rarg0, rthread);
3191     __ lea(rscratch1, RuntimeAddress(destination));
3192 
3193     __ blr(rscratch1);
3194     __ bind(retaddr);
3195   }
3196 
3197   // Set an oopmap for the call site.
3198   // We need this not only for callee-saved registers, but also for volatile
3199   // registers that the compiler might be keeping live across a safepoint.
3200 
3201   oop_maps->add_gc_map( __ offset() - start, map);
3202 
3203   // r0 contains the address we are going to jump to assuming no exception got installed
3204 
3205   // clear last_Java_sp
3206   __ reset_last_Java_frame(false);
3207   // check for pending exceptions
3208   Label pending;
3209   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
3210   __ cbnz(rscratch1, pending);
3211 
3212   // get the returned Method*
3213   __ get_vm_result_2(rmethod, rthread);
3214   __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
3215 
3216   // r0 is where we want to jump, overwrite rscratch1 which is saved and scratch
3217   __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
3218   reg_save.restore_live_registers(masm);
3219 
3220   // We are back to the original state on entry and ready to go.
3221 
3222   __ br(rscratch1);
3223 
3224   // Pending exception after the safepoint
3225 
3226   __ bind(pending);
3227 
3228   reg_save.restore_live_registers(masm);
3229 
3230   // exception pending => remove activation and forward to exception handler
3231 
3232   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
3233 
3234   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
3235   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3236 
3237   // -------------
3238   // make sure all code is generated
3239   masm->flush();
3240 
3241   // return the  blob
3242   // frame_size_words or bytes??
3243   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3244 }
3245 
3246 #ifdef COMPILER2
3247 // This is here instead of runtime_aarch64_64.cpp because it uses SimpleRuntimeFrame
3248 //
3249 //------------------------------generate_exception_blob---------------------------
3250 // creates exception blob at the end
3251 // Using exception blob, this code is jumped from a compiled method.
3252 // (see emit_exception_handler in x86_64.ad file)
3253 //
3254 // Given an exception pc at a call we call into the runtime for the
3255 // handler in this method. This handler might merely restore state
3256 // (i.e. callee save registers) unwind the frame and jump to the
3257 // exception handler for the nmethod if there is no Java level handler
3258 // for the nmethod.
3259 //
3260 // This code is entered with a jmp.
3261 //
3262 // Arguments:
3263 //   r0: exception oop
3264 //   r3: exception pc
3265 //
3266 // Results:
3267 //   r0: exception oop
3268 //   r3: exception pc in caller or ???
3269 //   destination: exception handler of caller
3270 //
3271 // Note: the exception pc MUST be at a call (precise debug information)
3272 //       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
3273 //
3274 
3275 void OptoRuntime::generate_exception_blob() {
3276   assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
3277   assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
3278   assert(!OptoRuntime::is_callee_saved_register(R2_num), "");
3279 
3280   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3281 
3282   // Allocate space for the code
3283   ResourceMark rm;
3284   // Setup code generation tools
3285   CodeBuffer buffer("exception_blob", 2048, 1024);
3286   MacroAssembler* masm = new MacroAssembler(&buffer);
3287 
3288   // TODO check various assumptions made here
3289   //
3290   // make sure we do so before running this
3291 
3292   address start = __ pc();
3293 
3294   // push rfp and retaddr by hand
3295   // Exception pc is 'return address' for stack walker
3296   __ protect_return_address();
3297   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
3298   // there are no callee save registers and we don't expect an
3299   // arg reg save area
3300 #ifndef PRODUCT
3301   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
3302 #endif
3303   // Store exception in Thread object. We cannot pass any arguments to the
3304   // handle_exception call, since we do not want to make any assumption
3305   // about the size of the frame where the exception happened in.
3306   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
3307   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
3308 
3309   // This call does all the hard work.  It checks if an exception handler
3310   // exists in the method.
3311   // If so, it returns the handler address.
3312   // If not, it prepares for stack-unwinding, restoring the callee-save
3313   // registers of the frame being removed.
3314   //
3315   // address OptoRuntime::handle_exception_C(JavaThread* thread)
3316   //
3317   // n.b. 1 gp arg, 0 fp args, integral return type
3318 
3319   // the stack should always be aligned
3320   address the_pc = __ pc();
3321   __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
3322   __ mov(c_rarg0, rthread);
3323   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3324   __ blr(rscratch1);
3325   // handle_exception_C is a special VM call which does not require an explicit
3326   // instruction sync afterwards.
3327 
3328   // May jump to SVE compiled code
3329   __ reinitialize_ptrue();
3330 
3331   // Set an oopmap for the call site.  This oopmap will only be used if we
3332   // are unwinding the stack.  Hence, all locations will be dead.
3333   // Callee-saved registers will be the same as the frame above (i.e.,
3334   // handle_exception_stub), since they were restored when we got the
3335   // exception.
3336 
3337   OopMapSet* oop_maps = new OopMapSet();
3338 
3339   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3340 
3341   __ reset_last_Java_frame(false);
3342 
3343   // Restore callee-saved registers
3344 
3345   // rfp is an implicitly saved callee saved register (i.e. the calling
3346   // convention will save restore it in prolog/epilog) Other than that
3347   // there are no callee save registers now that adapter frames are gone.
3348   // and we dont' expect an arg reg save area
3349   __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
3350   __ authenticate_return_address(r3);
3351 
3352   // r0: exception handler
3353 
3354   // We have a handler in r0 (could be deopt blob).
3355   __ mov(r8, r0);
3356 
3357   // Get the exception oop
3358   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
3359   // Get the exception pc in case we are deoptimized
3360   __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
3361 #ifdef ASSERT
3362   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3363   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3364 #endif
3365   // Clear the exception oop so GC no longer processes it as a root.
3366   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3367 
3368   // r0: exception oop
3369   // r8:  exception handler
3370   // r4: exception pc
3371   // Jump to handler
3372 
3373   __ br(r8);
3374 
3375   // Make sure all code is generated
3376   masm->flush();
3377 
3378   // Set exception blob
3379   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3380 }
3381 
3382 #endif // COMPILER2
3383 
3384 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3385   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3386   CodeBuffer buffer(buf);
3387   short buffer_locs[20];
3388   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3389                                          sizeof(buffer_locs)/sizeof(relocInfo));
3390 
3391   MacroAssembler _masm(&buffer);
3392   MacroAssembler* masm = &_masm;
3393 
3394   const Array<SigEntry>* sig_vk = vk->extended_sig();
3395   const Array<VMRegPair>* regs = vk->return_regs();
3396 
3397   int pack_fields_jobject_off = __ offset();
3398   // Resolve pre-allocated buffer from JNI handle.
3399   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3400   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3401   __ ldr(r0, Address(Rresult));
3402   __ resolve_jobject(r0 /* value */,
3403                      rthread /* thread */,
3404                      r12 /* tmp */);
3405   __ str(r0, Address(Rresult));
3406 
3407   int pack_fields_off = __ offset();
3408 
3409   int j = 1;
3410   for (int i = 0; i < sig_vk->length(); i++) {
3411     BasicType bt = sig_vk->at(i)._bt;
3412     if (bt == T_PRIMITIVE_OBJECT) {
3413       continue;
3414     }
3415     if (bt == T_VOID) {
3416       if (sig_vk->at(i-1)._bt == T_LONG ||
3417           sig_vk->at(i-1)._bt == T_DOUBLE) {
3418         j++;
3419       }
3420       continue;
3421     }
3422     int off = sig_vk->at(i)._offset;
3423     VMRegPair pair = regs->at(j);
3424     VMReg r_1 = pair.first();
3425     VMReg r_2 = pair.second();
3426     Address to(r0, off);
3427     if (bt == T_FLOAT) {
3428       __ strs(r_1->as_FloatRegister(), to);
3429     } else if (bt == T_DOUBLE) {
3430       __ strd(r_1->as_FloatRegister(), to);
3431     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3432       Register val = r_1->as_Register();
3433       assert_different_registers(r0, val);
3434       // We don't need barriers because the destination is a newly allocated object.
3435       // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp.
3436       if (UseCompressedOops) {
3437         __ encode_heap_oop(val);
3438         __ str(val, to);
3439       } else {
3440         __ str(val, to);
3441       }
3442     } else {
3443       assert(is_java_primitive(bt), "unexpected basic type");
3444       assert_different_registers(r0, r_1->as_Register());
3445       size_t size_in_bytes = type2aelembytes(bt);
3446       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
3447     }
3448     j++;
3449   }
3450   assert(j == regs->length(), "missed a field?");
3451 
3452   __ ret(lr);
3453 
3454   int unpack_fields_off = __ offset();
3455 
3456   Label skip;
3457   __ cbz(r0, skip);
3458 
3459   j = 1;
3460   for (int i = 0; i < sig_vk->length(); i++) {
3461     BasicType bt = sig_vk->at(i)._bt;
3462     if (bt == T_PRIMITIVE_OBJECT) {
3463       continue;
3464     }
3465     if (bt == T_VOID) {
3466       if (sig_vk->at(i-1)._bt == T_LONG ||
3467           sig_vk->at(i-1)._bt == T_DOUBLE) {
3468         j++;
3469       }
3470       continue;
3471     }
3472     int off = sig_vk->at(i)._offset;
3473     assert(off > 0, "offset in object should be positive");
3474     VMRegPair pair = regs->at(j);
3475     VMReg r_1 = pair.first();
3476     VMReg r_2 = pair.second();
3477     Address from(r0, off);
3478     if (bt == T_FLOAT) {
3479       __ ldrs(r_1->as_FloatRegister(), from);
3480     } else if (bt == T_DOUBLE) {
3481       __ ldrd(r_1->as_FloatRegister(), from);
3482     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3483       assert_different_registers(r0, r_1->as_Register());
3484       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3485     } else {
3486       assert(is_java_primitive(bt), "unexpected basic type");
3487       assert_different_registers(r0, r_1->as_Register());
3488 
3489       size_t size_in_bytes = type2aelembytes(bt);
3490       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3491     }
3492     j++;
3493   }
3494   assert(j == regs->length(), "missed a field?");
3495 
3496   __ bind(skip);
3497 
3498   __ ret(lr);
3499 
3500   __ flush();
3501 
3502   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3503 }