/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rfp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // We don't expect any arg reg save area, so aarch64 asserts that
    // frame::arg_reg_save_area_bytes == 0
    rfp_off = 0,
    rfp_off2,
    return_off, return_off2,
    framesize
  };
};
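
// An illustrative sketch of the layout above (each slot is a 4-byte
// compiler stack slot; the enum is the source of truth):
//
//   sp +  0 : saved rfp       (rfp_off, rfp_off2)
//   sp +  8 : return address  (return_off, return_off2)
//   sp + 16 : caller's frame  (framesize == 4 slots == 16 bytes)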

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes();

  // Total stack size in bytes for saving sve predicate registers.
  int total_sve_predicate_in_bytes();

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off + FPUStateSizeInWords,
                rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
                return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
                reg_save_size = return_off + Register::max_slots_per_register};

};
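
// A rough picture of the save area pushed by save_live_registers(),
// from low to high addresses (again only correct when not saving full
// vectors; the enum above is authoritative):
//
//   sp + fpu_state_off * 4 : v0..v31 state (FPUStateSizeInWords words)
//   sp + r0_off * 4        : integer registers, r0 upwards
//   sp + rfp_off * 4       : saved rfp
//   sp + return_off * 4    : return address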

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers() so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegister::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegister::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}

int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate
  // registers, if any are present in the stack frame pushed by
  // save_live_registers(). So the offset depends on the total size of
  // the predicate registers saved in the stack frame.
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
}

int RegisterSaver::total_sve_predicate_in_bytes() {
#ifdef COMPILER2
  if (_save_vectors && Matcher::supports_scalable_vector()) {
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
           PRegister::number_of_registers;
  }
#endif
  return 0;
}
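
// A worked example (illustrative only; assuming a 512-bit SVE
// implementation): each vector register is 64 bytes, so each predicate
// register needs 64 >> LogBitsPerByte == 8 bytes, giving
// 8 * PRegister::number_of_registers bytes of predicate save area.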

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;
  int sve_predicate_size_in_slots = 0;
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
    }
    int extra_vector_bytes = extra_save_slots_per_register *
                             VMRegImpl::stack_slot_size *
                             FloatRegister::number_of_registers;
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register slots are 8 bytes wide, 32 floating-point registers.
      int sp_offset = Register::max_slots_per_register * i +
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
                            (FloatRegister::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegister::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The minimum vector size SVE supports is 8 bytes, and we need to save
// predicate registers even when the vector size is only 8 bytes.
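// (So with UseSVE > 0 the test below effectively becomes size >= 8.)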
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot at 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static JNI
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the Java ABI we ought to at
// least get some advantage out of it.
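//
// For instance (an illustrative sketch, not normative): for a virtual
// call obj.foo(int x), the receiver arrives in j_rarg0 (== c_rarg1) and
// x in j_rarg1 (== c_rarg2), leaving c_rarg0 free for the JNIEnv* that
// a native method would receive in the C convention.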

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}
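
// A worked example (illustrative only): for the signature (IJ)V, sig_bt
// is { T_INT, T_LONG, T_VOID }.  The T_INT gets j_rarg0 via set1(), the
// T_LONG gets j_rarg1 via set2(), and the trailing T_VOID half is
// set_bad(); stk_args stays 0 because no stack slots are needed.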

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ authenticate_return_address(c_rarg1);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r19_sender_sp, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java
    // long/double in a single slot on a 64-bit VM and it would be silly to
    // break them up, the interpreter leaves one slot empty and only stores to
    // a single slot. In this case the slot that is occupied is the T_VOID
    // slot. See, I said it was confusing.
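    //
    // Reading the example above concretely: the T_LONG at i == 0 is
    // stored once at next_off (24, the T_VOID slot), while st_off (32)
    // is left unused (and filled with known junk in debug builds).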

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller) so move only 32 bits to slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // jlong/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float, use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != nullptr) {
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::initial_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::initial_stubs_code()->code_begin(),
                  StubRoutines::initial_stubs_code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::final_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::final_stubs_code()->code_begin(),
                  StubRoutines::final_stubs_code()->code_end(),
                  L_ok);
    }
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
#endif
  }

  // Cut-out for having no stack args.
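  // If the compiled callee does need stack args, bump SP down to make
  // room for them, rounding to a 16-byte boundary so SP stays aligned
  // as AArch64 requires.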
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed at negative offsets, so the LSW is at the lower address.

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ mov(rscratch2, rscratch1);
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
  __ mov(rscratch1, rscratch2);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register data = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ic_check(1 /* end_alignment */);
    __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));

    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = nullptr;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {

// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.

    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
    };
    static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3,
      c_farg4, c_farg5, c_farg6, c_farg7
    };

    uint int_args = 0;
    uint fp_args = 0;
    uint stk_args = 0; // inc by 2 each time

    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
        } else {
#ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
          return -1;
#endif
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
#ifdef __APPLE__
          // Less-than-word types are stored one after another.
          // The code is unable to handle this, so bail out.
          return -1;
#endif
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }

  return stk_args;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}
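
// The two helpers below spill and reload the registers holding the
// arguments in [first_arg, arg_count) around a runtime call. Integer
// registers are pushed as a set; each float register gets its own
// 16-byte slot, which keeps sp 16-byte aligned.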
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size()/wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
  __ mov(rscratch1, sp); // we can't use sp as the source in str
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
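  // The stores above leave the entries chained through parent_offset():
  // rthread->cont_entry -> this entry -> previous entry -> ...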

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ movw(rscratch1, ContinuationEntry::cookie_value());
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::EQ, OK);
  __ stop("incorrect sp1");
  __ bind(OK);
#endif

  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));

  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
  __ add(rfp, sp, (int)ContinuationEntry::size());
}

// enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
// On entry: c_rarg1 -- the continuation object
//           c_rarg2 -- isContinue
//           c_rarg3 -- isVirtualThread
static void gen_continuation_enter(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs,
                                 int& exception_offset,
                                 OopMapSet* oop_maps,
                                 int& frame_complete,
                                 int& stack_slots,
                                 int& interpreted_entry_offset,
                                 int& compiled_entry_offset) {
  //verify_oop_args(masm, method, sig_bt, regs);
  Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);

  address start = __ pc();

  Label call_thaw, exit;

  // i2i entry used at interp_only_mode only
  interpreted_entry_offset = __ pc() - start;
  {

#ifdef ASSERT
    Label is_interp_only;
    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    __ cbnzw(rscratch1, is_interp_only);
    __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
    __ bind(is_interp_only);
#endif

    // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
    __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
    __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
    __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
    __ push_cont_fastpath(rthread);

    __ enter();
    stack_slots = 2; // will be adjusted in setup
    OopMap* map = continuation_enter_setup(masm, stack_slots);
    // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe,
    // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.

    fill_continuation_entry(masm);

    __ cbnz(c_rarg2, call_thaw);

    const address tr_call = __ trampoline_call(resolve);
    if (tr_call == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }

    oop_maps->add_gc_map(__ pc() - start, map);
    __ post_call_nop();

    __ b(exit);

    CodeBuffer* cbuf = masm->code_section()->outer();
    address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
    if (stub == nullptr) {
      fatal("CodeCache is full at gen_continuation_enter");
    }
  }

  // compiled entry
  __ align(CodeEntryAlignment);
  compiled_entry_offset = __ pc() - start;

  __ enter();
  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ cbnz(c_rarg2, call_thaw);

  const address tr_call = __ trampoline_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ b(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret(lr);

  /// exception handling

  exception_offset = __ pc() - start;
  {
      __ mov(r19, r0); // save return value containing the exception oop in callee-saved r19

      continuation_enter_cleanup(masm);

      __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
      __ authenticate_return_address(c_rarg1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);

      // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc

      __ mov(r1, r0); // the exception handler
      __ mov(r0, r19); // restore return value containing the exception oop
      __ verify_oop(r0);

      __ leave();
      __ mov(r3, lr);
      __ br(r1); // the exception handler
  }

  CodeBuffer* cbuf = masm->code_section()->outer();
  address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }
}

static void gen_continuation_yield(MacroAssembler* masm,
                                   const methodHandle& method,
                                   const BasicType* sig_bt,
                                   const VMRegPair* regs,
                                   OopMapSet* oop_maps,
                                   int& frame_complete,
                                   int& stack_slots,
                                   int& compiled_entry_offset) {
    enum layout {
      rfp_off1,
      rfp_off2,
      lr_off,
      lr_off2,
      framesize // inclusive of return address
    };
    // assert(is_even(framesize/2), "sp not 16-byte aligned");
    stack_slots = framesize / VMRegImpl::slots_per_word;
    assert(stack_slots == 2, "recheck layout");

    address start = __ pc();

    compiled_entry_offset = __ pc() - start;
    __ enter();

    __ mov(c_rarg1, sp);

    frame_complete = __ pc() - start;
    address the_pc = __ pc();

    __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info, we use this nop for fast CodeBlob lookup

    __ mov(c_rarg0, rthread);
    __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
    __ call_VM_leaf(Continuation::freeze_entry(), 2);
    __ reset_last_Java_frame(true);

    Label pinned;

    __ cbnz(r0, pinned);

    // We've succeeded, set sp to the ContinuationEntry
    __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
    __ mov(sp, rscratch1);
    continuation_enter_cleanup(masm);

    __ bind(pinned); // pinned -- return to caller

    // handle pending exception thrown by freeze
    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    __ cbz(rscratch1, ok);
    __ leave();
    __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    __ br(rscratch1);
    __ bind(ok);

    __ leave();
    __ ret(lr);

    OopMap* map = new OopMap(framesize, 1);
    oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Work out whether there is a receiver and which argument, if any,
  // carries the trailing MemberName/NativeEntryPoint.
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
    member_reg = r19;  // known to be free at this point
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
1312 
1313 // ---------------------------------------------------------------------------
1314 // Generate a native wrapper for a given method.  The method takes arguments
1315 // in the Java compiled code convention, marshals them to the native
1316 // convention (handlizes oops, etc), transitions to native, makes the call,
1317 // returns to java state (possibly blocking), unhandlizes any result and
1318 // returns.
1319 //
1320 // Critical native functions are a shorthand for the use of
1321 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1322 // functions.  The wrapper is expected to unpack the arguments before
1323 // passing them to the callee. Critical native functions leave the state _in_Java,
1324 // since they block out GC.
1325 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1326 // block and the check for pending exceptions it's impossible for them
1327 // to be thrown.
1328 //
1329 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1330                                                 const methodHandle& method,
1331                                                 int compile_id,
1332                                                 BasicType* in_sig_bt,
1333                                                 VMRegPair* in_regs,
1334                                                 BasicType ret_type) {
1335   if (method->is_continuation_native_intrinsic()) {
1336     int exception_offset = -1;
1337     OopMapSet* oop_maps = new OopMapSet();
1338     int frame_complete = -1;
1339     int stack_slots = -1;
1340     int interpreted_entry_offset = -1;
1341     int vep_offset = -1;
1342     if (method->is_continuation_enter_intrinsic()) {
1343       gen_continuation_enter(masm,
1344                              method,
1345                              in_sig_bt,
1346                              in_regs,
1347                              exception_offset,
1348                              oop_maps,
1349                              frame_complete,
1350                              stack_slots,
1351                              interpreted_entry_offset,
1352                              vep_offset);
1353     } else if (method->is_continuation_yield_intrinsic()) {
1354       gen_continuation_yield(masm,
1355                              method,
1356                              in_sig_bt,
1357                              in_regs,
1358                              oop_maps,
1359                              frame_complete,
1360                              stack_slots,
1361                              vep_offset);
1362     } else {
1363       guarantee(false, "Unknown Continuation native intrinsic");
1364     }
1365 
1366 #ifdef ASSERT
1367     if (method->is_continuation_enter_intrinsic()) {
1368       assert(interpreted_entry_offset != -1, "Must be set");
1369       assert(exception_offset != -1,         "Must be set");
1370     } else {
1371       assert(interpreted_entry_offset == -1, "Must be unset");
1372       assert(exception_offset == -1,         "Must be unset");
1373     }
1374     assert(frame_complete != -1,    "Must be set");
1375     assert(stack_slots != -1,       "Must be set");
1376     assert(vep_offset != -1,        "Must be set");
1377 #endif
1378 
1379     __ flush();
1380     nmethod* nm = nmethod::new_native_nmethod(method,
1381                                               compile_id,
1382                                               masm->code(),
1383                                               vep_offset,
1384                                               frame_complete,
1385                                               stack_slots,
1386                                               in_ByteSize(-1),
1387                                               in_ByteSize(-1),
1388                                               oop_maps,
1389                                               exception_offset);
1390     if (nm == nullptr) return nm;
1391     if (method->is_continuation_enter_intrinsic()) {
1392       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1393     } else if (method->is_continuation_yield_intrinsic()) {
1394       _cont_doYield_stub = nm;
1395     } else {
1396       guarantee(false, "Unknown Continuation native intrinsic");
1397     }
1398     return nm;
1399   }
1400 
1401   if (method->is_method_handle_intrinsic()) {
1402     vmIntrinsics::ID iid = method->intrinsic_id();
1403     intptr_t start = (intptr_t)__ pc();
1404     int vep_offset = ((intptr_t)__ pc()) - start;
1405 
1406     // First instruction must be a nop as it may need to be patched on deoptimization
1407     __ nop();
1408     gen_special_dispatch(masm,
1409                          method,
1410                          in_sig_bt,
1411                          in_regs);
1412     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1413     __ flush();
1414     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1415     return nmethod::new_native_nmethod(method,
1416                                        compile_id,
1417                                        masm->code(),
1418                                        vep_offset,
1419                                        frame_complete,
1420                                        stack_slots / VMRegImpl::slots_per_word,
1421                                        in_ByteSize(-1),
1422                                        in_ByteSize(-1),
1423                                        nullptr);
1424   }
1425   address native_func = method->native_function();
1426   assert(native_func != nullptr, "must have function");
1427 
1428   // An OopMap for lock (and class if static)
1429   OopMapSet *oop_maps = new OopMapSet();
1430   intptr_t start = (intptr_t)__ pc();
1431 
1432   // We have received a description of where all the java args are located
1433   // on entry to the wrapper. We need to convert these args to where
1434   // the jni function will expect them. To figure out where they go
1435   // we convert the java signature to a C signature by inserting
1436   // the hidden arguments as arg[0] and possibly arg[1] (static method).
1437 
1438   const int total_in_args = method->size_of_parameters();
1439   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1440 
1441   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1442   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1443   BasicType* in_elem_bt = nullptr;
1444 
1445   int argc = 0;
1446   out_sig_bt[argc++] = T_ADDRESS;
1447   if (method->is_static()) {
1448     out_sig_bt[argc++] = T_OBJECT;
1449   }
1450 
1451   for (int i = 0; i < total_in_args ; i++ ) {
1452     out_sig_bt[argc++] = in_sig_bt[i];
1453   }
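  // For example (illustrative, not exhaustive): for a static native method
  // `long f(Object a, long b)`, size_of_parameters() is 3 (a = 1 slot,
  // b = 2 slots), so total_c_args is 5 and out_sig_bt becomes
  //   { T_ADDRESS /*JNIEnv* */, T_OBJECT /*jclass*/, T_OBJECT, T_LONG, T_VOID }
  // where the trailing T_VOID is the conventional second half of a long.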
1454 
1455   // Now figure out where the args must be stored and how much stack space
1456   // they require.
1457   int out_arg_slots;
1458   out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1459 
1460   if (out_arg_slots < 0) {
1461     return nullptr;
1462   }
1463 
1464   // Compute framesize for the wrapper.  We need to handlize all oops in
1465   // incoming registers
1466 
1467   // Calculate the total number of stack slots we will need.
1468 
1469   // First count the abi requirement plus all of the outgoing args
1470   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1471 
1472   // Now the space for the inbound oop handle area
1473   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
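  // (aarch64 passes the first eight integer Java arguments in registers,
  // so at most eight incoming oops may need handle slots here.)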
1474 
1475   int oop_handle_offset = stack_slots;
1476   stack_slots += total_save_slots;
1477 
1478   // Now any space we need for handlizing a klass if static method
1479 
1480   int klass_slot_offset = 0;
1481   int klass_offset = -1;
1482   int lock_slot_offset = 0;
1483   bool is_static = false;
1484 
1485   if (method->is_static()) {
1486     klass_slot_offset = stack_slots;
1487     stack_slots += VMRegImpl::slots_per_word;
1488     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1489     is_static = true;
1490   }
1491 
1492   // Plus a lock if needed
1493 
1494   if (method->is_synchronized()) {
1495     lock_slot_offset = stack_slots;
1496     stack_slots += VMRegImpl::slots_per_word;
1497   }
1498 
1499   // Now a place (+2 slots) to save return values or temps during shuffling
1500   // + 4 slots for the return address (which we own) and the saved rfp
1501   stack_slots += 6;
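  // Worked example (a sketch; out_arg_slots depends on the signature): for
  // a static synchronized native method whose outgoing C args all fit in
  // registers, the running total here is
  //   0 (out_preserve) + out_arg_slots + 16 (oop handle area, 8 words)
  //     + 2 (klass slot) + 2 (lock slot) + 6
  // and is then rounded up to StackAlignmentInSlots below.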
1502 
1503   // Ok The space we have allocated will look like:
1504   //
1505   //
1506   // FP-> |                     |
1507   //      |---------------------|
1508   //      | 2 slots for moves   |
1509   //      |---------------------|
1510   //      | lock box (if sync)  |
1511   //      |---------------------| <- lock_slot_offset
1512   //      | klass (if static)   |
1513   //      |---------------------| <- klass_slot_offset
1514   //      | oopHandle area      |
1515   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1516   //      | outbound memory     |
1517   //      | based arguments     |
1518   //      |                     |
1519   //      |---------------------|
1520   //      |                     |
1521   // SP-> | out_preserved_slots |
1522   //
1523   //
1524 
1525 
1526   // Now compute actual number of stack words we need rounding to make
1527   // stack properly aligned.
1528   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1529 
1530   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1531 
1532   // First thing make an ic check to see if we should even be here
1533 
1534   // We are free to use all registers as temps without saving them and
1535   // restoring them except rfp. rfp is the only callee save register
1536   // as far as the interpreter and the compiler(s) are concerned.
1537 
1538   const Register receiver = j_rarg0;
1539 
1540   Label exception_pending;
1541 
1542   assert_different_registers(receiver, rscratch1);
1543   __ verify_oop(receiver);
1544   __ ic_check(8 /* end_alignment */);
1545 
1546   // Verified entry point must be aligned
1547   int vep_offset = ((intptr_t)__ pc()) - start;
1548 
1549   // If we have to make this method not-entrant we'll overwrite its
1550   // first instruction with a jump.  For this action to be legal we
1551   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1552   // SVC, HVC, or SMC.  Make it a NOP.
1553   __ nop();
1554 
1555   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1556     Label L_skip_barrier;
1557     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1558     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1559     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1560 
1561     __ bind(L_skip_barrier);
1562   }
1563 
1564   // Generate stack overflow check
1565   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1566 
1567   // Generate a new frame for the wrapper.
1568   __ enter();
1569   // -2 because return address is already present and so is saved rfp
1570   __ sub(sp, sp, stack_size - 2*wordSize);
1571 
1572   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1573   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1574 
1575   // Frame is now completed as far as size and linkage.
1576   int frame_complete = ((intptr_t)__ pc()) - start;
1577 
1578   // We use r20 as the oop handle for the receiver/klass
1579   // It is callee save so it survives the call to native
1580 
1581   const Register oop_handle_reg = r20;
1582 
1583   //
1584   // We immediately shuffle the arguments so that any vm call we have to
1585   // make from here on out (sync slow path, jvmti, etc.) we will have
1586   // captured the oops from our caller and have a valid oopMap for
1587   // them.
1588 
1589   // -----------------
1590   // The Grand Shuffle
1591 
1592   // The Java calling convention is either equal (linux) or denser (win64) than the
1593   // C calling convention. However, because of the jni_env argument the C calling
1594   // convention always has at least one more (and two for static) arguments than Java.
1595   // Therefore, if we move the args from java -> c backwards then we will never have
1596   // a register->register conflict and we don't have to build a dependency graph
1597   // and figure out how to break any cycles.
1598   //
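  // Concretely (a sketch): Java arg i maps to C arg i+1 (i+2 when static),
  // so a destination always sits "ahead" of every source that has not yet
  // been read; moving from the last arg backwards can therefore never
  // clobber an unmoved source.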
1599 
1600   // Record esp-based slot for receiver on stack for non-static methods
1601   int receiver_offset = -1;
1602 
1603   // This is a trick. We double the stack slots so we can claim
1604   // the oops in the caller's frame. Since we are sure to have
1605   // more args than the caller, doubling is enough to make
1606   // sure we can capture all the incoming oop args from the
1607   // caller.
1608   //
1609   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
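  // In effect (a sketch of the convention): slots [0, stack_slots) describe
  // this frame, while an incoming oop still in the caller's frame at caller
  // slot k is recorded as slot stack_slots + k in the doubled map.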
1610 
1611   // Mark location of rfp (someday)
1612   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1613 
1614 
1615   int float_args = 0;
1616   int int_args = 0;
1617 
1618 #ifdef ASSERT
1619   bool reg_destroyed[Register::number_of_registers];
1620   bool freg_destroyed[FloatRegister::number_of_registers];
1621   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1622     reg_destroyed[r] = false;
1623   }
1624   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1625     freg_destroyed[f] = false;
1626   }
1627 
1628 #endif /* ASSERT */
1629 
1630   // For JNI natives the incoming and outgoing registers are offset upwards.
1631   GrowableArray<int> arg_order(2 * total_in_args);
1632   VMRegPair tmp_vmreg;
1633   tmp_vmreg.set2(r19->as_VMReg());
1634 
1635   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1636     arg_order.push(i);
1637     arg_order.push(c_arg);
1638   }
1639 
1640   int temploc = -1;
1641   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1642     int i = arg_order.at(ai);
1643     int c_arg = arg_order.at(ai + 1);
1644     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1645     assert(c_arg != -1 && i != -1, "wrong order");
1646 #ifdef ASSERT
1647     if (in_regs[i].first()->is_Register()) {
1648       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1649     } else if (in_regs[i].first()->is_FloatRegister()) {
1650       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1651     }
1652     if (out_regs[c_arg].first()->is_Register()) {
1653       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1654     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1655       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1656     }
1657 #endif /* ASSERT */
1658     switch (in_sig_bt[i]) {
1659       case T_ARRAY:
1660       case T_OBJECT:
1661         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1662                        ((i == 0) && (!is_static)),
1663                        &receiver_offset);
1664         int_args++;
1665         break;
1666       case T_VOID:
1667         break;
1668 
1669       case T_FLOAT:
1670         __ float_move(in_regs[i], out_regs[c_arg]);
1671         float_args++;
1672         break;
1673 
1674       case T_DOUBLE:
1675         assert( i + 1 < total_in_args &&
1676                 in_sig_bt[i + 1] == T_VOID &&
1677                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1678         __ double_move(in_regs[i], out_regs[c_arg]);
1679         float_args++;
1680         break;
1681 
1682       case T_LONG :
1683         __ long_move(in_regs[i], out_regs[c_arg]);
1684         int_args++;
1685         break;
1686 
1687       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1688 
1689       default:
1690         __ move32_64(in_regs[i], out_regs[c_arg]);
1691         int_args++;
1692     }
1693   }
1694 
1695   // point c_arg at the first arg that is already loaded in case we
1696   // need to spill before we call out
1697   int c_arg = total_c_args - total_in_args;
1698 
1699   // Pre-load a static method's oop into c_rarg1.
1700   if (method->is_static()) {
1701 
1702     //  load oop into a register
1703     __ movoop(c_rarg1,
1704               JNIHandles::make_local(method->method_holder()->java_mirror()));
1705 
1706     // Now handlize the static class mirror; it's known not-null.
1707     __ str(c_rarg1, Address(sp, klass_offset));
1708     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1709 
1710     // Now get the handle
1711     __ lea(c_rarg1, Address(sp, klass_offset));
1712     // and protect the arg if we must spill
1713     c_arg--;
1714   }
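  // Note (illustrative): a JNI handle here is simply the address of a stack
  // slot holding the oop, so the callee effectively receives
  //   jclass clazz = (jclass)(sp + klass_offset);
  // and the GC can relocate the mirror by updating that slot.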
1715 
1716   // Change state to native (we save the return address in the thread, since it might not
1717   // be pushed on the stack when we do a stack traversal).
1718   // We use the same pc/oopMap repeatedly when we call out
1719 
1720   Label native_return;
1721   __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1722 
1723   Label dtrace_method_entry, dtrace_method_entry_done;
1724   {
1725     uint64_t offset;
1726     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1727     __ ldrb(rscratch1, Address(rscratch1, offset));
1728     __ cbnzw(rscratch1, dtrace_method_entry);
1729     __ bind(dtrace_method_entry_done);
1730   }
1731 
1732   // RedefineClasses() tracing support for obsolete method entry
1733   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1734     // protect the args we've loaded
1735     save_args(masm, total_c_args, c_arg, out_regs);
1736     __ mov_metadata(c_rarg1, method());
1737     __ call_VM_leaf(
1738       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1739       rthread, c_rarg1);
1740     restore_args(masm, total_c_args, c_arg, out_regs);
1741   }
1742 
1743   // Lock a synchronized method
1744 
1745   // Register definitions used by locking and unlocking
1746 
1747   const Register swap_reg = r0;
1748   const Register obj_reg  = r19;  // Will contain the oop
1749   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1750   const Register old_hdr  = r13;  // value of old header at unlock time
1751   const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
1752   const Register tmp = lr;
1753 
1754   Label slow_path_lock;
1755   Label lock_done;
1756 
1757   if (method->is_synchronized()) {
1758     Label count;
1759     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1760 
1761     // Get the handle (the 2nd argument)
1762     __ mov(oop_handle_reg, c_rarg1);
1763 
1764     // Get address of the box
1765 
1766     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1767 
1768     // Load the oop from the handle
1769     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1770 
1771     if (LockingMode == LM_MONITOR) {
1772       __ b(slow_path_lock);
1773     } else if (LockingMode == LM_LEGACY) {
1774       // Load (object->mark() | 1) into swap_reg %r0
1775       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1776       __ orr(swap_reg, rscratch1, 1);
1777 
1778       // Save (object->mark() | 1) into BasicLock's displaced header
1779       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1780 
1781       // src -> dest iff dest == r0 else r0 <- dest
1782       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1783 
1784       // Hmm should this move to the slow path code area???
1785 
1786       // Test if the oopMark is an obvious stack pointer, i.e.,
1787       //  1) (mark & 3) == 0, and
1788       //  2) sp <= mark < mark + os::pagesize()
1789       // These 3 tests can be done by evaluating the following
1790       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1791       // assuming both stack pointer and pagesize have their
1792       // least significant 2 bits clear.
1793       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
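      // Worked example (assuming a 4K page): with sp = 0x7ffc000 and a
      // displaced mark in this page, say mark = 0x7ffc040,
      //   (mark - sp) & (3 - 0x1000) = 0x40 & 0x...fffff003 = 0
      // so the lock is treated as a recursive stack lock; a mark outside
      // [sp, sp + page) or with low bits set yields a non-zero result.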
1794 
1795       __ sub(swap_reg, sp, swap_reg);
1796       __ neg(swap_reg, swap_reg);
1797       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1798 
1799       // Save the test result, for recursive case, the result is zero
1800       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1801       __ br(Assembler::NE, slow_path_lock);
1802       __ b(lock_done);
1803     } else {
1804       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1805       __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1806       __ b(lock_done);
1807     }
1808     __ bind(count);
1809     __ inc_held_monitor_count();
1810 
1811     // Slow path will re-enter here
1812     __ bind(lock_done);
1813   }
1814 
1815 
1816   // Finally just about ready to make the JNI call
1817 
1818   // get JNIEnv* which is first argument to native
1819   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1820 
1821   // Now set thread in native
1822   __ mov(rscratch1, _thread_in_native);
1823   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1824   __ stlrw(rscratch1, rscratch2);
1825 
1826   __ rt_call(native_func);
1827 
1828   __ bind(native_return);
1829 
1830   intptr_t return_pc = (intptr_t) __ pc();
1831   oop_maps->add_gc_map(return_pc - start, map);
1832 
1833   // Verify or restore cpu control state after JNI call
1834   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1835 
1836   // Unpack native results.
1837   switch (ret_type) {
1838   case T_BOOLEAN: __ c2bool(r0);                     break;
1839   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1840   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1841   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1842   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1843   case T_DOUBLE :
1844   case T_FLOAT  :
1845     // Result is in v0 we'll save as needed
1846     break;
1847   case T_ARRAY:                 // Really a handle
1848   case T_OBJECT:                // Really a handle
1849       break; // can't de-handlize until after safepoint check
1850   case T_VOID: break;
1851   case T_LONG: break;
1852   default       : ShouldNotReachHere();
1853   }
1854 
1855   Label safepoint_in_progress, safepoint_in_progress_done;
1856   Label after_transition;
1857 
1858   // Switch thread to "native transition" state before reading the synchronization state.
1859   // This additional state is necessary because reading and testing the synchronization
1860   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1861   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1862   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1863   //     Thread A is resumed to finish this native method, but doesn't block here since it
1864   //     didn't see any synchronization in progress, and escapes.
1865   __ mov(rscratch1, _thread_in_native_trans);
1866 
1867   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1868 
1869   // Force this write out before the read below
1870   if (!UseSystemMemoryBarrier) {
1871     __ dmb(Assembler::ISH);
1872   }
1873 
1874   __ verify_sve_vector_length();
1875 
1876   // Check for safepoint operation in progress and/or pending suspend requests.
1877   {
1878     // We need an acquire here to ensure that any subsequent load of the
1879     // global SafepointSynchronize::_state flag is ordered after this load
1880     // of the thread-local polling word.  We don't want this poll to
1881     // return false (i.e. not safepointing) and a later poll of the global
1882     // SafepointSynchronize::_state to spuriously return true.
1883     //
1884     // This is to avoid a race when we're in a native->Java transition
1885     // racing the code which wakes up from a safepoint.
1886 
1887     __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1888     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1889     __ cbnzw(rscratch1, safepoint_in_progress);
1890     __ bind(safepoint_in_progress_done);
1891   }
1892 
1893   // change thread state
1894   __ mov(rscratch1, _thread_in_Java);
1895   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1896   __ stlrw(rscratch1, rscratch2);
1897   __ bind(after_transition);
1898 
1899   Label reguard;
1900   Label reguard_done;
1901   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1902   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1903   __ br(Assembler::EQ, reguard);
1904   __ bind(reguard_done);
1905 
1906   // native result if any is live
1907 
1908   // Unlock
1909   Label unlock_done;
1910   Label slow_path_unlock;
1911   if (method->is_synchronized()) {
1912 
1913     // Get locked oop from the handle we passed to jni
1914     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1915 
1916     Label done, not_recursive;
1917 
1918     if (LockingMode == LM_LEGACY) {
1919       // Simple recursive lock?
1920       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1921       __ cbnz(rscratch1, not_recursive);
1922       __ b(done);
1923     }
1924 
1925     __ bind(not_recursive);
1926 
1927     // Must save r0 if it is live now, because cmpxchg must use it
1928     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1929       save_native_result(masm, ret_type, stack_slots);
1930     }
1931 
1932     if (LockingMode == LM_MONITOR) {
1933       __ b(slow_path_unlock);
1934     } else if (LockingMode == LM_LEGACY) {
1935       // get address of the stack lock
1936       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1937       //  get old displaced header
1938       __ ldr(old_hdr, Address(r0, 0));
1939 
1940       // Atomic swap old header if oop still contains the stack lock
1941       Label count;
1942       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1943       __ bind(count);
1944       __ dec_held_monitor_count();
1945     } else {
1946       assert(LockingMode == LM_LIGHTWEIGHT, "");
1947       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1948     }
1949 
1950     // slow path re-enters here
1951     __ bind(unlock_done);
1952     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1953       restore_native_result(masm, ret_type, stack_slots);
1954     }
1955 
1956     __ bind(done);
1957   }
1958 
1959   Label dtrace_method_exit, dtrace_method_exit_done;
1960   {
1961     uint64_t offset;
1962     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1963     __ ldrb(rscratch1, Address(rscratch1, offset));
1964     __ cbnzw(rscratch1, dtrace_method_exit);
1965     __ bind(dtrace_method_exit_done);
1966   }
1967 
1968   __ reset_last_Java_frame(false);
1969 
1970   // Unbox oop result, e.g. JNIHandles::resolve result.
1971   if (is_reference_type(ret_type)) {
1972     __ resolve_jobject(r0, r1, r2);
1973   }
1974 
1975   if (CheckJNICalls) {
1976     // clear_pending_jni_exception_check
1977     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1978   }
1979 
1980   // reset handle block
1981   __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
1982   __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
1983 
1984   __ leave();
1985 
1986   // Any exception pending?
1987   __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1988   __ cbnz(rscratch1, exception_pending);
1989 
1990   // We're done
1991   __ ret(lr);
1992 
1993   // Unexpected paths are out of line and go here
1994 
1995   // forward the exception
1996   __ bind(exception_pending);
1997 
1998   // and forward the exception
1999   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2000 
2001   // Slow path locking & unlocking
2002   if (method->is_synchronized()) {
2003 
2004     __ block_comment("Slow path lock {");
2005     __ bind(slow_path_lock);
2006 
2007     // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM.
2008     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2009 
2010     // protect the args we've loaded
2011     save_args(masm, total_c_args, c_arg, out_regs);
2012 
2013     __ mov(c_rarg0, obj_reg);
2014     __ mov(c_rarg1, lock_reg);
2015     __ mov(c_rarg2, rthread);
2016 
2017     // Not a leaf but we have last_Java_frame setup as we want
2018     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2019     restore_args(masm, total_c_args, c_arg, out_regs);
2020 
2021 #ifdef ASSERT
2022     { Label L;
2023       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2024       __ cbz(rscratch1, L);
2025       __ stop("no pending exception allowed on exit from monitorenter");
2026       __ bind(L);
2027     }
2028 #endif
2029     __ b(lock_done);
2030 
2031     __ block_comment("} Slow path lock");
2032 
2033     __ block_comment("Slow path unlock {");
2034     __ bind(slow_path_unlock);
2035 
2036     // If we haven't already saved the native result we must save it now, as the
2037     // floating-point result register (v0) is still exposed.
2038 
2039     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2040       save_native_result(masm, ret_type, stack_slots);
2041     }
2042 
2043     __ mov(c_rarg2, rthread);
2044     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2045     __ mov(c_rarg0, obj_reg);
2046 
2047     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2048     // NOTE that obj_reg == r19 currently
2049     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2050     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2051 
2052     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2053 
2054 #ifdef ASSERT
2055     {
2056       Label L;
2057       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2058       __ cbz(rscratch1, L);
2059       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2060       __ bind(L);
2061     }
2062 #endif /* ASSERT */
2063 
2064     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2065 
2066     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2067       restore_native_result(masm, ret_type, stack_slots);
2068     }
2069     __ b(unlock_done);
2070 
2071     __ block_comment("} Slow path unlock");
2072 
2073   } // synchronized
2074 
2075   // SLOW PATH Reguard the stack if needed
2076 
2077   __ bind(reguard);
2078   save_native_result(masm, ret_type, stack_slots);
2079   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2080   restore_native_result(masm, ret_type, stack_slots);
2081   // and continue
2082   __ b(reguard_done);
2083 
2084   // SLOW PATH safepoint
2085   {
2086     __ block_comment("safepoint {");
2087     __ bind(safepoint_in_progress);
2088 
2089     // Don't use call_VM as it will see a possible pending exception and forward it
2090     // and never return here, preventing us from clearing _last_native_pc down below.
2091     //
2092     save_native_result(masm, ret_type, stack_slots);
2093     __ mov(c_rarg0, rthread);
2094 #ifndef PRODUCT
2095     assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2096 #endif
2097     __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2098     __ blr(rscratch1);
2099 
2100     // Restore any method result value
2101     restore_native_result(masm, ret_type, stack_slots);
2102 
2103     __ b(safepoint_in_progress_done);
2104     __ block_comment("} safepoint");
2105   }
2106 
2107   // SLOW PATH dtrace support
2108   {
2109     __ block_comment("dtrace entry {");
2110     __ bind(dtrace_method_entry);
2111 
2112     // We have all of the arguments set up at this point. We must not touch any
2113     // argument registers here (if we saved/restored them, no oop map would cover them).
2114 
2115     save_args(masm, total_c_args, c_arg, out_regs);
2116     __ mov_metadata(c_rarg1, method());
2117     __ call_VM_leaf(
2118       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2119       rthread, c_rarg1);
2120     restore_args(masm, total_c_args, c_arg, out_regs);
2121     __ b(dtrace_method_entry_done);
2122     __ block_comment("} dtrace entry");
2123   }
2124 
2125   {
2126     __ block_comment("dtrace exit {");
2127     __ bind(dtrace_method_exit);
2128     save_native_result(masm, ret_type, stack_slots);
2129     __ mov_metadata(c_rarg1, method());
2130     __ call_VM_leaf(
2131          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2132          rthread, c_rarg1);
2133     restore_native_result(masm, ret_type, stack_slots);
2134     __ b(dtrace_method_exit_done);
2135     __ block_comment("} dtrace exit");
2136   }
2137 
2138 
2139   __ flush();
2140 
2141   nmethod *nm = nmethod::new_native_nmethod(method,
2142                                             compile_id,
2143                                             masm->code(),
2144                                             vep_offset,
2145                                             frame_complete,
2146                                             stack_slots / VMRegImpl::slots_per_word,
2147                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2148                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2149                                             oop_maps);
2150 
2151   return nm;
2152 }
2153 
2154 // This function returns the adjustment size (in number of words) to a c2i adapter
2155 // activation for use during deoptimization.
2156 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2157   assert(callee_locals >= callee_parameters,
2158           "test and remove; got more parms than locals");
2159   if (callee_locals < callee_parameters)
2160     return 0;                   // No adjustment for negative locals
2161   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2162   // diff is counted in stack words
2163   return align_up(diff, 2);
2164 }
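// For instance (illustrative numbers, one word per stack element): with
// callee_parameters = 2 and callee_locals = 5, diff = 3 stack words and
// align_up(3, 2) = 4 keeps the adjusted frame 16-byte aligned.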
2165 
2166 
2167 //------------------------------generate_deopt_blob----------------------------
2168 void SharedRuntime::generate_deopt_blob() {
2169   // Allocate space for the code
2170   ResourceMark rm;
2171   // Setup code generation tools
2172   int pad = 0;
2173 #if INCLUDE_JVMCI
2174   if (EnableJVMCI) {
2175     pad += 512; // Increase the buffer size when compiling for JVMCI
2176   }
2177 #endif
2178   CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2179   MacroAssembler* masm = new MacroAssembler(&buffer);
2180   int frame_size_in_words;
2181   OopMap* map = nullptr;
2182   OopMapSet *oop_maps = new OopMapSet();
2183   RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2184 
2185   // -------------
2186   // This code enters when returning to a de-optimized nmethod.  A return
2187   // address has been pushed on the stack, and return values are in
2188   // registers.
2189   // If we are doing a normal deopt then we were called from the patched
2190   // nmethod from the point we returned to the nmethod. So the return
2191   // address on the stack is wrong by NativeCall::instruction_size.
2192   // We will adjust the value so it looks like we have the original return
2193   // address on the stack (like when we eagerly deoptimized).
2194   // In the case of an exception pending when deoptimizing, we enter
2195   // with a return address on the stack that points after the call we patched
2196   // into the exception handler. We have the following register state from,
2197   // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2198   //    r0: exception oop
2199   //    r19: exception handler
2200   //    r3: throwing pc
2201   // So in this case we simply jam r3 into the useless return address and
2202   // the stack looks just like we want.
2203   //
2204   // At this point we need to de-opt.  We save the argument return
2205   // registers.  We call the first C routine, fetch_unroll_info().  This
2206   // routine captures the return values and returns a structure which
2207   // describes the current frame size and the sizes of all replacement frames.
2208   // The current frame is compiled code and may contain many inlined
2209   // functions, each with their own JVM state.  We pop the current frame, then
2210   // push all the new frames.  Then we call the C routine unpack_frames() to
2211   // populate these frames.  Finally unpack_frames() returns us the new target
2212   // address.  Notice that callee-save registers are BLOWN here; they have
2213   // already been captured in the vframeArray at the time the return PC was
2214   // patched.
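  // In outline (a sketch of the control flow generated below):
  //   save_live_registers();
  //   info = Deoptimization::fetch_unroll_info(thread);   // frame sizes/pcs
  //   pop the deoptimized frame;
  //   for each replacement frame: push pc, push fp, allocate frame body;
  //   Deoptimization::unpack_frames(thread, exec_mode);   // fill skeletons
  //   return into the interpreter;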
2215   address start = __ pc();
2216   Label cont;
2217 
2218   // Prolog for non exception case!
2219 
2220   // Save everything in sight.
2221   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2222 
2223   // Normal deoptimization.  Save exec mode for unpack_frames.
2224   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2225   __ b(cont);
2226 
2227   int reexecute_offset = __ pc() - start;
2228 #if INCLUDE_JVMCI && !defined(COMPILER1)
2229   if (EnableJVMCI && UseJVMCICompiler) {
2230     // JVMCI does not use this kind of deoptimization
2231     __ should_not_reach_here();
2232   }
2233 #endif
2234 
2235   // Reexecute case
2236   // the return address is the pc that describes what bci to re-execute at
2237 
2238   // No need to update map as each call to save_live_registers will produce identical oopmap
2239   (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2240 
2241   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2242   __ b(cont);
2243 
2244 #if INCLUDE_JVMCI
2245   Label after_fetch_unroll_info_call;
2246   int implicit_exception_uncommon_trap_offset = 0;
2247   int uncommon_trap_offset = 0;
2248 
2249   if (EnableJVMCI) {
2250     implicit_exception_uncommon_trap_offset = __ pc() - start;
2251 
2252     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2253     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2254 
2255     uncommon_trap_offset = __ pc() - start;
2256 
2257     // Save everything in sight.
2258     reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2259     // fetch_unroll_info needs to call last_java_frame()
2260     Label retaddr;
2261     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2262 
2263     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2264     __ movw(rscratch1, -1);
2265     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2266 
2267     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2268     __ mov(c_rarg0, rthread);
2269     __ movw(c_rarg2, rcpool); // exec mode
2270     __ lea(rscratch1,
2271            RuntimeAddress(CAST_FROM_FN_PTR(address,
2272                                            Deoptimization::uncommon_trap)));
2273     __ blr(rscratch1);
2274     __ bind(retaddr);
2275     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2276 
2277     __ reset_last_Java_frame(false);
2278 
2279     __ b(after_fetch_unroll_info_call);
2280   } // EnableJVMCI
2281 #endif // INCLUDE_JVMCI
2282 
2283   int exception_offset = __ pc() - start;
2284 
2285   // Prolog for exception case
2286 
2287   // All registers are dead at this entry point, except for r0 and
2288   // r3, which contain the exception oop and exception pc
2289   // respectively.  Set them in TLS and fall thru to the
2290   // unpack_with_exception_in_tls entry point.
2291 
2292   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2293   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2294 
2295   int exception_in_tls_offset = __ pc() - start;
2296 
2297   // new implementation because exception oop is now passed in JavaThread
2298 
2299   // Prolog for exception case
2300   // All registers must be preserved because they might be used by LinearScan
2301   // Exception oop and throwing PC are passed in JavaThread
2302   // tos: stack at point of call to method that threw the exception (i.e. only
2303   // args are on the stack, no return address)
2304 
2305   // The return address pushed by save_live_registers will be patched
2306   // later with the throwing pc. The correct value is not available
2307   // now because loading it from memory would destroy registers.
2308 
2309   // NB: The SP at this point must be the SP of the method that is
2310   // being deoptimized.  Deoptimization assumes that the frame created
2311   // here by save_live_registers is immediately below the method's SP.
2312   // This is a somewhat fragile mechanism.
2313 
2314   // Save everything in sight.
2315   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2316 
2317   // Now it is safe to overwrite any register
2318 
2319   // Deopt during an exception.  Save exec mode for unpack_frames.
2320   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2321 
2322   // load throwing pc from JavaThread and patch it as the return address
2323   // of the current frame. Then clear the field in JavaThread
2324   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2325   __ protect_return_address(r3);
2326   __ str(r3, Address(rfp, wordSize));
2327   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2328 
2329 #ifdef ASSERT
2330   // verify that there is really an exception oop in JavaThread
2331   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2332   __ verify_oop(r0);
2333 
2334   // verify that there is no pending exception
2335   Label no_pending_exception;
2336   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2337   __ cbz(rscratch1, no_pending_exception);
2338   __ stop("must not have pending exception here");
2339   __ bind(no_pending_exception);
2340 #endif
2341 
2342   __ bind(cont);
2343 
2344   // Call C code.  Need thread and this frame, but NOT official VM entry
2345   // crud.  We cannot block on this call, no GC can happen.
2346   //
2347   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2348 
2349   // fetch_unroll_info needs to call last_java_frame().
2350 
2351   Label retaddr;
2352   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2353 #ifdef ASSERT
2354   { Label L;
2355     __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2356     __ cbz(rscratch1, L);
2357     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2358     __ bind(L);
2359   }
2360 #endif // ASSERT
2361   __ mov(c_rarg0, rthread);
2362   __ mov(c_rarg1, rcpool);
2363   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2364   __ blr(rscratch1);
2365   __ bind(retaddr);
2366 
2367   // Need to have an oopmap that tells fetch_unroll_info where to
2368   // find any register it might need.
2369   oop_maps->add_gc_map(__ pc() - start, map);
2370 
2371   __ reset_last_Java_frame(false);
2372 
2373 #if INCLUDE_JVMCI
2374   if (EnableJVMCI) {
2375     __ bind(after_fetch_unroll_info_call);
2376   }
2377 #endif
2378 
2379   // Load UnrollBlock* into r5
2380   __ mov(r5, r0);
2381 
2382   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2383   Label noException;
2384   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2385   __ br(Assembler::NE, noException);
2386   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2387   // QQQ this is useless; it was null above
2388   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2389   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2390   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2391 
2392   __ verify_oop(r0);
2393 
2394   // Overwrite the result registers with the exception results.
2395   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2396   // I think this is useless
2397   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2398 
2399   __ bind(noException);
2400 
2401   // Only register save data is on the stack.
2402   // Now restore the result registers.  Everything else is either dead
2403   // or captured in the vframeArray.
2404 
2405   // Restore fp result register
2406   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2407   // Restore integer result register
2408   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2409 
2410   // Pop all of the register save area off the stack
2411   __ add(sp, sp, frame_size_in_words * wordSize);
2412 
2413   // All of the register save area has been popped off the stack. Only the
2414   // return address remains.
2415 
2416   // Pop all the frames we must move/replace.
2417   //
2418   // Frame picture (youngest to oldest)
2419   // 1: self-frame (no frame link)
2420   // 2: deopting frame  (no frame link)
2421   // 3: caller of deopting frame (could be compiled/interpreted).
2422   //
2423   // Note: by leaving the return address of self-frame on the stack
2424   // and using the size of frame 2 to adjust the stack
2425   // when we are done the return to frame 3 will still be on the stack.
2426 
2427   // Pop deoptimized frame
2428   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2429   __ sub(r2, r2, 2 * wordSize);
2430   __ add(sp, sp, r2);
2431   __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2432 
2433 #ifdef ASSERT
2434   // Compilers generate code that bangs the stack by as much as the
2435   // interpreter would need. So this stack banging should never
2436   // trigger a fault. Verify that it does not on non-product builds.
2437   __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2438   __ bang_stack_size(r19, r2);
2439 #endif
2440   // Load address of array of frame pcs into r2
2441   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2442 
2443   // Trash the old pc
2444   // __ addptr(sp, wordSize);  FIXME ????
2445 
2446   // Load address of array of frame sizes into r4
2447   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2448 
2449   // Load counter into r3
2450   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2451 
2452   // Now adjust the caller's stack to make up for the extra locals
2453   // but record the original sp so that we can save it in the skeletal interpreter
2454   // frame and the stack walking of interpreter_sender will get the unextended sp
2455   // value and not the "real" sp value.
2456 
2457   const Register sender_sp = r6;
2458 
2459   __ mov(sender_sp, sp);
2460   __ ldrw(r19, Address(r5,
2461                        Deoptimization::UnrollBlock::
2462                        caller_adjustment_offset()));
2463   __ sub(sp, sp, r19);
2464 
2465   // Push interpreter frames in a loop
2466   __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2467   __ mov(rscratch2, rscratch1);
2468   Label loop;
2469   __ bind(loop);
2470   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2471   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2472   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2473   __ enter();                           // Save old & set new fp
2474   __ sub(sp, sp, r19);                  // Prolog
2475   // This value is corrected by layout_activation_impl
2476   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2477   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2478   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2479   __ sub(r3, r3, 1);                   // Decrement counter
2480   __ cbnz(r3, loop);
2481 
2482   // Re-push self-frame
2483   __ ldr(lr, Address(r2));
2484   __ enter();
2485 
2486   // Allocate a full sized register save area.  We subtract 2 because
2487   // enter() just pushed 2 words
2488   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2489 
2490   // Restore frame locals after moving the frame
2491   __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2492   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2493 
2494   // Call C code.  Need thread but NOT official VM entry
2495   // crud.  We cannot block on this call, no GC can happen.  Call should
2496   // restore return values to their stack-slots with the new SP.
2497   //
2498   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2499 
2500   // Use rfp because the frames look interpreted now
2501   // Don't need the precise return PC here, just precise enough to point into this code blob.
2502   address the_pc = __ pc();
2503   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2504 
2505   __ mov(c_rarg0, rthread);
2506   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2507   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2508   __ blr(rscratch1);
2509 
2510   // Set an oopmap for the call site
2511   // Use the same PC we used for the last java frame
2512   oop_maps->add_gc_map(the_pc - start,
2513                        new OopMap( frame_size_in_words, 0 ));
2514 
2515   // Clear fp AND pc
2516   __ reset_last_Java_frame(true);
2517 
2518   // Collect return values
2519   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2520   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2521   // I think this is useless (throwing pc?)
2522   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2523 
2524   // Pop self-frame.
2525   __ leave();                           // Epilog
2526 
2527   // Jump to interpreter
2528   __ ret(lr);
2529 
2530   // Make sure all code is generated
2531   masm->flush();
2532 
2533   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2534   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2535 #if INCLUDE_JVMCI
2536   if (EnableJVMCI) {
2537     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2538     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2539   }
2540 #endif
2541 }
2542 
2543 // Number of stack slots between incoming argument block and the start of
2544 // a new frame.  The PROLOG must add this many slots to the stack.  The
2545 // EPILOG must remove this many slots. aarch64 needs two slots for
2546 // return address and fp.
2547 // TODO think this is correct but check
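// (4 slots = 2 words -- the return address and the saved rfp -- at
// VMRegImpl::slots_per_word (2) stack slots per 64-bit word.)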
2548 uint SharedRuntime::in_preserve_stack_slots() {
2549   return 4;
2550 }
2551 
2552 uint SharedRuntime::out_preserve_stack_slots() {
2553   return 0;
2554 }
2555 
2556 VMReg SharedRuntime::thread_register() {
2557   return rthread->as_VMReg();
2558 }
2559 
2560 #ifdef COMPILER2
2561 //------------------------------generate_uncommon_trap_blob--------------------
2562 void SharedRuntime::generate_uncommon_trap_blob() {
2563   // Allocate space for the code
2564   ResourceMark rm;
2565   // Setup code generation tools
2566   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2567   MacroAssembler* masm = new MacroAssembler(&buffer);
2568 
2569   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2570 
2571   address start = __ pc();
2572 
2573   // Push self-frame.  We get here with a return address in LR
2574   // and sp should be 16 byte aligned
2575   // push rfp and retaddr by hand
2576   __ protect_return_address();
2577   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2578   // we don't expect an arg reg save area
2579 #ifndef PRODUCT
2580   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2581 #endif
2582   // The compiler left unloaded_class_index in j_rarg0; move it to where the
2583   // runtime expects it.
2584   if (c_rarg1 != j_rarg0) {
2585     __ movw(c_rarg1, j_rarg0);
2586   }
2587 
2588   // We need to set the last SP to the stack pointer of the stub frame
2589   // and the pc to the address where this runtime call will return
2590   // (although actually any pc in this code blob will do).
2591   Label retaddr;
2592   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2593 
2594   // Call C code.  Need thread but NOT official VM entry
2595   // crud.  We cannot block on this call, no GC can happen.  Call should
2596   // capture callee-saved registers as well as return values.
2597   // The thread is passed in c_rarg0 below.
2598   //
2599   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2600   //
2601   // n.b. 2 gp args, 0 fp args, integral return type
2602 
2603   __ mov(c_rarg0, rthread);
2604   __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2605   __ lea(rscratch1,
2606          RuntimeAddress(CAST_FROM_FN_PTR(address,
2607                                          Deoptimization::uncommon_trap)));
2608   __ blr(rscratch1);
2609   __ bind(retaddr);
2610 
2611   // Set an oopmap for the call site
2612   OopMapSet* oop_maps = new OopMapSet();
2613   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2614 
2615   // location of rfp is known implicitly by the frame sender code
2616 
2617   oop_maps->add_gc_map(__ pc() - start, map);
2618 
2619   __ reset_last_Java_frame(false);
2620 
2621   // move UnrollBlock* into r4
2622   __ mov(r4, r0);
2623 
2624 #ifdef ASSERT
2625   { Label L;
2626     __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset()));
2627     __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2628     __ br(Assembler::EQ, L);
2629     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2630     __ bind(L);
2631   }
2632 #endif
2633 
2634   // Pop all the frames we must move/replace.
2635   //
2636   // Frame picture (youngest to oldest)
2637   // 1: self-frame (no frame link)
2638   // 2: deopting frame  (no frame link)
2639   // 3: caller of deopting frame (could be compiled/interpreted).
2640 
2641   // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
2642   __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2643 
2644   // Pop deoptimized frame (int)
2645   __ ldrw(r2, Address(r4,
2646                       Deoptimization::UnrollBlock::
2647                       size_of_deoptimized_frame_offset()));
2648   __ sub(r2, r2, 2 * wordSize);
2649   __ add(sp, sp, r2);
2650   __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
2651 
2652 #ifdef ASSERT
2653   // Compilers generate code that bangs the stack by as much as the
2654   // interpreter would need. So this stack banging should never
2655   // trigger a fault. Verify that it does not on non-product builds.
2656   __ ldrw(r1, Address(r4,
2657                       Deoptimization::UnrollBlock::
2658                       total_frame_sizes_offset()));
2659   __ bang_stack_size(r1, r2);
2660 #endif
2661 
2662   // Load address of array of frame pcs into r2 (address*)
2663   __ ldr(r2, Address(r4,
2664                      Deoptimization::UnrollBlock::frame_pcs_offset()));
2665 
2666   // Load address of array of frame sizes into r5 (intptr_t*)
2667   __ ldr(r5, Address(r4,
2668                      Deoptimization::UnrollBlock::
2669                      frame_sizes_offset()));
2670 
2671   // Counter
2672   __ ldrw(r3, Address(r4,
2673                       Deoptimization::UnrollBlock::
2674                       number_of_frames_offset())); // (int)
2675 
2676   // Now adjust the caller's stack to make up for the extra locals but
2677   // record the original sp so that we can save it in the skeletal
2678   // interpreter frame and the stack walking of interpreter_sender
2679   // will get the unextended sp value and not the "real" sp value.
2680 
2681   const Register sender_sp = r8;
2682 
2683   __ mov(sender_sp, sp);
2684   __ ldrw(r1, Address(r4,
2685                       Deoptimization::UnrollBlock::
2686                       caller_adjustment_offset())); // (int)
2687   __ sub(sp, sp, r1);
2688 
2689   // Push interpreter frames in a loop
2690   Label loop;
2691   __ bind(loop);
2692   __ ldr(r1, Address(r5, 0));       // Load frame size
2693   __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
2694   __ ldr(lr, Address(r2, 0));       // Save return address
2695   __ enter();                       // and old rfp & set new rfp
2696   __ sub(sp, sp, r1);               // Prolog
2697   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2698   // This value is corrected by layout_activation_impl
2699   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2700   __ mov(sender_sp, sp);          // Pass sender_sp to next frame
2701   __ add(r5, r5, wordSize);       // Bump array pointer (sizes)
2702   __ add(r2, r2, wordSize);       // Bump array pointer (pcs)
2703   __ subsw(r3, r3, 1);            // Decrement counter
2704   __ br(Assembler::GT, loop);
2705   __ ldr(lr, Address(r2, 0));     // save final return address
2706   // Re-push self-frame
2707   __ enter();                     // & old rfp & set new rfp
2708 
2709   // Use rfp because the frames look interpreted now
2710   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2711   // Don't need the precise return PC here, just precise enough to point into this code blob.
2712   address the_pc = __ pc();
2713   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
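  // Anchor this frame as the last Java frame so the stack walker can
  // find it during the unpack_frames call below.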
2714 
  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  // Thread is in rthread; it is copied into c_rarg0 below.
2719   //
2720   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
2721   //
2722   // n.b. 2 gp args, 0 fp args, integral return type
2723 
2724   // sp should already be aligned
2725   __ mov(c_rarg0, rthread);
2726   __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2727   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2728   __ blr(rscratch1);
2729 
2730   // Set an oopmap for the call site
2731   // Use the same PC we used for the last java frame
2732   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2733 
2734   // Clear fp AND pc
2735   __ reset_last_Java_frame(true);
2736 
2737   // Pop self-frame.
2738   __ leave();                 // Epilog
2739 
2740   // Jump to interpreter
2741   __ ret(lr);
2742 
2743   // Make sure all code is generated
2744   masm->flush();
2745 
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);
2748 }
2749 #endif // COMPILER2
2750 
2751 
2752 //------------------------------generate_handler_blob------
2753 //
// Generate a special Compile2Runtime blob that saves all registers
// and sets up an oopmap.
2756 //
2757 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2758   ResourceMark rm;
2759   OopMapSet *oop_maps = new OopMapSet();
2760   OopMap* map;
2761 
2762   // Allocate space for the code.  Setup code generation tools.
2763   CodeBuffer buffer("handler_blob", 2048, 1024);
2764   MacroAssembler* masm = new MacroAssembler(&buffer);
2765 
2766   address start   = __ pc();
2767   address call_pc = nullptr;
2768   int frame_size_in_words;
2769   bool cause_return = (poll_type == POLL_AT_RETURN);
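  // cause_return means the poll was at a return instruction, so the return
  // address already on the stack is correct; otherwise the saved exception
  // pc is installed as the return address below.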
2770   RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
2771 
  // When the signal occurred, the LR was either signed and stored on the stack (in which
  // case it will be restored from the stack before being used) or unsigned and not stored
  // on the stack. Stripping ensures we get the right value either way.
2775   __ strip_return_address();
2776 
2777   // Save Integer and Float registers.
2778   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2779 
2780   // The following is basically a call_VM.  However, we need the precise
2781   // address of the call in order to generate an oopmap. Hence, we do all the
2782   // work ourselves.
2783 
  Label retaddr;
  // retaddr is bound immediately after the blr below; that pc keys the oopmap
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2786 
  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.
2789 
2790   if (!cause_return) {
2791     // overwrite the return address pushed by save_live_registers
2792     // Additionally, r20 is a callee-saved register so we can look at
2793     // it later to determine if someone changed the return address for
2794     // us!
2795     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2796     __ protect_return_address(r20);
2797     __ str(r20, Address(rfp, wordSize));
2798   }
2799 
2800   // Do the call
2801   __ mov(c_rarg0, rthread);
2802   __ lea(rscratch1, RuntimeAddress(call_ptr));
2803   __ blr(rscratch1);
2804   __ bind(retaddr);
2805 
2806   // Set an oopmap for the call site.  This oopmap will map all
2807   // oop-registers and debug-info registers as callee-saved.  This
2808   // will allow deoptimization at this safepoint to find all possible
2809   // debug-info recordings, as well as let GC find all oops.
2810 
2811   oop_maps->add_gc_map( __ pc() - start, map);
2812 
2813   Label noException;
2814 
2815   __ reset_last_Java_frame(false);
2816 
  // Ensure the load of the pending exception below observes any updates
  // made while the thread was stopped at the safepoint.
  __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2818 
2819   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2820   __ cbz(rscratch1, noException);
2821 
2822   // Exception pending
2823 
2824   reg_save.restore_live_registers(masm);
2825 
2826   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2827 
2828   // No exception case
2829   __ bind(noException);
2830 
2831   Label no_adjust, bail;
2832   if (!cause_return) {
2833     // If our stashed return pc was modified by the runtime we avoid touching it
2834     __ ldr(rscratch1, Address(rfp, wordSize));
2835     __ cmp(r20, rscratch1);
2836     __ br(Assembler::NE, no_adjust);
2837     __ authenticate_return_address(r20);
2838 
2839 #ifdef ASSERT
2840     // Verify the correct encoding of the poll we're about to skip.
2841     // See NativeInstruction::is_ldrw_to_zr()
2842     __ ldrw(rscratch1, Address(r20));
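    // For 'ldr wzr, [Xn]' bits [31:22] are 0b1011100101 and Rt (bits [4:0])
    // is 0b11111 (wzr).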
2843     __ ubfx(rscratch2, rscratch1, 22, 10);
2844     __ cmpw(rscratch2, 0b1011100101);
2845     __ br(Assembler::NE, bail);
2846     __ ubfx(rscratch2, rscratch1, 0, 5);
2847     __ cmpw(rscratch2, 0b11111);
2848     __ br(Assembler::NE, bail);
2849 #endif
2850     // Adjust return pc forward to step over the safepoint poll instruction
2851     __ add(r20, r20, NativeInstruction::instruction_size);
2852     __ protect_return_address(r20);
2853     __ str(r20, Address(rfp, wordSize));
2854   }
2855 
2856   __ bind(no_adjust);
2857   // Normal exit, restore registers and exit.
2858   reg_save.restore_live_registers(masm);
2859 
2860   __ ret(lr);
2861 
2862 #ifdef ASSERT
2863   __ bind(bail);
2864   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2865 #endif
2866 
2867   // Make sure all code is generated
2868   masm->flush();
2869 
2870   // Fill-out other meta info
2871   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2872 }
2873 
//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are; the caller
// must do any GC of the args.
//
2882 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2884 
2885   // allocate space for the code
2886   ResourceMark rm;
2887 
2888   CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
2890 
2891   int frame_size_in_words;
2892   RegisterSaver reg_save(false /* save_vectors */);
2893 
2894   OopMapSet *oop_maps = new OopMapSet();
2895   OopMap* map = nullptr;
2896 
2897   int start = __ offset();
2898 
2899   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2900 
2901   int frame_complete = __ offset();
2902 
2903   {
2904     Label retaddr;
2905     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2906 
2907     __ mov(c_rarg0, rthread);
2908     __ lea(rscratch1, RuntimeAddress(destination));
2909 
2910     __ blr(rscratch1);
2911     __ bind(retaddr);
2912   }
2913 
2914   // Set an oopmap for the call site.
2915   // We need this not only for callee-saved registers, but also for volatile
2916   // registers that the compiler might be keeping live across a safepoint.
2917 
2918   oop_maps->add_gc_map( __ offset() - start, map);
2919 
  // r0 contains the address we are going to jump to, assuming no exception was installed
2921 
2922   // clear last_Java_sp
2923   __ reset_last_Java_frame(false);
2924   // check for pending exceptions
2925   Label pending;
2926   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2927   __ cbnz(rscratch1, pending);
2928 
2929   // get the returned Method*
2930   __ get_vm_result_2(rmethod, rthread);
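  // Stash it in rmethod's save slot so restore_live_registers reloads it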
2931   __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
2932 
  // r0 is where we want to jump; stash it in rscratch1's save slot so that
  // restore_live_registers reloads it into rscratch1 (a scratch register,
  // so its saved value is expendable)
2934   __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
2935   reg_save.restore_live_registers(masm);
2936 
2937   // We are back to the original state on entry and ready to go.
2938 
2939   __ br(rscratch1);
2940 
2941   // Pending exception after the safepoint
2942 
2943   __ bind(pending);
2944 
2945   reg_save.restore_live_registers(masm);
2946 
2947   // exception pending => remove activation and forward to exception handler
2948 
  // Clear any stale oop in vm_result so GC does not keep it alive
  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
2950 
2951   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2952   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2953 
2954   // -------------
2955   // make sure all code is generated
2956   masm->flush();
2957 
  // return the blob; the frame size is passed in words, as
  // RuntimeStub::new_runtime_stub expects
2960   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2961 }
2962 
2963 #ifdef COMPILER2
// This is here instead of runtime_aarch64.cpp because it uses SimpleRuntimeFrame
2965 //
//------------------------------generate_exception_blob---------------------------
// Creates the exception blob.
// Compiled code jumps to this blob from its exception handler
// (see emit_exception_handler in the aarch64.ad file).
2970 //
// Given an exception pc at a call, we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-saved registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java-level handler
// for the nmethod.
//
// This code is entered with a branch, not a call.
2978 //
2979 // Arguments:
2980 //   r0: exception oop
2981 //   r3: exception pc
2982 //
2983 // Results:
2984 //   r0: exception oop
2985 //   r3: exception pc in caller or ???
2986 //   destination: exception handler of caller
2987 //
2988 // Note: the exception pc MUST be at a call (precise debug information)
2989 //       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
2990 //
2991 
2992 void OptoRuntime::generate_exception_blob() {
2993   assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
2994   assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
2995   assert(!OptoRuntime::is_callee_saved_register(R2_num), "");
2996 
  // framesize is in 4-byte stack slots; a multiple of 4 slots keeps sp 16-byte aligned
  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2998 
2999   // Allocate space for the code
3000   ResourceMark rm;
3001   // Setup code generation tools
3002   CodeBuffer buffer("exception_blob", 2048, 1024);
3003   MacroAssembler* masm = new MacroAssembler(&buffer);
3004 
  // TODO: check the various assumptions made here before relying on this code
3008 
3009   address start = __ pc();
3010 
3011   // push rfp and retaddr by hand
3012   // Exception pc is 'return address' for stack walker
3013   __ protect_return_address();
3014   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // there are no callee-saved registers and we don't expect an
  // arg reg save area
3017 #ifndef PRODUCT
3018   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
3019 #endif
3020   // Store exception in Thread object. We cannot pass any arguments to the
3021   // handle_exception call, since we do not want to make any assumption
3022   // about the size of the frame where the exception happened in.
3023   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
3024   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
3025 
3026   // This call does all the hard work.  It checks if an exception handler
3027   // exists in the method.
3028   // If so, it returns the handler address.
3029   // If not, it prepares for stack-unwinding, restoring the callee-save
3030   // registers of the frame being removed.
3031   //
3032   // address OptoRuntime::handle_exception_C(JavaThread* thread)
3033   //
3034   // n.b. 1 gp arg, 0 fp args, integral return type
3035 
3036   // the stack should always be aligned
3037   address the_pc = __ pc();
3038   __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
3039   __ mov(c_rarg0, rthread);
3040   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3041   __ blr(rscratch1);
3042   // handle_exception_C is a special VM call which does not require an explicit
3043   // instruction sync afterwards.
3044 
  // The call may return into SVE-compiled code, which assumes the predicate
  // register p7 holds an all-true value; the C call may have clobbered it.
  __ reinitialize_ptrue();
3047 
3048   // Set an oopmap for the call site.  This oopmap will only be used if we
3049   // are unwinding the stack.  Hence, all locations will be dead.
3050   // Callee-saved registers will be the same as the frame above (i.e.,
3051   // handle_exception_stub), since they were restored when we got the
3052   // exception.
3053 
3054   OopMapSet* oop_maps = new OopMapSet();
3055 
3056   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3057 
3058   __ reset_last_Java_frame(false);
3059 
3060   // Restore callee-saved registers
3061 
  // rfp is an implicitly saved callee-saved register (i.e. the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone,
  // and we don't expect an arg reg save area.
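  // Reload rfp and the exception pc (pushed at entry in the return-address slot)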
3066   __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
3067   __ authenticate_return_address(r3);
3068 
3069   // r0: exception handler
3070 
3071   // We have a handler in r0 (could be deopt blob).
3072   __ mov(r8, r0);
3073 
3074   // Get the exception oop
3075   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
3076   // Get the exception pc in case we are deoptimized
3077   __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
3078 #ifdef ASSERT
3079   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3080   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3081 #endif
3082   // Clear the exception oop so GC no longer processes it as a root.
3083   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3084 
3085   // r0: exception oop
3086   // r8:  exception handler
3087   // r4: exception pc
3088   // Jump to handler
3089 
3090   __ br(r8);
3091 
3092   // Make sure all code is generated
3093   masm->flush();
3094 
3095   // Set exception blob
3096   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3097 }
3098 
3099 #endif // COMPILER2