/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#if COMPILER2_OR_JVMCI
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
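// (With the usual AArch64 values, StackAlignmentInBytes == 16 and
// stack_slot_size == 4, this works out to 4 compiler slots per
// alignment boundary.)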

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // we don't expect any arg reg save area so aarch64 asserts that
    // frame::arg_reg_save_area_bytes == 0
    rbp_off = 0,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};
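
// For illustration, the resulting layout (offsets in 4-byte compiler
// slots, derived from the enum above) is:
//
//   rbp_off/rbp_off2       (slots 0-1)  saved rfp
//   return_off/return_off2 (slots 2-3)  saved lr
//   framesize == 4 slots == 2 machine words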

// FIXME -- this is used by C1
class RegisterSaver {
 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int r0_offset_in_bytes(void)    { return (32 + r0->encoding()) * wordSize; }
  static int reg_offset_in_bytes(Register r)    { return r0_offset_in_bytes() + r->encoding() * wordSize; }
  static int rmethod_offset_in_bytes(void)    { return reg_offset_in_bytes(rmethod); }
  static int rscratch1_offset_in_bytes(void)    { return (32 + rscratch1->encoding()) * wordSize; }
  static int v0_offset_in_bytes(void)   { return 0; }
  static int return_offset_in_bytes(void) { return (32 /* floats*/ + 31 /* gregs*/) * wordSize; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);

  // Capture info about frame layout
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off+FPUStateSizeInWords-1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off+FPUStateSizeInWords,
                rfp_off = r0_off + 30 * 2,
                return_off = rfp_off + 2,      // slot for return address
                reg_save_size = return_off + 2};

};
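
// For illustration, the save area laid out by push_CPU_state (offsets
// in 4-byte slots from sp, per the layout enum above) is roughly:
//
//   fpu_state_off..fpu_state_end : 32 float/SIMD registers
//   r0_off..                     : 30 general registers, 2 slots each
//   rfp_off                      : saved rfp
//   return_off                   : saved lr (return address)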

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    // Save upper half of vector registers
    int vect_words = 32 * 8 / wordSize;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jints) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(save_vectors);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
    Register r = as_Register(i);
    if (r < rheapbase && r != rscratch1 && r != rscratch2) {
      int sp_offset = 2 * (i + 32); // SP offsets are in 4-byte words,
                                    // register slots are 8 bytes
                                    // wide, 32 floating-point
                                    // registers
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
                                r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = save_vectors ? (4 * i) : (2 * i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                              r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
#ifndef COMPILER2
  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(restore_vectors);
  __ leave();
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ ldrd(v0, Address(sp, v0_offset_in_bytes()));
  // Restore integer result register
  __ ldr(r0, Address(sp, r0_offset_in_bytes()));

  // Pop the whole register save area off the stack
  __ add(sp, sp, align_up(return_offset_in_bytes(), 16));
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8;
}
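
// For example, only the low 8 bytes (the D part) of each SIMD register
// are saved by default, so a 16-byte NEON Q register counts as wide and
// needs the save_vectors path in save_live_registers above.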

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ mov(rscratch1, destination);
  __ br(rscratch1);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}
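
// For example, incoming stack slot 0 maps to rfp + 16, just past the
// two saved words (rfp and lr): (0 + 4) * stack_slot_size == 16.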

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

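// As a sketch of the convention below: a signature (int, long, float,
// Object), i.e. total_args_passed == 5 counting the long's T_VOID half,
// maps to
//   int    -> j_rarg0
//   long   -> j_rarg1  (its T_VOID half is set_bad())
//   float  -> j_farg0
//   Object -> j_rarg2
// and the function returns 0, since no stack slots were needed.
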
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blrt(rscratch1, 2, 0, 0);
  __ maybe_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r13, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a
    // long/double in a single slot on a 64-bit VM and it would be silly to
    // break them up, the interpreter leaves one slot empty and only stores
    // to a single slot. In this case the slot that is occupied is the
    // T_VOID slot. See, I said it was confusing.
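    //
    // For example, the T_LONG above is stored at next_off (24), and its
    // st_off (32) T_VOID slot is left unused (filled with known junk in
    // debug builds, as the ASSERT blocks below show).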

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory: use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaaaul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller), so move only 32 bits to the slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaabul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, 0xdeadffffdeadaaacul);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}


void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative so the LSW is at the LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}

#ifdef BUILTIN_SIM
static void generate_i2c_adapter_name(char *result, int total_args_passed, const BasicType *sig_bt)
{
  strcpy(result, "i2c(");
  int idx = 4;
  for (int i = 0; i < total_args_passed; i++) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
      result[idx++] = 'Z';
      break;
    case T_CHAR:
      result[idx++] = 'C';
      break;
    case T_FLOAT:
      result[idx++] = 'F';
      break;
    case T_DOUBLE:
      assert((i < (total_args_passed - 1)) && (sig_bt[i+1] == T_VOID),
             "double must be followed by void");
      i++;
      result[idx++] = 'D';
      break;
    case T_BYTE:
      result[idx++] = 'B';
      break;
    case T_SHORT:
      result[idx++] = 'S';
      break;
    case T_INT:
      result[idx++] = 'I';
      break;
    case T_LONG:
      assert((i < (total_args_passed - 1)) && (sig_bt[i+1] == T_VOID),
             "long must be followed by void");
      i++;
      result[idx++] = 'L';
      break;
    case T_OBJECT:
      result[idx++] = 'O';
      break;
    case T_ARRAY:
      result[idx++] = '[';
      break;
    case T_ADDRESS:
      result[idx++] = 'P';
      break;
    case T_NARROWOOP:
      result[idx++] = 'N';
      break;
    case T_METADATA:
      result[idx++] = 'M';
      break;
    case T_NARROWKLASS:
      result[idx++] = 'K';
      break;
    default:
      result[idx++] = '?';
      break;
    }
  }
  result[idx++] = ')';
  result[idx] = '\0';
}
#endif

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();
#ifdef BUILTIN_SIM
  char *name = NULL;
  AArch64Simulator *sim = NULL;
  size_t len = 65536;
  if (NotifySimulator) {
    name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
  }

  if (name) {
    generate_i2c_adapter_name(name, total_args_passed, sig_bt);
    sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(name, i2c_entry);
  }
#endif
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  Register holder = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

#ifdef BUILTIN_SIM
  if (name) {
    name[0] = 'c';
    name[2] = 'i';
    sim->notifyCompile(name, c2i_entry);
    FREE_C_HEAP_ARRAY(char, name, mtInternal);
  }
#endif

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on AArch64");

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

// On 64-bit we will store integer-like items to the stack as 64-bit
// items (SPARC ABI), even though Java would only store 32 bits for a
// parameter. On 32-bit it would simply be 32 bits, so this routine does
// 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if the oop is NULL; if it is, we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    __ lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmp(rscratch1, zr);
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmp(rOop, zr);
    __ lea(rHandle, Address(sp, offset));
    // conditionally move a NULL
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}
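
// For example (a sketch): an incoming oop in j_rarg2 is spilled to
// handle slot 2, i.e. sp + (2 * VMRegImpl::slots_per_word +
// oop_handle_offset) * VMRegImpl::stack_slot_size, and rHandle then
// points at that stack word (or is NULL if the oop itself was NULL).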

// A float arg may have to do a float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldrw(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ strw(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}


// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}
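
// Note that restore_args must mirror save_args: the integer registers
// come back with a single pop, and the float registers are reloaded in
// the reverse order of the pre-decrement stores above.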


// Check GCLocker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) { Unimplemented(); }

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }

class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMRegPair        _src;
    VMRegPair        _dst;
    int              _src_index;
    int              _dst_index;
    bool             _processed;
    MoveOperation*  _next;
    MoveOperation*  _prev;

    static int get_id(VMRegPair r) { Unimplemented(); return 0; }

   public:
    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
      _src(src)
    , _src_index(src_index)
    , _dst(dst)
    , _dst_index(dst_index)
    , _next(NULL)
    , _prev(NULL)
    , _processed(false) { Unimplemented(); }

    VMRegPair src() const              { Unimplemented(); return _src; }
    int src_id() const                 { Unimplemented(); return 0; }
    int src_index() const              { Unimplemented(); return 0; }
    VMRegPair dst() const              { Unimplemented(); return _src; }
    void set_dst(int i, VMRegPair dst) { Unimplemented(); }
    int dst_index() const              { Unimplemented(); return 0; }
    int dst_id() const                 { Unimplemented(); return 0; }
    MoveOperation* next() const        { Unimplemented(); return 0; }
    MoveOperation* prev() const        { Unimplemented(); return 0; }
    void set_processed()               { Unimplemented(); }
    bool is_processed() const          { Unimplemented(); return 0; }

    // insert
    void break_cycle(VMRegPair temp_register) { Unimplemented(); }

    void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
  };

 private:
  GrowableArray<MoveOperation*> edges;

 public:
  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
                    BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }

  // Collected all the move operations
  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
};


static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs, int type) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    assert((unsigned)gpargs < 256, "eek!");
    assert((unsigned)fpargs < 32, "eek!");
    __ lea(rscratch1, RuntimeAddress(dest));
    if (UseBuiltinSim)   __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
    __ blrt(rscratch1, rscratch2);
    __ maybe_isb();
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GCLocker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
#ifdef BUILTIN_SIM
  if (NotifySimulator) {
    // Names are up to 65536 chars long.  UTF8-coded strings are up to
    // 3 bytes per character.  We concatenate three such strings.
    // Yes, I know this is ridiculous, but it's debug code and glibc
    // allocates large arrays very efficiently.
    size_t len = (65536 * 3) * 3;
    char *name = new char[len];

    strncpy(name, method()->method_holder()->name()->as_utf8(), len);
    strncat(name, ".", len);
    strncat(name, method()->name()->as_utf8(), len);
    strncat(name, method()->signature()->as_utf8(), len);
    AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck)->notifyCompile(name, __ pc());
    delete[] name;
  }
#endif

  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    __ nop();
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
1367   bool is_critical_native = true;
1368   address native_func = method->critical_native_function();
1369   if (native_func == NULL) {
1370     native_func = method->native_function();
1371     is_critical_native = false;
1372   }
1373   assert(native_func != NULL, "must have function");
1374 
1375   // An OopMap for lock (and class if static)
1376   OopMapSet *oop_maps = new OopMapSet();
1377   intptr_t start = (intptr_t)__ pc();
1378 
  // We have received a description of where all the java args are located
1380   // on entry to the wrapper. We need to convert these args to where
1381   // the jni function will expect them. To figure out where they go
1382   // we convert the java signature to a C signature by inserting
1383   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1384 
1385   const int total_in_args = method->size_of_parameters();
1386   int total_c_args = total_in_args;
1387   if (!is_critical_native) {
1388     total_c_args += 1;
1389     if (method->is_static()) {
1390       total_c_args++;
1391     }
1392   } else {
1393     for (int i = 0; i < total_in_args; i++) {
1394       if (in_sig_bt[i] == T_ARRAY) {
1395         total_c_args++;
1396       }
1397     }
1398   }
1399 
1400   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1401   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1402   BasicType* in_elem_bt = NULL;
1403 
1404   int argc = 0;
1405   if (!is_critical_native) {
1406     out_sig_bt[argc++] = T_ADDRESS;
1407     if (method->is_static()) {
1408       out_sig_bt[argc++] = T_OBJECT;
1409     }
1410 
1411     for (int i = 0; i < total_in_args ; i++ ) {
1412       out_sig_bt[argc++] = in_sig_bt[i];
1413     }
1414   } else {
1415     Thread* THREAD = Thread::current();
1416     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1417     SignatureStream ss(method->signature());
1418     for (int i = 0; i < total_in_args ; i++ ) {
1419       if (in_sig_bt[i] == T_ARRAY) {
1420         // Arrays are passed as int, elem* pair
1421         out_sig_bt[argc++] = T_INT;
1422         out_sig_bt[argc++] = T_ADDRESS;
1423         Symbol* atype = ss.as_symbol(CHECK_NULL);
1424         const char* at = atype->as_C_string();
1425         if (strlen(at) == 2) {
1426           assert(at[0] == '[', "must be");
1427           switch (at[1]) {
1428             case 'B': in_elem_bt[i]  = T_BYTE; break;
1429             case 'C': in_elem_bt[i]  = T_CHAR; break;
1430             case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1431             case 'F': in_elem_bt[i]  = T_FLOAT; break;
1432             case 'I': in_elem_bt[i]  = T_INT; break;
1433             case 'J': in_elem_bt[i]  = T_LONG; break;
1434             case 'S': in_elem_bt[i]  = T_SHORT; break;
1435             case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1436             default: ShouldNotReachHere();
1437           }
1438         }
1439       } else {
1440         out_sig_bt[argc++] = in_sig_bt[i];
1441         in_elem_bt[i] = T_VOID;
1442       }
1443       if (in_sig_bt[i] != T_VOID) {
1444         assert(in_sig_bt[i] == ss.type(), "must match");
1445         ss.next();
1446       }
1447     }
1448   }
1449 
1450   // Now figure out where the args must be stored and how much stack space
1451   // they require.
1452   int out_arg_slots;
1453   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1454 
1455   // Compute framesize for the wrapper.  We need to handlize all oops in
1456   // incoming registers
1457 
1458   // Calculate the total number of stack slots we will need.
1459 
1460   // First count the abi requirement plus all of the outgoing args
1461   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1462 
1463   // Now the space for the inbound oop handle area
1464   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1465   if (is_critical_native) {
1466     // Critical natives may have to call out so they need a save area
1467     // for register arguments.
1468     int double_slots = 0;
1469     int single_slots = 0;
1470     for ( int i = 0; i < total_in_args; i++) {
1471       if (in_regs[i].first()->is_Register()) {
1472         const Register reg = in_regs[i].first()->as_Register();
1473         switch (in_sig_bt[i]) {
1474           case T_BOOLEAN:
1475           case T_BYTE:
1476           case T_SHORT:
1477           case T_CHAR:
1478           case T_INT:  single_slots++; break;
1479           case T_ARRAY:  // specific to LP64 (7145024)
1480           case T_LONG: double_slots++; break;
1481           default:  ShouldNotReachHere();
1482         }
1483       } else if (in_regs[i].first()->is_FloatRegister()) {
1484         ShouldNotReachHere();
1485       }
1486     }
1487     total_save_slots = double_slots * 2 + single_slots;
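    // For example, register args (int, long, int) give single_slots = 2 and
    // double_slots = 1, so total_save_slots = 1 * 2 + 2 = 4 slots.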
1488     // align the save area
1489     if (double_slots != 0) {
1490       stack_slots = align_up(stack_slots, 2);
1491     }
1492   }
1493 
1494   int oop_handle_offset = stack_slots;
1495   stack_slots += total_save_slots;
1496 
1497   // Now any space we need for handlizing a klass if static method
1498 
1499   int klass_slot_offset = 0;
1500   int klass_offset = -1;
1501   int lock_slot_offset = 0;
1502   bool is_static = false;
1503 
1504   if (method->is_static()) {
1505     klass_slot_offset = stack_slots;
1506     stack_slots += VMRegImpl::slots_per_word;
1507     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1508     is_static = true;
1509   }
1510 
1511   // Plus a lock if needed
1512 
1513   if (method->is_synchronized()) {
1514     lock_slot_offset = stack_slots;
1515     stack_slots += VMRegImpl::slots_per_word;
1516   }
1517 
1518   // Now a place (+2) to save return values or temp during shuffling
1519   // + 4 for return address (which we own) and saved rfp
1520   stack_slots += 6;
1521 
  // OK, the space we have allocated will look like:
1523   //
1524   //
1525   // FP-> |                     |
1526   //      |---------------------|
1527   //      | 2 slots for moves   |
1528   //      |---------------------|
1529   //      | lock box (if sync)  |
1530   //      |---------------------| <- lock_slot_offset
1531   //      | klass (if static)   |
1532   //      |---------------------| <- klass_slot_offset
1533   //      | oopHandle area      |
1534   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1535   //      | outbound memory     |
1536   //      | based arguments     |
1537   //      |                     |
1538   //      |---------------------|
1539   //      |                     |
1540   // SP-> | out_preserved_slots |
1541   //
1542   //
1543 
1544 
  // Now compute the actual number of stack words we need, rounding to keep
  // the stack properly aligned.
1547   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1548 
1549   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
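
  // For example, with 4-byte stack slots and 16-byte stack alignment,
  // StackAlignmentInSlots is 4, so 27 accumulated slots round up to 28,
  // giving a 112-byte frame.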
1550 
1551   // First thing make an ic check to see if we should even be here
1552 
1553   // We are free to use all registers as temps without saving them and
1554   // restoring them except rfp. rfp is the only callee save register
1555   // as far as the interpreter and the compiler(s) are concerned.
1556 
1557 
1558   const Register ic_reg = rscratch2;
1559   const Register receiver = j_rarg0;
1560 
1561   Label hit;
1562   Label exception_pending;
1563 
1564   assert_different_registers(ic_reg, receiver, rscratch1);
1565   __ verify_oop(receiver);
1566   __ cmp_klass(receiver, ic_reg, rscratch1);
1567   __ br(Assembler::EQ, hit);
1568 
1569   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1570 
1571   // Verified entry point must be aligned
1572   __ align(8);
1573 
1574   __ bind(hit);
1575 
1576   int vep_offset = ((intptr_t)__ pc()) - start;
1577 
1578   // If we have to make this method not-entrant we'll overwrite its
1579   // first instruction with a jump.  For this action to be legal we
1580   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1581   // SVC, HVC, or SMC.  Make it a NOP.
1582   __ nop();
1583 
1584   // Generate stack overflow check
1585   if (UseStackBanging) {
1586     __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
1587   } else {
1588     Unimplemented();
1589   }
1590 
1591   // Generate a new frame for the wrapper.
1592   __ enter();
1593   // -2 because return address is already present and so is saved rfp
1594   __ sub(sp, sp, stack_size - 2*wordSize);
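  // (enter() has already pushed lr and rfp, the 2 words accounted for above,
  // so only the remainder of the frame is allocated here.)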
1595 
1596   // Frame is now completed as far as size and linkage.
1597   int frame_complete = ((intptr_t)__ pc()) - start;
1598 
1599   // record entry into native wrapper code
1600   if (NotifySimulator) {
1601     __ notify(Assembler::method_entry);
1602   }
1603 
1604   // We use r20 as the oop handle for the receiver/klass
1605   // It is callee save so it survives the call to native
1606 
1607   const Register oop_handle_reg = r20;
1608 
1609   if (is_critical_native) {
1610     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
1611                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1612   }
1613 
1614   //
  // We immediately shuffle the arguments so that, for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.), we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.
1619 
1620   // -----------------
1621   // The Grand Shuffle
1622 
1623   // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument, the c calling
1625   // convention always has at least one more (and two for static) arguments than Java.
1626   // Therefore if we move the args from java -> c backwards then we will never have
1627   // a register->register conflict and we don't have to build a dependency graph
1628   // and figure out how to break any cycles.
1629   //
1630 
1631   // Record esp-based slot for receiver on stack for non-static methods
1632   int receiver_offset = -1;
1633 
1634   // This is a trick. We double the stack slots so we can claim
1635   // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
1637   // sure we can capture all the incoming oop args from the
1638   // caller.
1639   //
1640   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1641 
1642   // Mark location of rfp (someday)
1643   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1644 
1645 
1646   int float_args = 0;
1647   int int_args = 0;
1648 
1649 #ifdef ASSERT
1650   bool reg_destroyed[RegisterImpl::number_of_registers];
1651   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
1652   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1653     reg_destroyed[r] = false;
1654   }
1655   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
1656     freg_destroyed[f] = false;
1657   }
1658 
1659 #endif /* ASSERT */
1660 
1661   // This may iterate in two different directions depending on the
1662   // kind of native it is.  The reason is that for regular JNI natives
1663   // the incoming and outgoing registers are offset upwards and for
1664   // critical natives they are offset down.
1665   GrowableArray<int> arg_order(2 * total_in_args);
1666   VMRegPair tmp_vmreg;
1667   tmp_vmreg.set2(r19->as_VMReg());
1668 
1669   if (!is_critical_native) {
1670     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1671       arg_order.push(i);
1672       arg_order.push(c_arg);
1673     }
1674   } else {
1675     // Compute a valid move order, using tmp_vmreg to break any cycles
1676     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1677   }
1678 
1679   int temploc = -1;
1680   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1681     int i = arg_order.at(ai);
1682     int c_arg = arg_order.at(ai + 1);
1683     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1684     if (c_arg == -1) {
1685       assert(is_critical_native, "should only be required for critical natives");
1686       // This arg needs to be moved to a temporary
1687       __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1688       in_regs[i] = tmp_vmreg;
1689       temploc = i;
1690       continue;
1691     } else if (i == -1) {
1692       assert(is_critical_native, "should only be required for critical natives");
1693       // Read from the temporary location
1694       assert(temploc != -1, "must be valid");
1695       i = temploc;
1696       temploc = -1;
1697     }
1698 #ifdef ASSERT
1699     if (in_regs[i].first()->is_Register()) {
1700       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1701     } else if (in_regs[i].first()->is_FloatRegister()) {
1702       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1703     }
1704     if (out_regs[c_arg].first()->is_Register()) {
1705       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1706     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1707       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1708     }
1709 #endif /* ASSERT */
1710     switch (in_sig_bt[i]) {
1711       case T_ARRAY:
1712         if (is_critical_native) {
1713           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1714           c_arg++;
1715 #ifdef ASSERT
1716           if (out_regs[c_arg].first()->is_Register()) {
1717             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1718           } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1719             freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1720           }
1721 #endif
1722           int_args++;
1723           break;
1724         }
1725       case T_OBJECT:
1726         assert(!is_critical_native, "no oop arguments");
1727         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1728                     ((i == 0) && (!is_static)),
1729                     &receiver_offset);
1730         int_args++;
1731         break;
1732       case T_VOID:
1733         break;
1734 
1735       case T_FLOAT:
1736         float_move(masm, in_regs[i], out_regs[c_arg]);
1737         float_args++;
1738         break;
1739 
1740       case T_DOUBLE:
1741         assert( i + 1 < total_in_args &&
1742                 in_sig_bt[i + 1] == T_VOID &&
1743                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1744         double_move(masm, in_regs[i], out_regs[c_arg]);
1745         float_args++;
1746         break;
1747 
1748       case T_LONG :
1749         long_move(masm, in_regs[i], out_regs[c_arg]);
1750         int_args++;
1751         break;
1752 
1753       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1754 
1755       default:
1756         move32_64(masm, in_regs[i], out_regs[c_arg]);
1757         int_args++;
1758     }
1759   }
1760 
1761   // point c_arg at the first arg that is already loaded in case we
1762   // need to spill before we call out
1763   int c_arg = total_c_args - total_in_args;
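  // For a typical non-static JNI native this leaves c_arg == 1 (skipping only
  // the JNIEnv* slot); for a static one it is 2 until it is decremented below
  // once the class mirror has been preloaded.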
1764 
1765   // Pre-load a static method's oop into c_rarg1.
1766   if (method->is_static() && !is_critical_native) {
1767 
1768     //  load oop into a register
1769     __ movoop(c_rarg1,
1770               JNIHandles::make_local(method->method_holder()->java_mirror()),
1771               /*immediate*/true);
1772 
    // Now handlize the static class mirror; it's known not-null.
1774     __ str(c_rarg1, Address(sp, klass_offset));
1775     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1776 
1777     // Now get the handle
1778     __ lea(c_rarg1, Address(sp, klass_offset));
1779     // and protect the arg if we must spill
1780     c_arg--;
1781   }
1782 
1783   // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1785   // points into the right code segment. It does not have to be the correct return pc.
1786   // We use the same pc/oopMap repeatedly when we call out
1787 
1788   intptr_t the_pc = (intptr_t) __ pc();
1789   oop_maps->add_gc_map(the_pc - start, map);
1790 
1791   __ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1);
1792 
1793   Label dtrace_method_entry, dtrace_method_entry_done;
1794   {
1795     unsigned long offset;
1796     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1797     __ ldrb(rscratch1, Address(rscratch1, offset));
1798     __ cbnzw(rscratch1, dtrace_method_entry);
1799     __ bind(dtrace_method_entry_done);
1800   }
1801 
1802   // RedefineClasses() tracing support for obsolete method entry
1803   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1804     // protect the args we've loaded
1805     save_args(masm, total_c_args, c_arg, out_regs);
1806     __ mov_metadata(c_rarg1, method());
1807     __ call_VM_leaf(
1808       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1809       rthread, c_rarg1);
1810     restore_args(masm, total_c_args, c_arg, out_regs);
1811   }
1812 
1813   // Lock a synchronized method
1814 
1815   // Register definitions used by locking and unlocking
1816 
1817   const Register swap_reg = r0;
1818   const Register obj_reg  = r19;  // Will contain the oop
1819   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
1820   const Register old_hdr  = r13;  // value of old header at unlock time
1821   const Register tmp = lr;
1822 
1823   Label slow_path_lock;
1824   Label lock_done;
1825 
1826   if (method->is_synchronized()) {
1827     assert(!is_critical_native, "unhandled");
1828 
1829     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1830 
1831     // Get the handle (the 2nd argument)
1832     __ mov(oop_handle_reg, c_rarg1);
1833 
1834     // Get address of the box
1835 
1836     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1837 
1838     // Load the oop from the handle
1839     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1840 
1841     if (UseBiasedLocking) {
1842       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1843     }
1844 
1845     // Load (object->mark() | 1) into swap_reg %r0
1846     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1847     __ orr(swap_reg, rscratch1, 1);
1848 
1849     // Save (object->mark() | 1) into BasicLock's displaced header
1850     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1851 
1852     // src -> dest iff dest == r0 else r0 <- dest
1853     { Label here;
1854       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1855     }
1856 
1857     // Hmm should this move to the slow path code area???
1858 
1859     // Test if the oopMark is an obvious stack pointer, i.e.,
1860     //  1) (mark & 3) == 0, and
1861     //  2) sp <= mark < mark + os::pagesize()
1862     // These 3 tests can be done by evaluating the following
1863     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1864     // assuming both stack pointer and pagesize have their
1865     // least significant 2 bits clear.
1866     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
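    // Concretely, with a 4096-byte page size the mask is 3 - 4096 =
    // 0x...fffff003, so the result is zero exactly when (mark - sp) has its
    // low two bits clear and is less than one page.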
1867 
1868     __ sub(swap_reg, sp, swap_reg);
1869     __ neg(swap_reg, swap_reg);
1870     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1871 
1872     // Save the test result, for recursive case, the result is zero
1873     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1874     __ br(Assembler::NE, slow_path_lock);
1875 
1876     // Slow path will re-enter here
1877 
1878     __ bind(lock_done);
1879   }
1880 
1881 
1882   // Finally just about ready to make the JNI call
1883 
1884   // get JNIEnv* which is first argument to native
1885   if (!is_critical_native) {
1886     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1887   }
1888 
1889   // Now set thread in native
1890   __ mov(rscratch1, _thread_in_native);
1891   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1892   __ stlrw(rscratch1, rscratch2);
1893 
1894   {
1895     int return_type = 0;
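    // return_type uses MacroAssembler's ret_type_* encoding
    // (0 = void, 1 = integral, 2 = float, 3 = double), consumed by rt_call.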
1896     switch (ret_type) {
    case T_VOID:
      return_type = 0; break;
1899     case T_CHAR:
1900     case T_BYTE:
1901     case T_SHORT:
1902     case T_INT:
1903     case T_BOOLEAN:
1904     case T_LONG:
1905       return_type = 1; break;
1906     case T_ARRAY:
1907     case T_OBJECT:
1908       return_type = 1; break;
1909     case T_FLOAT:
1910       return_type = 2; break;
1911     case T_DOUBLE:
1912       return_type = 3; break;
1913     default:
1914       ShouldNotReachHere();
1915     }
1916     rt_call(masm, native_func,
1917             int_args + 2, // AArch64 passes up to 8 args in int registers
1918             float_args,   // and up to 8 float args
1919             return_type);
1920   }
1921 
1922   // Unpack native results.
1923   switch (ret_type) {
1924   case T_BOOLEAN: __ c2bool(r0);                     break;
1925   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1926   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1927   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1928   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1929   case T_DOUBLE :
1930   case T_FLOAT  :
    // Result is in v0; we'll save as needed
1932     break;
1933   case T_ARRAY:                 // Really a handle
1934   case T_OBJECT:                // Really a handle
1935       break; // can't de-handlize until after safepoint check
1936   case T_VOID: break;
1937   case T_LONG: break;
1938   default       : ShouldNotReachHere();
1939   }
1940 
1941   // Switch thread to "native transition" state before reading the synchronization state.
1942   // This additional state is necessary because reading and testing the synchronization
1943   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1944   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1945   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1946   //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
1948   __ mov(rscratch1, _thread_in_native_trans);
1949 
  if (os::is_MP()) {
1951     if (UseMembar) {
1952       __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1953 
1954       // Force this write out before the read below
1955       __ dmb(Assembler::ISH);
1956     } else {
1957       __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1958       __ stlrw(rscratch1, rscratch2);
1959 
1960       // Write serialization page so VM thread can do a pseudo remote membar.
1961       // We use the current thread pointer to calculate a thread specific
1962       // offset to write to within the page. This minimizes bus traffic
1963       // due to cache line collision.
1964       __ serialize_memory(rthread, r2);
1965     }
1966   } else {
1967     __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1968   }
1969 
1970   // check for safepoint operation in progress and/or pending suspend requests
1971   Label safepoint_in_progress, safepoint_in_progress_done;
1972   {
1973     __ safepoint_poll_acquire(safepoint_in_progress);
1974     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1975     __ cbnzw(rscratch1, safepoint_in_progress);
1976     __ bind(safepoint_in_progress_done);
1977   }
1978 
1979   // change thread state
1980   Label after_transition;
1981   __ mov(rscratch1, _thread_in_Java);
1982   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1983   __ stlrw(rscratch1, rscratch2);
1984   __ bind(after_transition);
1985 
1986   Label reguard;
1987   Label reguard_done;
1988   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1989   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
1990   __ br(Assembler::EQ, reguard);
1991   __ bind(reguard_done);
1992 
1993   // native result if any is live
1994 
1995   // Unlock
1996   Label unlock_done;
1997   Label slow_path_unlock;
1998   if (method->is_synchronized()) {
1999 
2000     // Get locked oop from the handle we passed to jni
2001     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2002 
2003     Label done;
2004 
2005     if (UseBiasedLocking) {
2006       __ biased_locking_exit(obj_reg, old_hdr, done);
2007     }
2008 
2009     // Simple recursive lock?
2010 
2011     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2012     __ cbz(rscratch1, done);
2013 
    // Must save r0 if it is live now because cmpxchg must use it
2015     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2016       save_native_result(masm, ret_type, stack_slots);
2017     }
2018 
2019 
2020     // get address of the stack lock
2021     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2022     //  get old displaced header
2023     __ ldr(old_hdr, Address(r0, 0));
2024 
2025     // Atomic swap old header if oop still contains the stack lock
2026     Label succeed;
2027     __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
2028     __ bind(succeed);
2029 
2030     // slow path re-enters here
2031     __ bind(unlock_done);
2032     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2033       restore_native_result(masm, ret_type, stack_slots);
2034     }
2035 
2036     __ bind(done);
2037   }
2038 
2039   Label dtrace_method_exit, dtrace_method_exit_done;
2040   {
2041     unsigned long offset;
2042     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2043     __ ldrb(rscratch1, Address(rscratch1, offset));
2044     __ cbnzw(rscratch1, dtrace_method_exit);
2045     __ bind(dtrace_method_exit_done);
2046   }
2047 
2048   __ reset_last_Java_frame(false);
2049 
2050   // Unbox oop result, e.g. JNIHandles::resolve result.
2051   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2052     __ resolve_jobject(r0, rthread, rscratch2);
2053   }
2054 
2055   if (CheckJNICalls) {
2056     // clear_pending_jni_exception_check
2057     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2058   }
2059 
2060   if (!is_critical_native) {
2061     // reset handle block
2062     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2063     __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
2064   }
2065 
2066   __ leave();
2067 
2068   if (!is_critical_native) {
2069     // Any exception pending?
2070     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2071     __ cbnz(rscratch1, exception_pending);
2072   }
2073 
2074   // record exit from native wrapper code
2075   if (NotifySimulator) {
2076     __ notify(Assembler::method_reentry);
2077   }
2078 
2079   // We're done
2080   __ ret(lr);
2081 
2082   // Unexpected paths are out of line and go here
2083 
2084   if (!is_critical_native) {
2085     // forward the exception
2086     __ bind(exception_pending);
2087 
2088     // and forward the exception
2089     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2090   }
2091 
2092   // Slow path locking & unlocking
2093   if (method->is_synchronized()) {
2094 
2095     __ block_comment("Slow path lock {");
2096     __ bind(slow_path_lock);
2097 
    // We have last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
2099     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2100 
2101     // protect the args we've loaded
2102     save_args(masm, total_c_args, c_arg, out_regs);
2103 
2104     __ mov(c_rarg0, obj_reg);
2105     __ mov(c_rarg1, lock_reg);
2106     __ mov(c_rarg2, rthread);
2107 
2108     // Not a leaf but we have last_Java_frame setup as we want
2109     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2110     restore_args(masm, total_c_args, c_arg, out_regs);
2111 
2112 #ifdef ASSERT
2113     { Label L;
2114       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2115       __ cbz(rscratch1, L);
2116       __ stop("no pending exception allowed on exit from monitorenter");
2117       __ bind(L);
2118     }
2119 #endif
2120     __ b(lock_done);
2121 
2122     __ block_comment("} Slow path lock");
2123 
2124     __ block_comment("Slow path unlock {");
2125     __ bind(slow_path_unlock);
2126 
    // If we haven't already saved the native result we must save it now, as
    // the floating-point (v) registers are still exposed.
2129 
2130     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2131       save_native_result(masm, ret_type, stack_slots);
2132     }
2133 
2134     __ mov(c_rarg2, rthread);
2135     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2136     __ mov(c_rarg0, obj_reg);
2137 
2138     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2139     // NOTE that obj_reg == r19 currently
2140     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2141     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2142 
2143     rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), 3, 0, 1);
2144 
2145 #ifdef ASSERT
2146     {
2147       Label L;
2148       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2149       __ cbz(rscratch1, L);
2150       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2151       __ bind(L);
2152     }
2153 #endif /* ASSERT */
2154 
2155     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2156 
2157     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2158       restore_native_result(masm, ret_type, stack_slots);
2159     }
2160     __ b(unlock_done);
2161 
2162     __ block_comment("} Slow path unlock");
2163 
2164   } // synchronized
2165 
2166   // SLOW PATH Reguard the stack if needed
2167 
2168   __ bind(reguard);
2169   save_native_result(masm, ret_type, stack_slots);
2170   rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), 0, 0, 0);
2171   restore_native_result(masm, ret_type, stack_slots);
2172   // and continue
2173   __ b(reguard_done);
2174 
2175   // SLOW PATH safepoint
2176   {
2177     __ block_comment("safepoint {");
2178     __ bind(safepoint_in_progress);
2179 
    // Don't use call_VM, as it will see a possible pending exception and forward it
    // and never return here, preventing us from clearing _last_native_pc down below.
2182     //
2183     save_native_result(masm, ret_type, stack_slots);
2184     __ mov(c_rarg0, rthread);
2185 #ifndef PRODUCT
    assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2187 #endif
2188     if (!is_critical_native) {
2189       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2190     } else {
2191       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2192     }
2193     __ blrt(rscratch1, 1, 0, 1);
2194     __ maybe_isb();
2195     // Restore any method result value
2196     restore_native_result(masm, ret_type, stack_slots);
2197 
2198     if (is_critical_native) {
2199       // The call above performed the transition to thread_in_Java so
2200       // skip the transition logic above.
2201       __ b(after_transition);
2202     }
2203 
2204     __ b(safepoint_in_progress_done);
2205     __ block_comment("} safepoint");
2206   }
2207 
2208   // SLOW PATH dtrace support
2209   {
2210     __ block_comment("dtrace entry {");
2211     __ bind(dtrace_method_entry);
2212 
    // We have all of the arguments set up at this point. We must not clobber
    // any argument register from here on, so protect them by saving and
    // restoring around the call; none of them holds an oop that would need an oopmap.
2215 
2216     save_args(masm, total_c_args, c_arg, out_regs);
2217     __ mov_metadata(c_rarg1, method());
2218     __ call_VM_leaf(
2219       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2220       rthread, c_rarg1);
2221     restore_args(masm, total_c_args, c_arg, out_regs);
2222     __ b(dtrace_method_entry_done);
2223     __ block_comment("} dtrace entry");
2224   }
2225 
2226   {
2227     __ block_comment("dtrace exit {");
2228     __ bind(dtrace_method_exit);
2229     save_native_result(masm, ret_type, stack_slots);
2230     __ mov_metadata(c_rarg1, method());
2231     __ call_VM_leaf(
2232          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2233          rthread, c_rarg1);
2234     restore_native_result(masm, ret_type, stack_slots);
2235     __ b(dtrace_method_exit_done);
2236     __ block_comment("} dtrace exit");
2237   }
2238 
2239 
2240   __ flush();
2241 
2242   nmethod *nm = nmethod::new_native_nmethod(method,
2243                                             compile_id,
2244                                             masm->code(),
2245                                             vep_offset,
2246                                             frame_complete,
2247                                             stack_slots / VMRegImpl::slots_per_word,
2248                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2249                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2250                                             oop_maps);
2251 
2252   if (is_critical_native) {
2253     nm->set_lazy_critical_native(true);
2254   }
2255 
2256   return nm;
2257 
2258 }
2259 
// This function returns the adjustment size (in number of words) to a c2i adapter
// activation, for use during deoptimization.
2262 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2263   assert(callee_locals >= callee_parameters,
2264           "test and remove; got more parms than locals");
2265   if (callee_locals < callee_parameters)
2266     return 0;                   // No adjustment for negative locals
2267   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2268   // diff is counted in stack words
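  // For example, a callee with 2 parameters and 5 locals (one stack word per
  // element) gives diff = 3, which rounds up to 4 words to preserve 16-byte
  // stack alignment.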
2269   return align_up(diff, 2);
2270 }
2271 
2272 
2273 //------------------------------generate_deopt_blob----------------------------
2274 void SharedRuntime::generate_deopt_blob() {
2275   // Allocate space for the code
2276   ResourceMark rm;
2277   // Setup code generation tools
2278   int pad = 0;
2279 #if INCLUDE_JVMCI
2280   if (EnableJVMCI || UseAOT) {
2281     pad += 512; // Increase the buffer size when compiling for JVMCI
2282   }
2283 #endif
2284   CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2285   MacroAssembler* masm = new MacroAssembler(&buffer);
2286   int frame_size_in_words;
2287   OopMap* map = NULL;
2288   OopMapSet *oop_maps = new OopMapSet();
2289 
2290 #ifdef BUILTIN_SIM
2291   AArch64Simulator *simulator;
2292   if (NotifySimulator) {
2293     simulator = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
2294     simulator->notifyCompile(const_cast<char*>("SharedRuntime::deopt_blob"), __ pc());
2295   }
2296 #endif
2297 
2298   // -------------
2299   // This code enters when returning to a de-optimized nmethod.  A return
  // address has been pushed on the stack, and return values are in
2301   // registers.
2302   // If we are doing a normal deopt then we were called from the patched
2303   // nmethod from the point we returned to the nmethod. So the return
2304   // address on the stack is wrong by NativeCall::instruction_size
2305   // We will adjust the value so it looks like we have the original return
2306   // address on the stack (like when we eagerly deoptimized).
2307   // In the case of an exception pending when deoptimizing, we enter
2308   // with a return address on the stack that points after the call we patched
2309   // into the exception handler. We have the following register state from,
2310   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2311   //    r0: exception oop
2312   //    r19: exception handler
2313   //    r3: throwing pc
2314   // So in this case we simply jam r3 into the useless return address and
2315   // the stack looks just like we want.
2316   //
2317   // At this point we need to de-opt.  We save the argument return
2318   // registers.  We call the first C routine, fetch_unroll_info().  This
2319   // routine captures the return values and returns a structure which
2320   // describes the current frame size and the sizes of all replacement frames.
2321   // The current frame is compiled code and may contain many inlined
2322   // functions, each with their own JVM state.  We pop the current frame, then
2323   // push all the new frames.  Then we call the C routine unpack_frames() to
2324   // populate these frames.  Finally unpack_frames() returns us the new target
2325   // address.  Notice that callee-save registers are BLOWN here; they have
2326   // already been captured in the vframeArray at the time the return PC was
2327   // patched.
2328   address start = __ pc();
2329   Label cont;
2330 
2331   // Prolog for non exception case!
2332 
2333   // Save everything in sight.
2334   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2335 
2336   // Normal deoptimization.  Save exec mode for unpack_frames.
2337   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2338   __ b(cont);
2339 
2340   int reexecute_offset = __ pc() - start;
2341 #if INCLUDE_JVMCI && !defined(COMPILER1)
2342   if (EnableJVMCI && UseJVMCICompiler) {
2343     // JVMCI does not use this kind of deoptimization
2344     __ should_not_reach_here();
2345   }
2346 #endif
2347 
2348   // Reexecute case
  // return address is the pc that describes what bci to re-execute at
2350 
2351   // No need to update map as each call to save_live_registers will produce identical oopmap
2352   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2353 
2354   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2355   __ b(cont);
2356 
2357 #if INCLUDE_JVMCI
2358   Label after_fetch_unroll_info_call;
2359   int implicit_exception_uncommon_trap_offset = 0;
2360   int uncommon_trap_offset = 0;
2361 
2362   if (EnableJVMCI || UseAOT) {
2363     implicit_exception_uncommon_trap_offset = __ pc() - start;
2364 
2365     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2366     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2367 
2368     uncommon_trap_offset = __ pc() - start;
2369 
2370     // Save everything in sight.
2371     RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2372     // fetch_unroll_info needs to call last_java_frame()
2373     Label retaddr;
2374     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2375 
2376     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2377     __ movw(rscratch1, -1);
2378     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2379 
2380     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2381     __ mov(c_rarg0, rthread);
2382     __ movw(c_rarg2, rcpool); // exec mode
2383     __ lea(rscratch1,
2384            RuntimeAddress(CAST_FROM_FN_PTR(address,
2385                                            Deoptimization::uncommon_trap)));
2386     __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);
2387     __ bind(retaddr);
2388     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2389 
2390     __ reset_last_Java_frame(false);
2391 
2392     __ b(after_fetch_unroll_info_call);
2393   } // EnableJVMCI
2394 #endif // INCLUDE_JVMCI
2395 
2396   int exception_offset = __ pc() - start;
2397 
2398   // Prolog for exception case
2399 
  // all registers are dead at this entry point, except for r0 and
  // r3, which contain the exception oop and exception pc
2402   // respectively.  Set them in TLS and fall thru to the
2403   // unpack_with_exception_in_tls entry point.
2404 
2405   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2406   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2407 
2408   int exception_in_tls_offset = __ pc() - start;
2409 
2410   // new implementation because exception oop is now passed in JavaThread
2411 
2412   // Prolog for exception case
2413   // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
2415   // tos: stack at point of call to method that threw the exception (i.e. only
2416   // args are on the stack, no return address)
2417 
2418   // The return address pushed by save_live_registers will be patched
2419   // later with the throwing pc. The correct value is not available
2420   // now because loading it from memory would destroy registers.
2421 
2422   // NB: The SP at this point must be the SP of the method that is
2423   // being deoptimized.  Deoptimization assumes that the frame created
2424   // here by save_live_registers is immediately below the method's SP.
2425   // This is a somewhat fragile mechanism.
2426 
2427   // Save everything in sight.
2428   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2429 
2430   // Now it is safe to overwrite any register
2431 
2432   // Deopt during an exception.  Save exec mode for unpack_frames.
2433   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2434 
2435   // load throwing pc from JavaThread and patch it as the return address
2436   // of the current frame. Then clear the field in JavaThread
2437 
2438   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2439   __ str(r3, Address(rfp, wordSize));
2440   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2441 
2442 #ifdef ASSERT
2443   // verify that there is really an exception oop in JavaThread
2444   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2445   __ verify_oop(r0);
2446 
2447   // verify that there is no pending exception
2448   Label no_pending_exception;
2449   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2450   __ cbz(rscratch1, no_pending_exception);
2451   __ stop("must not have pending exception here");
2452   __ bind(no_pending_exception);
2453 #endif
2454 
2455   __ bind(cont);
2456 
2457   // Call C code.  Need thread and this frame, but NOT official VM entry
2458   // crud.  We cannot block on this call, no GC can happen.
2459   //
2460   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2461 
2462   // fetch_unroll_info needs to call last_java_frame().
2463 
2464   Label retaddr;
2465   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2466 #ifdef ASSERT0
2467   { Label L;
2468     __ ldr(rscratch1, Address(rthread,
2469                               JavaThread::last_Java_fp_offset()));
2470     __ cbz(rscratch1, L);
2471     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2472     __ bind(L);
2473   }
2474 #endif // ASSERT
2475   __ mov(c_rarg0, rthread);
2476   __ mov(c_rarg1, rcpool);
2477   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2478   __ blrt(rscratch1, 1, 0, 1);
2479   __ bind(retaddr);
2480 
2481   // Need to have an oopmap that tells fetch_unroll_info where to
2482   // find any register it might need.
2483   oop_maps->add_gc_map(__ pc() - start, map);
2484 
2485   __ reset_last_Java_frame(false);
2486 
2487 #if INCLUDE_JVMCI
2488   if (EnableJVMCI || UseAOT) {
2489     __ bind(after_fetch_unroll_info_call);
2490   }
2491 #endif
2492 
2493   // Load UnrollBlock* into r5
2494   __ mov(r5, r0);
2495 
2496   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
  Label noException;
2498   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2499   __ br(Assembler::NE, noException);
2500   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // QQQ this is useless; it was NULL above
2502   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2503   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2504   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2505 
2506   __ verify_oop(r0);
2507 
2508   // Overwrite the result registers with the exception results.
2509   __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2510   // I think this is useless
2511   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2512 
2513   __ bind(noException);
2514 
2515   // Only register save data is on the stack.
2516   // Now restore the result registers.  Everything else is either dead
2517   // or captured in the vframeArray.
2518   RegisterSaver::restore_result_registers(masm);
2519 
  // All of the register save area has been popped off the stack. Only the
2521   // return address remains.
2522 
2523   // Pop all the frames we must move/replace.
2524   //
2525   // Frame picture (youngest to oldest)
2526   // 1: self-frame (no frame link)
2527   // 2: deopting frame  (no frame link)
2528   // 3: caller of deopting frame (could be compiled/interpreted).
2529   //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done, the return to frame 3 will still be on the stack.
2533 
2534   // Pop deoptimized frame
2535   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
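  // The recorded frame size includes the saved rfp/lr pair, so pop everything
  // above that pair first, then reload rfp and lr explicitly.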
2536   __ sub(r2, r2, 2 * wordSize);
2537   __ add(sp, sp, r2);
2538   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2539   // LR should now be the return address to the caller (3)
2540 
2541 #ifdef ASSERT
2542   // Compilers generate code that bang the stack by as much as the
2543   // interpreter would need. So this stack banging should never
2544   // trigger a fault. Verify that it does not on non product builds.
2545   if (UseStackBanging) {
2546     __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2547     __ bang_stack_size(r19, r2);
2548   }
2549 #endif
2550   // Load address of array of frame pcs into r2
2551   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2552 
2553   // Trash the old pc
2554   // __ addptr(sp, wordSize);  FIXME ????
2555 
2556   // Load address of array of frame sizes into r4
2557   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2558 
2559   // Load counter into r3
2560   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2561 
2562   // Now adjust the caller's stack to make up for the extra locals
2563   // but record the original sp so that we can save it in the skeletal interpreter
2564   // frame and the stack walking of interpreter_sender will get the unextended sp
2565   // value and not the "real" sp value.
2566 
2567   const Register sender_sp = r6;
2568 
2569   __ mov(sender_sp, sp);
2570   __ ldrw(r19, Address(r5,
2571                        Deoptimization::UnrollBlock::
2572                        caller_adjustment_offset_in_bytes()));
2573   __ sub(sp, sp, r19);
2574 
2575   // Push interpreter frames in a loop
2576   __ mov(rscratch1, (address)0xDEADDEAD);        // Make a recognizable pattern
2577   __ mov(rscratch2, rscratch1);
2578   Label loop;
2579   __ bind(loop);
2580   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2581   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2582   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2583   __ enter();                           // Save old & set new fp
2584   __ sub(sp, sp, r19);                  // Prolog
2585   // This value is corrected by layout_activation_impl
2586   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2587   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2588   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2589   __ sub(r3, r3, 1);                   // Decrement counter
2590   __ cbnz(r3, loop);
2591 
  // Re-push self-frame
2593   __ ldr(lr, Address(r2));
2594   __ enter();
2595 
2596   // Allocate a full sized register save area.  We subtract 2 because
2597   // enter() just pushed 2 words
2598   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2599 
2600   // Restore frame locals after moving the frame
2601   __ strd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
2602   __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2603 
2604   // Call C code.  Need thread but NOT official VM entry
2605   // crud.  We cannot block on this call, no GC can happen.  Call should
2606   // restore return values to their stack-slots with the new SP.
2607   //
2608   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2609 
2610   // Use rfp because the frames look interpreted now
2611   // Don't need the precise return PC here, just precise enough to point into this code blob.
2612   address the_pc = __ pc();
2613   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2614 
2615   __ mov(c_rarg0, rthread);
2616   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2617   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2618   __ blrt(rscratch1, 2, 0, 0);
2619 
2620   // Set an oopmap for the call site
2621   // Use the same PC we used for the last java frame
2622   oop_maps->add_gc_map(the_pc - start,
2623                        new OopMap( frame_size_in_words, 0 ));
2624 
2625   // Clear fp AND pc
2626   __ reset_last_Java_frame(true);
2627 
2628   // Collect return values
2629   __ ldrd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
2630   __ ldr(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
2631   // I think this is useless (throwing pc?)
2632   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2633 
2634   // Pop self-frame.
2635   __ leave();                           // Epilog
2636 
2637   // Jump to interpreter
2638   __ ret(lr);
2639 
2640   // Make sure all code is generated
2641   masm->flush();
2642 
2643   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2644   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2645 #if INCLUDE_JVMCI
2646   if (EnableJVMCI || UseAOT) {
2647     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2648     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2649   }
2650 #endif
2651 #ifdef BUILTIN_SIM
2652   if (NotifySimulator) {
2653     unsigned char *base = _deopt_blob->code_begin();
2654     simulator->notifyRelocate(start, base - start);
2655   }
2656 #endif
2657 }
2658 
2659 uint SharedRuntime::out_preserve_stack_slots() {
2660   return 0;
2661 }
2662 
2663 #if COMPILER2_OR_JVMCI
2664 //------------------------------generate_uncommon_trap_blob--------------------
2665 void SharedRuntime::generate_uncommon_trap_blob() {
2666   // Allocate space for the code
2667   ResourceMark rm;
2668   // Setup code generation tools
2669   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2670   MacroAssembler* masm = new MacroAssembler(&buffer);
2671 
2672 #ifdef BUILTIN_SIM
2673   AArch64Simulator *simulator;
2674   if (NotifySimulator) {
2675     simulator = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
2676     simulator->notifyCompile(const_cast<char*>("SharedRuntime:uncommon_trap_blob"), __ pc());
2677   }
2678 #endif
2679 
2680   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2681 
2682   address start = __ pc();
2683 
2684   // Push self-frame.  We get here with a return address in LR
2685   // and sp should be 16 byte aligned
2686   // push rfp and retaddr by hand
2687   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2688   // we don't expect an arg reg save area
2689 #ifndef PRODUCT
2690   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2691 #endif
  // The compiler left unloaded_class_index in j_rarg0; move it to where the
  // runtime expects it.
2694   if (c_rarg1 != j_rarg0) {
2695     __ movw(c_rarg1, j_rarg0);
2696   }
2697 
  // we need to set the last SP to the stack pointer of the stub frame
  // and the pc to the address where this runtime call will return
  // (although actually any pc in this code blob will do).
2701   Label retaddr;
2702   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2703 
2704   // Call C code.  Need thread but NOT official VM entry
2705   // crud.  We cannot block on this call, no GC can happen.  Call should
2706   // capture callee-saved registers as well as return values.
  // The thread is passed explicitly in c_rarg0 below.
2708   //
2709   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2710   //
2711   // n.b. 2 gp args, 0 fp args, integral return type
2712 
2713   __ mov(c_rarg0, rthread);
2714   __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2715   __ lea(rscratch1,
2716          RuntimeAddress(CAST_FROM_FN_PTR(address,
2717                                          Deoptimization::uncommon_trap)));
2718   __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);
2719   __ bind(retaddr);
2720 
2721   // Set an oopmap for the call site
2722   OopMapSet* oop_maps = new OopMapSet();
2723   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2724 
2725   // location of rfp is known implicitly by the frame sender code
2726 
2727   oop_maps->add_gc_map(__ pc() - start, map);
2728 
2729   __ reset_last_Java_frame(false);
2730 
2731   // move UnrollBlock* into r4
2732   __ mov(r4, r0);
2733 
2734 #ifdef ASSERT
2735   { Label L;
2736     __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2737     __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2738     __ br(Assembler::EQ, L);
2739     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2740     __ bind(L);
2741   }
2742 #endif
2743 
2744   // Pop all the frames we must move/replace.
2745   //
2746   // Frame picture (youngest to oldest)
2747   // 1: self-frame (no frame link)
2748   // 2: deopting frame  (no frame link)
2749   // 3: caller of deopting frame (could be compiled/interpreted).
2750 
2751   // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
2752   __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2753 
2754   // Pop deoptimized frame (int)
2755   __ ldrw(r2, Address(r4,
2756                       Deoptimization::UnrollBlock::
2757                       size_of_deoptimized_frame_offset_in_bytes()));
2758   __ sub(r2, r2, 2 * wordSize);
2759   __ add(sp, sp, r2);
2760   __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2761   // LR should now be the return address to the caller (3) frame

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    __ ldrw(r1, Address(r4,
                        Deoptimization::UnrollBlock::
                        total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(r1, r2);
  }
#endif

  // Load address of array of frame pcs into r2 (address*)
  __ ldr(r2, Address(r4,
                     Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  // Load address of array of frame sizes into r5 (intptr_t*)
  __ ldr(r5, Address(r4,
                     Deoptimization::UnrollBlock::
                     frame_sizes_offset_in_bytes()));

  // Counter
  __ ldrw(r3, Address(r4,
                      Deoptimization::UnrollBlock::
                      number_of_frames_offset_in_bytes())); // (int)

  // Now adjust the caller's stack to make up for the extra locals but
  // record the original sp so that we can save it in the skeletal
  // interpreter frame and the stack walking of interpreter_sender
  // will get the unextended sp value and not the "real" sp value.

  const Register sender_sp = r8;

  __ mov(sender_sp, sp);
  __ ldrw(r1, Address(r4,
                      Deoptimization::UnrollBlock::
                      caller_adjustment_offset_in_bytes())); // (int)
  __ sub(sp, sp, r1);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ ldr(r1, Address(r5, 0));       // Load frame size
  __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
  __ ldr(lr, Address(r2, 0));       // Save return address
  __ enter();                       // and old rfp & set new rfp
  __ sub(sp, sp, r1);               // Prolog
  __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
  // This value is corrected by layout_activation_impl
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(sender_sp, sp);          // Pass sender_sp to next frame
  __ add(r5, r5, wordSize);       // Bump array pointer (sizes)
  __ add(r2, r2, wordSize);       // Bump array pointer (pcs)
  __ subsw(r3, r3, 1);            // Decrement counter
  __ br(Assembler::GT, loop);
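  // (Each iteration materialises one skeletal interpreter frame: enter()
  //  pushes the pc/rfp pair loaded above, the sub reserves the rest of the
  //  frame, and sender_sp threads the unextended-sp chain that the stack
  //  walker relies on.)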
  __ ldr(lr, Address(r2, 0));     // save final return address
  // Re-push self-frame
  __ enter();                     // & old rfp & set new rfp

  // Use rfp because the frames look interpreted now
  // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  // The thread is in rthread; we pass it in c_rarg0 below.
  //
  // BasicType unpack_frames(JavaThread* thread, int exec_mode);
  //
  // n.b. 2 gp args, 0 fp args, integral return type

  // sp should already be aligned
  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true);

  // Pop self-frame.
  __ leave();                 // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);
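  // (framesize is in 4-byte slots; >> 1 converts it to the 8-byte-word
  //  frame size that the blob constructor expects.)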

#ifdef BUILTIN_SIM
  if (NotifySimulator) {
    unsigned char *base = _uncommon_trap_blob->code_begin();
    simulator->notifyRelocate(start, base - start);
  }
#endif
}
#endif // COMPILER2_OR_JVMCI


//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers
// and sets up the oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code.  Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start   = __ pc();
  address call_pc = NULL;
  int frame_size_in_words;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  // Save Integer and Float registers.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);

  // The following is basically a call_VM.  However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  if (!cause_return) {
    // overwrite the return address pushed by save_live_registers
    // Additionally, r20 is a callee-saved register so we can look at
    // it later to determine if someone changed the return address for
    // us!
    __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
    __ str(r20, Address(rfp, wordSize));
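    // (rfp + wordSize is the return-address slot of the frame that
    //  save_live_registers() built; the same slot is re-read below when
    //  deciding whether to step over the safepoint poll.)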
  }

  // Do the call
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(call_ptr));
  __ blrt(rscratch1, 1, 0, MacroAssembler::ret_type_integral);
  __ bind(retaddr);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map( __ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false);

  __ maybe_isb();
  __ membar(Assembler::LoadLoad | Assembler::LoadStore);

  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, noException);

  // Exception pending

  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  Label no_adjust, bail;
  if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
    // If our stashed return pc was modified by the runtime we avoid touching it
    __ ldr(rscratch1, Address(rfp, wordSize));
    __ cmp(r20, rscratch1);
    __ br(Assembler::NE, no_adjust);

#ifdef ASSERT
    // Verify the correct encoding of the poll we're about to skip.
    // See NativeInstruction::is_ldrw_to_zr()
    __ ldrw(rscratch1, Address(r20));
    __ ubfx(rscratch2, rscratch1, 22, 10);
    __ cmpw(rscratch2, 0b1011100101);
    __ br(Assembler::NE, bail);
    __ ubfx(rscratch2, rscratch1, 0, 5);
    __ cmpw(rscratch2, 0b11111);
    __ br(Assembler::NE, bail);
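    // (The two field extracts above decode "ldrw zr, [reg]": bits 31..22
    //  hold the LDR-immediate-word opcode 0b1011100101, and bits 4..0 name
    //  the destination register, 0b11111 == zr.)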
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    __ add(r20, r20, NativeInstruction::instruction_size);
    __ str(r20, Address(rfp, wordSize));
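    // (NativeInstruction::instruction_size is 4: AArch64 instructions are
    //  fixed-width, so one increment steps over exactly the poll.)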
  }

  __ bind(no_adjust);
  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(lr);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // Make sure all code is generated
  masm->flush();

  // Fill out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  {
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ mov(c_rarg0, rthread);
    __ lea(rscratch1, RuntimeAddress(destination));

    __ blrt(rscratch1, 1, 0, MacroAssembler::ret_type_integral);
    __ bind(retaddr);
  }

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ maybe_isb();

  // r0 contains the address we are going to jump to, assuming no exception
  // got installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false);
  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, pending);

  // get the returned Method*
  __ get_vm_result_2(rmethod, rthread);
  __ str(rmethod, Address(sp, RegisterSaver::reg_offset_in_bytes(rmethod)));
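  // (Writing the Method* into rmethod's slot in the register-save area
  //  means restore_live_registers() below reloads rmethod with it.)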

  // r0 is where we want to jump; overwrite rscratch1 which is saved and scratch
  __ str(r0, Address(sp, RegisterSaver::rscratch1_offset_in_bytes()));
  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ br(rscratch1);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));

  __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob; the frame size handed to RuntimeStub is in words
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}

#if COMPILER2_OR_JVMCI
// This is here instead of runtime_aarch64.cpp because it uses SimpleRuntimeFrame
//
//------------------------------generate_exception_blob---------------------------
// creates the exception blob at the end
// Using the exception blob, this code is jumped to from a compiled method.
// (see emit_exception_handler in the aarch64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-saved registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a branch, not a call.
//
// Arguments:
//   r0: exception oop
//   r3: exception pc
//
// Results:
//   r0: exception oop
//   r3: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
//

void OptoRuntime::generate_exception_blob() {
  assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R2_num), "");

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  // TODO: check the various assumptions made here before relying on this code

  address start = __ pc();

  // push rfp and retaddr by hand
  // Exception pc is 'return address' for stack walker
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // there are no callee-saved registers and we don't expect an
  // arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame in which the exception happened.
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

  // This call does all the hard work.  It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)
  //
  // n.b. 1 gp arg, 0 fp args, integral return type

  // the stack should always be aligned
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
  __ blrt(rscratch1, 1, 0, MacroAssembler::ret_type_integral);
  __ maybe_isb();

  // Set an oopmap for the call site.  This oopmap will only be used if we
  // are unwinding the stack.  Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false);

  // Restore callee-saved registers

  // rfp is an implicitly saved callee-saved register (i.e. the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone,
  // and we don't expect an arg reg save area.
  __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
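  // (This pops the rfp/'return address' pair pushed on entry; the saved
  //  return-address word, the exception pc per the comment at the push,
  //  lands in r3.)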

  // r0: exception handler

  // We have a handler in r0 (could be deopt blob).
  __ mov(r8, r0);

  // Get the exception oop
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));

  // r0: exception oop
  // r8: exception handler
  // r4: exception pc
  // Jump to handler

  __ br(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2_OR_JVMCI