/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef _WINDOWS
#include "alloca.h"
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};
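
// A sketch of the resulting stub frame (a reading aid, assuming
// frame::arg_reg_save_area_bytes == 0, i.e. the non-Windows case, so that
// rbp_off == 0; higher addresses on top):
//
//   return_off/return_off2 : return address  (2 jint slots == 1 word)
//   rbp_off/rbp_off2       : saved rbp       (2 jint slots == 1 word)  <- rsp
//
// framesize is then 4 slots == 2 words; on Windows the argument register
// save area sits below the saved rbp and biases every offset.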

class RegisterSaver {
  // Capture info about frame layout.  Layout offsets are in jint
  // units because compiler frame slots are jints.
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + 160/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    DEF_XMM_OFFS(2),
    DEF_XMM_OFFS(3),
    DEF_XMM_OFFS(4),
    DEF_XMM_OFFS(5),
    DEF_XMM_OFFS(6),
    DEF_XMM_OFFS(7),
    DEF_XMM_OFFS(8),
    DEF_XMM_OFFS(9),
    DEF_XMM_OFFS(10),
    DEF_XMM_OFFS(11),
    DEF_XMM_OFFS(12),
    DEF_XMM_OFFS(13),
    DEF_XMM_OFFS(14),
    DEF_XMM_OFFS(15),
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };
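
  // For reference (a reading aid): DEF_XMM_OFFS(1) above expands to
  //   xmm1_off = xmm_off + 1*16/BytesPerInt, xmm1H_off
  // so each XMM register owns a 16-byte (four-slot) chunk of the fxsave
  // image, with xmmN_off naming its base slot and xmmNH_off the slot
  // just above it.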

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int vect_words = 0;
#ifdef COMPILER2
  if (save_vectors) {
    assert(UseAVX > 0, "256bit vectors are supported only with AVX");
    assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
    // Save upper half of YMM registers
    vect_words = 16 * 16 / wordSize;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif

  // Always make the frame size 16-byte aligned
  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address, like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes

  if (vect_words > 0) {
    assert(vect_words*wordSize == 256, "");
    __ subptr(rsp, 256); // Save upper half of YMM registers
    __ vextractf128h(Address(rsp,  0),xmm0);
    __ vextractf128h(Address(rsp, 16),xmm1);
    __ vextractf128h(Address(rsp, 32),xmm2);
    __ vextractf128h(Address(rsp, 48),xmm3);
    __ vextractf128h(Address(rsp, 64),xmm4);
    __ vextractf128h(Address(rsp, 80),xmm5);
    __ vextractf128h(Address(rsp, 96),xmm6);
    __ vextractf128h(Address(rsp,112),xmm7);
    __ vextractf128h(Address(rsp,128),xmm8);
    __ vextractf128h(Address(rsp,144),xmm9);
    __ vextractf128h(Address(rsp,160),xmm10);
    __ vextractf128h(Address(rsp,176),xmm11);
    __ vextractf128h(Address(rsp,192),xmm12);
    __ vextractf128h(Address(rsp,208),xmm13);
    __ vextractf128h(Address(rsp,224),xmm14);
    __ vextractf128h(Address(rsp,240),xmm15);
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
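
// For example (a reading aid): with additional_frame_words == 0,
// STACK_OFFSET(rax_off) is just stack2reg(rax_off), the slot that lives
// rax_off * BytesPerInt bytes above rsp.  When the caller later extends
// the frame by additional_frame_words, every slot index is biased by
// additional_frame_slots so the offsets stay correct relative to the
// final rsp.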

  map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm0_off ), xmm0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm1_off ), xmm1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm2_off ), xmm2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm3_off ), xmm3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm4_off ), xmm4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm5_off ), xmm5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm6_off ), xmm6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm7_off ), xmm7->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm8_off ), xmm8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm9_off ), xmm9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm10_off), xmm10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm11_off), xmm11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm12_off), xmm12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm13_off), xmm13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm14_off), xmm14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm15_off), xmm15->as_VMReg());

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm0H_off ), xmm0->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm1H_off ), xmm1->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm2H_off ), xmm2->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm3H_off ), xmm3->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm4H_off ), xmm4->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm5H_off ), xmm5->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm6H_off ), xmm6->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm7H_off ), xmm7->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm8H_off ), xmm8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm9H_off ), xmm9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm10H_off), xmm10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm11H_off), xmm11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm12H_off), xmm12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm13H_off), xmm13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15->as_VMReg()->next());
  }

  return map;
}
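
// On return from save_live_registers the stub frame looks roughly like this
// (a sketch; low addresses at the bottom, optional pieces bracketed):
//
//   [ return address        ]   pushed by the caller
//   [ saved rbp             ]   enter()
//   [ flags .. fxsave image ]   push_CPU_state(), reg_save_size slots
//   [ upper YMM halves      ]   only when save_vectors (256 bytes)
//   [ arg reg save area     ]   <- rsp (Windows only)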

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
#ifdef COMPILER2
  if (restore_vectors) {
    // Restore upper half of YMM registers.
    assert(UseAVX > 0, "256bit vectors are supported only with AVX");
    assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
    __ vinsertf128h(xmm0, Address(rsp,  0));
    __ vinsertf128h(xmm1, Address(rsp, 16));
    __ vinsertf128h(xmm2, Address(rsp, 32));
    __ vinsertf128h(xmm3, Address(rsp, 48));
    __ vinsertf128h(xmm4, Address(rsp, 64));
    __ vinsertf128h(xmm5, Address(rsp, 80));
    __ vinsertf128h(xmm6, Address(rsp, 96));
    __ vinsertf128h(xmm7, Address(rsp,112));
    __ vinsertf128h(xmm8, Address(rsp,128));
    __ vinsertf128h(xmm9, Address(rsp,144));
    __ vinsertf128h(xmm10, Address(rsp,160));
    __ vinsertf128h(xmm11, Address(rsp,176));
    __ vinsertf128h(xmm12, Address(rsp,192));
    __ vinsertf128h(xmm13, Address(rsp,208));
    __ vinsertf128h(xmm14, Address(rsp,224));
    __ vinsertf128h(xmm15, Address(rsp,240));
    __ addptr(rsp, 256);
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif
  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore the result registers. Only used by deoptimization. By
  // now any callee-save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
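
// For example (a reading aid): the first incoming stack argument has
// reg2stack() == 0, so reg2offset_in yields (0 + 4) * 4 == 16, i.e.
// Address(rbp, 16) -- just past the saved rbp and return address, which
// together occupy 2 words == 4 stack slots.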

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.  Register values (up to
// RegisterImpl::number_of_registers) are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}
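
// A worked example of the mapping above (a sketch, using the j_rarg/j_farg
// aliases): for the signature (int, long, double), sig_bt is
// { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID } and the loop produces
//   regs[0].set1(j_rarg0)   -- 32-bit int
//   regs[1].set2(j_rarg1)   -- 64-bit long
//   regs[2]                 -- BAD (the long's T_VOID half)
//   regs[3].set2(j_farg0)   -- double in an XMM register
//   regs[4]                 -- BAD (the double's T_VOID half)
// and returns 0 since no argument spilled to the stack.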

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();

  // The VM needs the caller's callsite and the target method.
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach.

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}


static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need, plus one word for
  // the return address location, since we store it first rather than
  // holding it in rax across all the shuffling.

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM, and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot. See, I said
    // it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory, use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to the slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}
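
// Note on range_check: it branches to L_ok only when
// code_start < pc_reg < code_end, and otherwise falls through (L_fail is
// bound at the end), so a caller is expected to place its failure handling
// -- the __ stop() in gen_i2c_adapter below -- right after the last
// range_check call.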

static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args.  Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }


  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // Push the return address; this misaligns the stack by one word, which is
  // exactly what the youngest frame always sees right after the call
  // instruction.
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going through a c2i because of a miss, a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed with negative offsets so the LSW is at the LOW address.

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // Put the Method* where a c2i would expect it should we end up there.
  // Only needed because c2's resolve stubs return the Method* as a result
  // in rax.
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.

// NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3
    };
    static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3
    };
#else
    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
    };
    static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3,
      c_farg4, c_farg5, c_farg6, c_farg7
    };
#endif // _WIN64


    uint int_args = 0;
    uint fp_args = 0;
    uint stk_args = 0; // inc by 2 each time

    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
          fp_args++;
          // Allocate slots for the callee to stuff register args on the stack.
          stk_args += 2;
#endif
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
          fp_args++;
          stk_args += 2;
#endif
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
          int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
          stk_args += 2;
#endif
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
          int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
          stk_args += 2;
#endif
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}
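
// A worked example (a sketch): for a native signature (jint, jdouble, jint),
// sig_bt is { T_INT, T_DOUBLE, T_VOID, T_INT }.  On the SysV side this yields
// regs = { c_rarg0, c_farg0, BAD, c_rarg1 } and a return value of 0; on
// _WIN64, where int and fp args burn argument positions together, it yields
// regs = { c_rarg0, c_farg1, BAD, c_rarg2 } and the function returns 8, the
// mandatory home area for the four register args.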

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (sparc abi) even though java would only store 32 bits for a parameter.
// On 32-bit it will simply be 32 bits. So this routine will do 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}
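
// Net effect of object_move (a reading aid): rHandle ends up either NULL
// (when the incoming oop is NULL) or the address of a stack slot holding
// the oop -- the jobject shape native code expects -- and that slot is
// recorded in the OopMap so GC can relocate the oop while we are in
// native code.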

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
    for ( int i = first_arg ; i < arg_count ; i++ ) {
      if (args[i].first()->is_Register()) {
        __ push(args[i].first()->as_Register());
      } else if (args[i].first()->is_XMMRegister()) {
        __ subptr(rsp, 2*wordSize);
        __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
      }
    }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
    for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
      if (args[i].first()->is_Register()) {
        __ pop(args[i].first()->as_Register());
      } else if (args[i].first()->is_XMMRegister()) {
        __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
        __ addptr(rsp, 2*wordSize);
      }
    }
}
1268 
1269 
1270 static void save_or_restore_arguments(MacroAssembler* masm,
1271                                       const int stack_slots,
1272                                       const int total_in_args,
1273                                       const int arg_save_area,
1274                                       OopMap* map,
1275                                       VMRegPair* in_regs,
1276                                       BasicType* in_sig_bt) {
1277   // if map is non-NULL then the code should store the values,
1278   // otherwise it should load them.
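  // The walk is made in two passes so that offsets always match between
  // the store and load directions: double-word values (doubles, longs
  // and, on LP64, array pointers) are placed first on word-aligned
  // slots, then single-word values follow.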
  int slot = arg_save_area;
  // Save down double words first
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
      slot += VMRegImpl::slots_per_word;
    }
  }
  // Save or restore single word registers
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot++;
      assert(slot <= stack_slots, "overflow");

      // Value is in an input register; we must flush it to the stack
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Pin the incoming array argument of a java critical method
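// (Pinning is the alternative to blocking GC via GC_locker: when the
// collector supports object pinning, the array is held at a stable
// address for the duration of the native call; see the
// supports_object_pinning() checks at the call sites below.)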
static void pin_critical_native_array(MacroAssembler* masm,
                                      VMRegPair reg,
                                      int& pinned_slot) {
  __ block_comment("pin_critical_native_array {");
  Register tmp_reg = rax;

  Label is_null;
  VMRegPair tmp;
  VMRegPair in_reg = reg;
  bool on_stack = false;

  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
    on_stack = true;
  } else {
    __ movptr(rax, reg.first()->as_Register());
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);

  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
#ifdef _WIN64
  // caller-saved registers on Windows
  __ push(r10);
  __ push(r11);
#else
  __ push(c_rarg4);
  __ push(c_rarg5);
#endif

  if (reg.first()->as_Register() != c_rarg1) {
    __ movptr(c_rarg1, reg.first()->as_Register());
  }
  __ movptr(c_rarg0, r15_thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::pin_object)));

#ifdef _WIN64
  __ pop(r11);
  __ pop(r10);
#else
  __ pop(c_rarg5);
  __ pop(c_rarg4);
#endif
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);

  if (on_stack) {
    __ movptr(Address(rbp, reg2offset_in(in_reg.first())), rax);
    __ bind(is_null);
  } else {
    __ movptr(reg.first()->as_Register(), rax);

    // save on stack for unpinning later
    __ bind(is_null);
    assert(reg.first()->is_Register(), "Must be a register");
    int offset = pinned_slot * VMRegImpl::stack_slot_size;
    pinned_slot += VMRegImpl::slots_per_word;
    __ movq(Address(rsp, offset), rax);
  }
  __ block_comment("} pin_critical_native_array");
}

// Unpin the array argument of a java critical method
static void unpin_critical_native_array(MacroAssembler* masm,
                                        VMRegPair reg,
                                        int& pinned_slot) {
  __ block_comment("unpin_critical_native_array {");
  Label is_null;

  if (reg.first()->is_stack()) {
    __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
  } else {
    int offset = pinned_slot * VMRegImpl::stack_slot_size;
    pinned_slot += VMRegImpl::slots_per_word;
    __ movq(c_rarg1, Address(rsp, offset));
  }
  __ testptr(c_rarg1, c_rarg1);
  __ jccb(Assembler::equal, is_null);

  __ movptr(c_rarg0, r15_thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object)));

  __ bind(is_null);
  __ block_comment("} unpin_critical_native_array");
}


// Check GC_locker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
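// This slow path is only emitted when the heap cannot pin objects; see
// the supports_object_pinning() check at the call site.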
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
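// For example (a sketch): a critical native taking a Java byte[] sees it
// as the C argument pair (jint length, jbyte* body); a null array is
// passed as (0, NULL).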
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  __ block_comment("unpack_array_argument {");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp;
  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move_ptr(masm, tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move32_64(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  move_ptr(masm, tmp, body_arg);
  move32_64(masm, tmp, length_arg);
  __ bind(done);

  __ block_comment("} unpack_array_argument");
}


// Different signatures may require very different orders for the move
// to avoid clobbering other arguments.  There's no simple way to
// order them safely.  Compute a safe order for issuing stores and
// break any cycles in those stores.  This code is fairly general but
// it's not necessary on the other platforms so we keep it in the
// platform dependent code instead of moving it into a shared file.
// (See bugs 7013347 & 7145024.)
// Note that this code is specific to LP64.
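// As a sketch of the problem being solved: if the shuffle needs both
// rdi -> rsi and rsi -> rdi, neither store can be issued first without
// losing a value.  break_cycle() rewrites the pair as
//   rsi -> tmp;  rdi -> rsi;  tmp -> rdi;
// using the caller-supplied temporary to open the cycle.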
class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMRegPair        _src;
    VMRegPair        _dst;
    int              _src_index;
    int              _dst_index;
    bool             _processed;
    MoveOperation*   _next;
    MoveOperation*   _prev;

    static int get_id(VMRegPair r) {
      return r.first()->value();
    }

   public:
    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
      _src(src)
    , _src_index(src_index)
    , _dst(dst)
    , _dst_index(dst_index)
    , _next(NULL)
    , _prev(NULL)
    , _processed(false) {
    }

    VMRegPair src() const              { return _src; }
    int src_id() const                 { return get_id(src()); }
    int src_index() const              { return _src_index; }
    VMRegPair dst() const              { return _dst; }
    void set_dst(int i, VMRegPair dst) { _dst_index = i; _dst = dst; }
    int dst_index() const              { return _dst_index; }
    int dst_id() const                 { return get_id(dst()); }
    MoveOperation* next() const        { return _next; }
    MoveOperation* prev() const        { return _prev; }
    void set_processed()               { _processed = true; }
    bool is_processed() const          { return _processed; }

    // Insert a new store to break a cycle
    void break_cycle(VMRegPair temp_register) {
      // create a new store following the last store
      // to move from the temp_register to the original
      MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());

      // break the cycle of links and insert new_store at the end
      // break the reverse link.
      MoveOperation* p = prev();
      assert(p->next() == this, "must be");
      _prev = NULL;
      p->_next = new_store;
      new_store->_prev = p;

      // change the original store to save its value in the temp.
      set_dst(-1, temp_register);
    }

    void link(GrowableArray<MoveOperation*>& killer) {
      // link this store in front of the store that it depends on
      MoveOperation* n = killer.at_grow(src_id(), NULL);
      if (n != NULL) {
        assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
        _next = n;
        n->_prev = this;
      }
    }
  };

 private:
  GrowableArray<MoveOperation*> edges;

 public:
  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
                   BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
    // Move operations where the dest is the stack can all be
    // scheduled first since they can't interfere with the other moves.
    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
      if (in_sig_bt[i] == T_ARRAY) {
        c_arg--;
        if (out_regs[c_arg].first()->is_stack() &&
            out_regs[c_arg + 1].first()->is_stack()) {
          arg_order.push(i);
          arg_order.push(c_arg);
        } else {
          if (out_regs[c_arg].first()->is_stack() ||
              in_regs[i].first() == out_regs[c_arg].first()) {
            add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
          } else {
            add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
          }
        }
      } else if (in_sig_bt[i] == T_VOID) {
        arg_order.push(i);
        arg_order.push(c_arg);
      } else {
        if (out_regs[c_arg].first()->is_stack() ||
            in_regs[i].first() == out_regs[c_arg].first()) {
          arg_order.push(i);
          arg_order.push(c_arg);
        } else {
          add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
        }
      }
    }
    // Break any cycles in the register moves and emit the stores in the
    // proper order.
    GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
    for (int i = 0; i < stores->length(); i++) {
      arg_order.push(stores->at(i)->src_index());
      arg_order.push(stores->at(i)->dst_index());
    }
  }

  // Collect all the move operations
  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
    if (src.first() == dst.first()) return;
    edges.append(new MoveOperation(src_index, src, dst_index, dst));
  }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
    // Record which moves kill which values
    GrowableArray<MoveOperation*> killer;
    for (int i = 0; i < edges.length(); i++) {
      MoveOperation* s = edges.at(i);
      assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
      killer.at_put_grow(s->dst_id(), s, NULL);
    }
    assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
           "make sure temp isn't in the registers that are killed");

    // create links between loads and stores
    for (int i = 0; i < edges.length(); i++) {
      edges.at(i)->link(killer);
    }

    // at this point, all the move operations are chained together
    // in a doubly linked list.  Processing it backwards finds
    // the beginning of the chain, forwards finds the end.  If there's
    // a cycle it can be broken at any point, so pick an edge and walk
    // backward until the list ends or we end where we started.
    GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
    for (int e = 0; e < edges.length(); e++) {
      MoveOperation* s = edges.at(e);
      if (!s->is_processed()) {
        MoveOperation* start = s;
        // search for the beginning of the chain or cycle
        while (start->prev() != NULL && start->prev() != s) {
          start = start->prev();
        }
        if (start->prev() == s) {
          start->break_cycle(temp_register);
        }
        // walk the chain forward inserting to store list
        while (start != NULL) {
          stores->append(start);
          start->set_processed();
          start = start->next();
        }
      }
    }
    return stores;
  }
};

static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = rbx;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = rbx;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg_res("unexpected intrinsic id %d", iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = j_rarg0;  // known to be free at this point
      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GC_locker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//      call into JVM and possibly unlock the JNI critical
//      if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
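// (How a method acquires a critical entry point is decided elsewhere in
// the VM; roughly, native lookup resolves a JavaCritical_-prefixed symbol
// when CriticalJNINatives is enabled.  This wrapper only consumes
// method->critical_native_function() below.)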
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)
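  // For example (a sketch): for an instance method int m(long l) the
  // outgoing C signature is (JNIEnv*, jobject receiver, jlong l); were m
  // static, the class mirror would be inserted as arg[1] in place of the
  // receiver.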

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args; i++) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i]  = T_BYTE; break;
            case 'C': in_elem_bt[i]  = T_CHAR; break;
            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
            case 'F': in_elem_bt[i]  = T_FLOAT; break;
            case 'I': in_elem_bt[i]  = T_INT; break;
            case 'J': in_elem_bt[i]  = T_LONG; break;
            case 'S': in_elem_bt[i]  = T_SHORT; break;
            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // incoming registers

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  single_slots++; break;
          case T_ARRAY:  // specific to LP64 (7145024)
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_XMMRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = round_to(stack_slots, 2);
    }
  }

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2 slots) to save return values or temps during shuffling,
  // plus 4 slots for the return address (which we own) and the saved rbp
  stack_slots += 6;

  // Ok The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (6 java arg registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //


  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = round_to(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rbp. rbp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.


  const Register ic_reg = rax;
  const Register receiver = j_rarg0;

  Label hit;
  Label exception_pending;

  assert_different_registers(ic_reg, receiver, rscratch1);
  __ verify_oop(receiver);
  __ load_klass(rscratch1, receiver);
  __ cmpq(ic_reg, rscratch1);
  __ jcc(Assembler::equal, hit);

  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // Verified entry point must be aligned
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check

  if (UseStackBanging) {
    __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
  } else {
    // need a 5 byte instruction to allow MT safe patching to non-entrant
    __ fat_nop();
  }

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rbp
  __ subptr(rsp, stack_size - 2*wordSize);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  if (UseRTMLocking) {
    // Abort RTM transaction before calling JNI
    // because critical section will be large and will be
    // aborted anyway. Also nmethod could be deoptimized.
    __ xabort(0);
  }

#ifdef ASSERT
  {
    Label L;
    __ mov(rax, rsp);
    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("improperly aligned stack");
    __ bind(L);
  }
#endif /* ASSERT */


  // We use r14 as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = r14;

  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that, for any vm call we have
  // to make from here on out (sync slow path, jvmti, etc.), we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle

  // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention. However, because of the jni_env argument the c calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore if we move the args from java -> c backwards then we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
  //
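  // (Concretely, on linux-x86_64 the Java argument registers are the C
  // argument registers rotated by one (j_rarg0 == c_rarg1, and so on), so
  // when walking backwards a move never clobbers a source that has not
  // been read yet, and many of the register moves collapse to nops.)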

  // Record esp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  // Mark location of rbp (someday)
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));

  // Use eax, ebx as temporaries during any memory-memory moves we have to do
  // All inbound args are referenced based on rbp and all outbound args via rsp.


#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[XMMRegisterImpl::number_of_registers];
  for (int r = 0; r < RegisterImpl::number_of_registers; r++) {
    reg_destroyed[r] = false;
  }
  for (int f = 0; f < XMMRegisterImpl::number_of_registers; f++) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  // This may iterate in two different directions depending on the
  // kind of native it is.  The reason is that for regular JNI natives
  // the incoming and outgoing registers are offset upwards and for
  // critical natives they are offset down.
  GrowableArray<int> arg_order(2 * total_in_args);
  // Inbound arguments that need to be pinned for critical natives
  GrowableArray<int> pinned_args(total_in_args);
  // Current stack slot for storing register based array argument
  int pinned_slot = oop_handle_offset;

  VMRegPair tmp_vmreg;
  tmp_vmreg.set2(rbx->as_VMReg());

  if (!is_critical_native) {
    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
      arg_order.push(i);
      arg_order.push(c_arg);
    }
  } else {
    // Compute a valid move order, using tmp_vmreg to break any cycles
    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
  }

  int temploc = -1;
  for (int ai = 0; ai < arg_order.length(); ai += 2) {
    int i = arg_order.at(ai);
    int c_arg = arg_order.at(ai + 1);
    __ block_comment(err_msg("move %d -> %d", i, c_arg));
    if (c_arg == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // This arg needs to be moved to a temporary
      __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
      in_regs[i] = tmp_vmreg;
      temploc = i;
      continue;
    } else if (i == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // Read from the temporary location
      assert(temploc != -1, "must be valid");
      i = temploc;
      temploc = -1;
    }
#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
    } else if (in_regs[i].first()->is_XMMRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_XMMRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
    }
#endif /* ASSERT */
    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          // pin before unpack
          if (Universe::heap()->supports_object_pinning()) {
            assert(pinned_slot <= stack_slots, "overflow");
            pin_critical_native_array(masm, in_regs[i], pinned_slot);
            pinned_args.append(i);
          }
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
#ifdef ASSERT
          if (out_regs[c_arg].first()->is_Register()) {
            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
          } else if (out_regs[c_arg].first()->is_XMMRegister()) {
            freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
          }
#endif
          break;
        }
        // Fall through: for a regular native a T_ARRAY arg is just an oop
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert(i + 1 < total_in_args &&
               in_sig_bt[i + 1] == T_VOID &&
               out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG:
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  int c_arg;

  // Pre-load a static method's oop into r14.  Used both by locking code and
  // the normal JNI call code.
  if (!is_critical_native) {
    // point c_arg at the first arg that is already loaded in case we
    // need to spill before we call out
    c_arg = total_c_args - total_in_args;

    if (method->is_static()) {

      //  load oop into a register
      __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));

      // Now handlize the static class mirror; it's known not-null.
      __ movptr(Address(rsp, klass_offset), oop_handle_reg);
      map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

      // Now get the handle
      __ lea(oop_handle_reg, Address(rsp, klass_offset));
      // store the klass handle as second argument
      __ movptr(c_rarg1, oop_handle_reg);
      // and protect the arg if we must spill
      c_arg--;
    }
  } else {
    // For JNI critical methods we need to save all registers in save_args.
    c_arg = 0;
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  // We use the same pc/oopMap repeatedly when we call out

  intptr_t the_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(the_pc - start, map);

  __ set_last_Java_frame(rsp, noreg, (address)the_pc);


  // We have all of the arguments set up at this point. We must not touch any of the
  // register argument registers from here on (what if we were to save/restore them
  // when there are no oops?).

  {
    SkipIfEqual skip(masm, &DTraceMethodProbes, false);
    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
      r15_thread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      r15_thread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
  }

  // Lock a synchronized method

  // Register definitions used by locking and unlocking

  const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
  const Register obj_reg  = rbx;  // Will contain the oop
  const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
  const Register old_hdr  = r13;  // value of old header at unlock time

  Label slow_path_lock;
  Label lock_done;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ mov(oop_handle_reg, c_rarg1);

    // Get address of the box

    __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
    }

    // Load immediate 1 into swap_reg %rax
    __ movl(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax
    __ orptr(swap_reg, Address(obj_reg, 0));

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

    if (os::is_MP()) {
      __ lock();
    }

    // src -> dest iff dest == rax else rax <- dest
    __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
    __ jcc(Assembler::equal, lock_done);

    // Hmm should this move to the slow path code area???

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both the stack pointer and page size have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
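    // (Worked example: with a 4K page, 3 - 4096 == -4093, whose two's
    // complement form sets only bit 0, bit 1 and bits 12 and up.  The
    // andptr below is therefore zero exactly when mark - rsp is
    // non-negative, smaller than a page, and has its low two bits clear,
    // i.e. the mark is a stack address just above rsp: the recursive
    // lock case.)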
2420 
2421     __ subptr(swap_reg, rsp);
2422     __ andptr(swap_reg, 3 - os::vm_page_size());
2423 
2424     // Save the test result, for recursive case, the result is zero
2425     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2426     __ jcc(Assembler::notEqual, slow_path_lock);
2427 
2428     // Slow path will re-enter here
2429 
2430     __ bind(lock_done);
2431   }
2432 
2433 
2434   // Finally just about ready to make the JNI call
2435 
2436 
2437   // get JNIEnv* which is first argument to native
2438   if (!is_critical_native) {
2439     __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2440   }
2441 
2442   // Now set thread in native
2443   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2444 
2445   __ call(RuntimeAddress(native_func));
2446 
2447   // Verify or restore cpu control state after JNI call
2448   __ restore_cpu_control_state_after_jni();
2449 
2450   // Unpack native results.
2451   switch (ret_type) {
2452   case T_BOOLEAN: __ c2bool(rax);            break;
2453   case T_CHAR   : __ movzwl(rax, rax);      break;
2454   case T_BYTE   : __ sign_extend_byte (rax); break;
2455   case T_SHORT  : __ sign_extend_short(rax); break;
2456   case T_INT    : /* nothing to do */        break;
2457   case T_DOUBLE :
2458   case T_FLOAT  :
2459     // Result is in xmm0 we'll save as needed
2460     break;
2461   case T_ARRAY:                 // Really a handle
2462   case T_OBJECT:                // Really a handle
2463       break; // can't de-handlize until after safepoint check
2464   case T_VOID: break;
2465   case T_LONG: break;
2466   default       : ShouldNotReachHere();
2467   }
2468 
2469   // unpin pinned arguments
2470   pinned_slot = oop_handle_offset;
2471   if (pinned_args.length() > 0) {
2472     // save return value that may be overwritten otherwise.
2473     save_native_result(masm, ret_type, stack_slots);
2474     for (int index = 0; index < pinned_args.length(); index ++) {
2475       int i = pinned_args.at(index);
2476       assert(pinned_slot <= stack_slots, "overflow");
2477       unpin_critical_native_array(masm, in_regs[i], pinned_slot);
2478     }
2479     restore_native_result(masm, ret_type, stack_slots);
2480   }
2481 
2482   // Switch thread to "native transition" state before reading the synchronization state.
2483   // This additional state is necessary because reading and testing the synchronization
2484   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2485   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2486   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2487   //     Thread A is resumed to finish this native method, but doesn't block here since it
2488   //     didn't see any synchronization is progress, and escapes.
2489   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2490 
2491   if(os::is_MP()) {
2492     if (UseMembar) {
2493       // Force this write out before the read below
2494       __ membar(Assembler::Membar_mask_bits(
2495            Assembler::LoadLoad | Assembler::LoadStore |
2496            Assembler::StoreLoad | Assembler::StoreStore));
2497     } else {
2498       // Write serialization page so VM thread can do a pseudo remote membar.
2499       // We use the current thread pointer to calculate a thread specific
2500       // offset to write to within the page. This minimizes bus traffic
2501       // due to cache line collision.
2502       __ serialize_memory(r15_thread, rcx);
2503     }
2504   }
2505 
2506   Label after_transition;
2507 
2508   // check for safepoint operation in progress and/or pending suspend requests
2509   {
2510     Label Continue;
2511 
2512     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2513              SafepointSynchronize::_not_synchronized);
2514 
2515     Label L;
2516     __ jcc(Assembler::notEqual, L);
2517     __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2518     __ jcc(Assembler::equal, Continue);
2519     __ bind(L);
2520 
2521     // Don't use call_VM as it will see a possible pending exception and forward it
2522     // and never return here preventing us from clearing _last_native_pc down below.
2523     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2524     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2525     // by hand.
2526     //
2527     save_native_result(masm, ret_type, stack_slots);
2528     __ mov(c_rarg0, r15_thread);
2529     __ mov(r12, rsp); // remember sp
2530     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2531     __ andptr(rsp, -16); // align stack as required by ABI
2532     if (!is_critical_native) {
2533       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2534     } else {
2535       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2536     }
2537     __ mov(rsp, r12); // restore sp
2538     __ reinit_heapbase();
2539     // Restore any method result value
2540     restore_native_result(masm, ret_type, stack_slots);
2541 
2542     if (is_critical_native) {
2543       // The call above performed the transition to thread_in_Java so
2544       // skip the transition logic below.
2545       __ jmpb(after_transition);
2546     }
2547 
2548     __ bind(Continue);
2549   }
2550 
2551   // change thread state
2552   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2553   __ bind(after_transition);
2554 
2555   Label reguard;
2556   Label reguard_done;
2557   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2558   __ jcc(Assembler::equal, reguard);
2559   __ bind(reguard_done);
2560 
2561   // native result if any is live
2562 
2563   // Unlock
2564   Label unlock_done;
2565   Label slow_path_unlock;
2566   if (method->is_synchronized()) {
2567 
2568     // Get locked oop from the handle we passed to jni
2569     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2570 
2571     Label done;
2572 
2573     if (UseBiasedLocking) {
2574       __ biased_locking_exit(obj_reg, old_hdr, done);
2575     }
2576 
2577     // Simple recursive lock?
2578 
2579     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2580     __ jcc(Assembler::equal, done);
2581 
2582     // Must save rax if if it is live now because cmpxchg must use it
2583     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2584       save_native_result(masm, ret_type, stack_slots);
2585     }
2586 
2587 
2588     // get address of the stack lock
2589     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2590     //  get old displaced header
2591     __ movptr(old_hdr, Address(rax, 0));
2592 
2593     // Atomic swap old header if oop still contains the stack lock
2594     if (os::is_MP()) {
2595       __ lock();
2596     }
2597     __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2598     __ jcc(Assembler::notEqual, slow_path_unlock);
2599 
2600     // slow path re-enters here
2601     __ bind(unlock_done);
2602     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2603       restore_native_result(masm, ret_type, stack_slots);
2604     }
2605 
2606     __ bind(done);
2607 
2608   }
2609   {
2610     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2611     save_native_result(masm, ret_type, stack_slots);
2612     __ mov_metadata(c_rarg1, method());
2613     __ call_VM_leaf(
2614          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2615          r15_thread, c_rarg1);
2616     restore_native_result(masm, ret_type, stack_slots);
2617   }
2618 
2619   __ reset_last_Java_frame(false);
2620 
2621   // Unbox oop result, e.g. JNIHandles::resolve value.
2622   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2623     __ resolve_jobject(rax /* value */,
2624                        r15_thread /* thread */,
2625                        rcx /* tmp */);
2626   }
2627 
2628   if (!is_critical_native) {
2629     // reset handle block
2630     __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2631     __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2632   }
2633 
2634   // pop our frame
2635 
2636   __ leave();
2637 
2638   if (!is_critical_native) {
2639     // Any exception pending?
2640     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2641     __ jcc(Assembler::notEqual, exception_pending);
2642   }
2643 
2644   // Return
2645 
2646   __ ret(0);
2647 
2648   // Unexpected paths are out of line and go here
2649 
2650   if (!is_critical_native) {
2651     // forward the exception
2652     __ bind(exception_pending);
2653 
2654     // and forward the exception
2655     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2656   }
2657 
2658   // Slow path locking & unlocking
2659   if (method->is_synchronized()) {
2660 
2661     // BEGIN Slow path lock
2662     __ bind(slow_path_lock);
2663 
2664     // We have last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
2665     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2666 
2667     // protect the args we've loaded
2668     save_args(masm, total_c_args, c_arg, out_regs);
2669 
2670     __ mov(c_rarg0, obj_reg);
2671     __ mov(c_rarg1, lock_reg);
2672     __ mov(c_rarg2, r15_thread);
2673 
2674     // Not a leaf but we have last_Java_frame setup as we want
2675     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2676     restore_args(masm, total_c_args, c_arg, out_regs);
2677 
2678 #ifdef ASSERT
2679     { Label L;
2680     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2681     __ jcc(Assembler::equal, L);
2682     __ stop("no pending exception allowed on exit from monitorenter");
2683     __ bind(L);
2684     }
2685 #endif
2686     __ jmp(lock_done);
2687 
2688     // END Slow path lock
2689 
2690     // BEGIN Slow path unlock
2691     __ bind(slow_path_unlock);
2692 
2693     // If we haven't already saved the native result we must save it now as xmm registers
2694     // are still exposed.
2695 
2696     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2697       save_native_result(masm, ret_type, stack_slots);
2698     }
2699 
2700     __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2701 
2702     __ mov(c_rarg0, obj_reg);
2703     __ mov(r12, rsp); // remember sp
2704     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2705     __ andptr(rsp, -16); // align stack as required by ABI
2706 
2707     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2708     // NOTE that obj_reg == rbx currently
2709     __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2710     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2711 
2712     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2713     __ mov(rsp, r12); // restore sp
2714     __ reinit_heapbase();
2715 #ifdef ASSERT
2716     {
2717       Label L;
2718       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2719       __ jcc(Assembler::equal, L);
2720       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2721       __ bind(L);
2722     }
2723 #endif /* ASSERT */
2724 
2725     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2726 
2727     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2728       restore_native_result(masm, ret_type, stack_slots);
2729     }
2730     __ jmp(unlock_done);
2731 
2732     // END Slow path unlock
2733 
2734   } // synchronized
2735 
2736   // SLOW PATH Reguard the stack if needed
2737 
2738   __ bind(reguard);
2739   save_native_result(masm, ret_type, stack_slots);
2740   __ mov(r12, rsp); // remember sp
2741   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2742   __ andptr(rsp, -16); // align stack as required by ABI
2743   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2744   __ mov(rsp, r12); // restore sp
2745   __ reinit_heapbase();
2746   restore_native_result(masm, ret_type, stack_slots);
2747   // and continue
2748   __ jmp(reguard_done);
2749 
2750 
2751 
2752   __ flush();
2753 
2754   nmethod *nm = nmethod::new_native_nmethod(method,
2755                                             compile_id,
2756                                             masm->code(),
2757                                             vep_offset,
2758                                             frame_complete,
2759                                             stack_slots / VMRegImpl::slots_per_word,
2760                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2761                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2762                                             oop_maps);
2763 
2764   if (is_critical_native) {
2765     nm->set_lazy_critical_native(true);
2766   }
2767 
2768   return nm;
2769 
2770 }
2771 
2772 #ifdef HAVE_DTRACE_H
2773 // ---------------------------------------------------------------------------
2774 // Generate a dtrace nmethod for a given signature.  The method takes arguments
2775 // in the Java compiled code convention, marshals them to the native
2776 // abi and then leaves nops at the position you would expect to call a native
2777 // function. When the probe is enabled the nops are replaced with a trap
2778 // instruction that dtrace inserts, and the trap will cause a notification
2779 // to dtrace.
2780 //
2781 // The probes are only able to take primitive types and java/lang/String as
2782 // arguments.  No other java types are allowed. Strings are converted to utf8
2783 // strings so that, from dtrace's point of view, java strings are converted to C
2784 // strings. There is an arbitrary fixed limit on the total space that a method
2785 // can use for converting the strings (256 chars per string in the signature),
2786 // so any java string larger than this is truncated.
2787 
2788 static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2789 static bool offsets_initialized = false;
2790 
2791 
2792 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
2793                                                 methodHandle method) {
2794 
2795 
2796   // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2797   // be single threaded in this method.
2798   assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2799 
2800   if (!offsets_initialized) {
2801     fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize;
2802     fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize;
2803     fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize;
2804     fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize;
2805     fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize;
2806     fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize;
2807 
2808     fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize;
2809     fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize;
2810     fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize;
2811     fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize;
2812     fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize;
2813     fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize;
2814     fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize;
2815     fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize;
2816 
2817     offsets_initialized = true;
2818   }
2819   // Fill in the signature array, for the calling-convention call.
2820   int total_args_passed = method->size_of_parameters();
2821 
2822   BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2823   VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2824 
2825   // The signature we are going to use for the trap that dtrace will see:
2826   // java/lang/String is converted to a utf8 pointer; we drop "this", and any
2827   // other object is converted to NULL. (A one-slot java/lang/Long object
2828   // reference is converted to a two-slot long, which is why we double the allocation).
2829   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2830   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
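  // A worked illustration (hypothetical signature, for exposition only): for
  // an instance method taking (String, long, Integer, Object), the arrays
  // built below come out as
  //   in_sig_bt : T_OBJECT(this), T_OBJECT, T_LONG, T_VOID, T_OBJECT, T_OBJECT
  //   out_sig_bt: T_ADDRESS(utf8), T_LONG, T_VOID, T_INT(unboxed), T_OBJECT
  // where the receiver is skipped by the marshalling loops (dtrace never sees
  // it), the trailing T_VOID marks the second slot of the long, and the final
  // unsupported T_OBJECT is passed as NULL.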
2831 
2832   int i=0;
2833   int total_strings = 0;
2834   int first_arg_to_pass = 0;
2835   int total_c_args = 0;
2836 
2837   // Skip the receiver as dtrace doesn't want to see it
2838   if( !method->is_static() ) {
2839     in_sig_bt[i++] = T_OBJECT;
2840     first_arg_to_pass = 1;
2841   }
2842 
2843   // We need to convert the java args to where a native (non-jni) function
2844   // would expect them. To figure out where they go, we convert the java
2845   // signature to a C signature.
2846 
2847   SignatureStream ss(method->signature());
2848   for ( ; !ss.at_return_type(); ss.next()) {
2849     BasicType bt = ss.type();
2850     in_sig_bt[i++] = bt;  // Collect remaining bits of signature
2851     out_sig_bt[total_c_args++] = bt;
2852     if( bt == T_OBJECT) {
2853       Symbol* s = ss.as_symbol_or_null();   // symbol is created
2854       if (s == vmSymbols::java_lang_String()) {
2855         total_strings++;
2856         out_sig_bt[total_c_args-1] = T_ADDRESS;
2857       } else if (s == vmSymbols::java_lang_Boolean() ||
2858                  s == vmSymbols::java_lang_Character() ||
2859                  s == vmSymbols::java_lang_Byte() ||
2860                  s == vmSymbols::java_lang_Short() ||
2861                  s == vmSymbols::java_lang_Integer() ||
2862                  s == vmSymbols::java_lang_Float()) {
2863         out_sig_bt[total_c_args-1] = T_INT;
2864       } else if (s == vmSymbols::java_lang_Long() ||
2865                  s == vmSymbols::java_lang_Double()) {
2866         out_sig_bt[total_c_args-1] = T_LONG;
2867         out_sig_bt[total_c_args++] = T_VOID;
2868       }
2869     } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2870       in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
2871       // We convert double to long
2872       out_sig_bt[total_c_args-1] = T_LONG;
2873       out_sig_bt[total_c_args++] = T_VOID;
2874     } else if ( bt == T_FLOAT) {
2875       // We convert float to int
2876       out_sig_bt[total_c_args-1] = T_INT;
2877     }
2878   }
2879 
2880   assert(i==total_args_passed, "validly parsed signature");
2881 
2882   // Now get the compiled-Java layout as input arguments
2883   int comp_args_on_stack;
2884   comp_args_on_stack = SharedRuntime::java_calling_convention(
2885       in_sig_bt, in_regs, total_args_passed, false);
2886 
2887   // Now figure out where the args must be stored and how much stack space
2888   // they require (neglecting out_preserve_stack_slots but including space for
2889   // storing the 1st six register arguments). It's weird; see int_stk_helper.
2890 
2891   int out_arg_slots;
2892   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2893 
2894   // Calculate the total number of stack slots we will need.
2895 
2896   // First count the abi requirement plus all of the outgoing args
2897   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2898 
2899   // Now space for the string(s) we must convert
2900   int* string_locs   = NEW_RESOURCE_ARRAY(int, total_strings + 1);
2901   for (i = 0; i < total_strings ; i++) {
2902     string_locs[i] = stack_slots;
2903     stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
2904   }
2905 
2906   // Plus the temps we might need to juggle register args
2907   // regs take two slots each
2908   stack_slots += (Argument::n_int_register_parameters_c +
2909                   Argument::n_float_register_parameters_c) * 2;
2910 
2911 
2912   // + 4 slots for the return address (which we own) and saved rbp
2913 
2914   stack_slots += 4;
2915 
2916   // OK. The space we have allocated will look like:
2917   //
2918   //
2919   // FP-> |                     |
2920   //      |---------------------|
2921   //      | string[n]           |
2922   //      |---------------------| <- string_locs[n]
2923   //      | string[n-1]         |
2924   //      |---------------------| <- string_locs[n-1]
2925   //      | ...                 |
2926   //      | ...                 |
2927   //      |---------------------| <- string_locs[1]
2928   //      | string[0]           |
2929   //      |---------------------| <- string_locs[0]
2930   //      | outbound memory     |
2931   //      | based arguments     |
2932   //      |                     |
2933   //      |---------------------|
2934   //      |                     |
2935   // SP-> | out_preserved_slots |
2936   //
2937   //
2938 
2939   // Now compute the actual number of stack words we need, rounding to keep
2940   // the stack properly aligned.
2941   stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2942 
2943   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
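  // A worked example with illustrative numbers only: if 53 slots have been
  // accumulated above, round_to(53, 4 * VMRegImpl::slots_per_word) =
  // round_to(53, 8) rounds up to 56 slots, i.e. stack_size = 56 * 4 = 224
  // bytes, a multiple of the 16-byte stack alignment.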
2944 
2945   intptr_t start = (intptr_t)__ pc();
2946 
2947   // First thing: make an ic check to see if we should even be here
2948 
2949   // We are free to use all registers as temps without saving them and
2950   // restoring them except rbp. rbp is the only callee-saved register
2951   // as far as the interpreter and the compiler(s) are concerned.
2952 
2953   const Register ic_reg = rax;
2954   const Register receiver = rcx;
2955   Label hit;
2956   Label exception_pending;
2957 
2958 
2959   __ verify_oop(receiver);
2960   __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
2961   __ jcc(Assembler::equal, hit);
2962 
2963   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2964 
2965   // The verified entry must be aligned for code patching,
2966   // and the first 5 bytes must be in the same cache line.
2967   // If we align at 8 then the 5 bytes are sure to be in the same line,
2968   // since 8 divides the cache line size and 5 <= 8.
2968   __ align(8);
2969 
2970   __ bind(hit);
2971 
2972   int vep_offset = ((intptr_t)__ pc()) - start;
2973 
2974 
2975   // The instruction at the verified entry point must be 5 bytes or longer
2976   // because it can be patched on the fly by make_non_entrant. The stack bang
2977   // instruction fits that requirement.
2978 
2979   // Generate stack overflow check
2980 
2981   if (UseStackBanging) {
2982     if (stack_size <= StackShadowPages*os::vm_page_size()) {
2983       __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2984     } else {
2985       __ movl(rax, stack_size);
2986       __ bang_stack_size(rax, rbx);
2987     }
2988   } else {
2989     // need a 5 byte instruction to allow MT safe patching to non-entrant
2990     __ fat_nop();
2991   }
2992 
2993   assert(((uintptr_t)__ pc() - start - vep_offset) >= 5,
2994          "valid size for make_non_entrant");
2995 
2996   // Generate a new frame for the wrapper.
2997   __ enter();
2998 
2999   // - 4 slots (2 words) because the return address is already present and so is saved rbp
3000   if (stack_size - 2*wordSize != 0) {
3001     __ subq(rsp, stack_size - 2*wordSize);
3002   }
3003 
3004   // Frame is now completed as far as size and linkage.
3005 
3006   int frame_complete = ((intptr_t)__ pc()) - start;
3007 
3008   int c_arg, j_arg;
3009 
3010   // State of input register args
3011 
3012   bool  live[ConcreteRegisterImpl::number_of_registers];
3013 
3014   live[j_rarg0->as_VMReg()->value()] = false;
3015   live[j_rarg1->as_VMReg()->value()] = false;
3016   live[j_rarg2->as_VMReg()->value()] = false;
3017   live[j_rarg3->as_VMReg()->value()] = false;
3018   live[j_rarg4->as_VMReg()->value()] = false;
3019   live[j_rarg5->as_VMReg()->value()] = false;
3020 
3021   live[j_farg0->as_VMReg()->value()] = false;
3022   live[j_farg1->as_VMReg()->value()] = false;
3023   live[j_farg2->as_VMReg()->value()] = false;
3024   live[j_farg3->as_VMReg()->value()] = false;
3025   live[j_farg4->as_VMReg()->value()] = false;
3026   live[j_farg5->as_VMReg()->value()] = false;
3027   live[j_farg6->as_VMReg()->value()] = false;
3028   live[j_farg7->as_VMReg()->value()] = false;
3029 
3030 
3031   bool rax_is_zero = false;
3032 
3033   // All args (except strings) destined for the stack are moved first
3034   for (j_arg = first_arg_to_pass, c_arg = 0 ;
3035        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3036     VMRegPair src = in_regs[j_arg];
3037     VMRegPair dst = out_regs[c_arg];
3038 
3039     // Get the real reg value or a dummy (rsp)
3040 
3041     int src_reg = src.first()->is_reg() ?
3042                   src.first()->value() :
3043                   rsp->as_VMReg()->value();
3044 
3045     bool useless =  in_sig_bt[j_arg] == T_ARRAY ||
3046                     (in_sig_bt[j_arg] == T_OBJECT &&
3047                      out_sig_bt[c_arg] != T_INT &&
3048                      out_sig_bt[c_arg] != T_ADDRESS &&
3049                      out_sig_bt[c_arg] != T_LONG);
3050 
3051     live[src_reg] = !useless;
3052 
3053     if (dst.first()->is_stack()) {
3054 
3055       // Even though a string arg in a register is still live after this loop,
3056       // it will be dead after the string conversion loop (next), so we take
3057       // advantage of that now to simplify the code that manages liveness.
3058 
3059       live[src_reg] = false;
3060       switch (in_sig_bt[j_arg]) {
3061 
3062         case T_ARRAY:
3063         case T_OBJECT:
3064           {
3065             Address stack_dst(rsp, reg2offset_out(dst.first()));
3066 
3067             if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3068               // need to unbox a one-word value
3069               Register in_reg = rax;
3070               if ( src.first()->is_reg() ) {
3071                 in_reg = src.first()->as_Register();
3072               } else {
3073                 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
3074                 rax_is_zero = false;
3075               }
3076               Label skipUnbox;
3077               __ movptr(Address(rsp, reg2offset_out(dst.first())),
3078                         (int32_t)NULL_WORD);
3079               __ testq(in_reg, in_reg);
3080               __ jcc(Assembler::zero, skipUnbox);
3081 
3082               BasicType bt = out_sig_bt[c_arg];
3083               int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3084               Address src1(in_reg, box_offset);
3085               if ( bt == T_LONG ) {
3086                 __ movq(in_reg,  src1);
3087                 __ movq(stack_dst, in_reg);
3088                 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3089                 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3090               } else {
3091                 __ movl(in_reg,  src1);
3092                 __ movl(stack_dst, in_reg);
3093               }
3094 
3095               __ bind(skipUnbox);
3096             } else if (out_sig_bt[c_arg] != T_ADDRESS) {
3097               // Convert the arg to NULL
3098               if (!rax_is_zero) {
3099                 __ xorq(rax, rax);
3100                 rax_is_zero = true;
3101               }
3102               __ movq(stack_dst, rax);
3103             }
3104           }
3105           break;
3106 
3107         case T_VOID:
3108           break;
3109 
3110         case T_FLOAT:
3111           // This does the right thing since we know it is destined for the
3112           // stack
3113           float_move(masm, src, dst);
3114           break;
3115 
3116         case T_DOUBLE:
3117           // This does the right thing since we know it is destined for the
3118           // stack
3119           double_move(masm, src, dst);
3120           break;
3121 
3122         case T_LONG :
3123           long_move(masm, src, dst);
3124           break;
3125 
3126         case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
3127 
3128         default:
3129           move32_64(masm, src, dst);
3130       }
3131     }
3132 
3133   }
3134 
3135   // If we have any strings we must store any register based arg to the stack
3136   // This includes any still live xmm registers too.
3137 
3138   int sid = 0;
3139 
3140   if (total_strings > 0 ) {
3141     for (j_arg = first_arg_to_pass, c_arg = 0 ;
3142          j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3143       VMRegPair src = in_regs[j_arg];
3144       VMRegPair dst = out_regs[c_arg];
3145 
3146       if (src.first()->is_reg()) {
3147         Address src_tmp(rbp, fp_offset[src.first()->value()]);
3148 
3149         // String oops were left untouched by the previous loop even if the
3150         // eventual (converted) arg is destined for the stack, so park them
3151         // away now (except for the first)
3152 
3153         if (out_sig_bt[c_arg] == T_ADDRESS) {
3154           Address utf8_addr = Address(
3155               rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3156           if (sid != 1) {
3157             // The first string arg won't be killed until after the utf8
3158             // conversion
3159             __ movq(utf8_addr, src.first()->as_Register());
3160           }
3161         } else if (dst.first()->is_reg()) {
3162           if (in_sig_bt[j_arg] == T_FLOAT || in_sig_bt[j_arg] == T_DOUBLE) {
3163 
3164             // Convert the xmm register to an int and store it in the reserved
3165             // location for the eventual c register arg
3166             XMMRegister f = src.first()->as_XMMRegister();
3167             if (in_sig_bt[j_arg] == T_FLOAT) {
3168               __ movflt(src_tmp, f);
3169             } else {
3170               __ movdbl(src_tmp, f);
3171             }
3172           } else {
3173             // If the arg is an oop type we don't support, don't bother to store
3174             // it; remember, strings were handled above.
3175             bool useless =  in_sig_bt[j_arg] == T_ARRAY ||
3176                             (in_sig_bt[j_arg] == T_OBJECT &&
3177                              out_sig_bt[c_arg] != T_INT &&
3178                              out_sig_bt[c_arg] != T_LONG);
3179 
3180             if (!useless) {
3181               __ movq(src_tmp, src.first()->as_Register());
3182             }
3183           }
3184         }
3185       }
3186       if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3187         assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3188         ++c_arg; // skip over T_VOID to keep the loop indices in sync
3189       }
3190     }
3191 
3192     // Now that the volatile registers are safe, convert all the strings
3193     sid = 0;
3194 
3195     for (j_arg = first_arg_to_pass, c_arg = 0 ;
3196          j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3197       if (out_sig_bt[c_arg] == T_ADDRESS) {
3198         // It's a string
3199         Address utf8_addr = Address(
3200             rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3201         // The first string we find might still be in the original java arg
3202         // register
3203 
3204         VMReg src = in_regs[j_arg].first();
3205 
3206         // We will need to eventually save the final argument to the trap
3207         // in the non-volatile location dedicated to src. This is the offset
3208         // from fp we will use.
3209         int src_off = src->is_reg() ?
3210             fp_offset[src->value()] : reg2offset_in(src);
3211 
3212         // This is where the argument will eventually reside
3213         VMRegPair dst = out_regs[c_arg];
3214 
3215         if (src->is_reg()) {
3216           if (sid == 1) {
3217             __ movq(c_rarg0, src->as_Register());
3218           } else {
3219             __ movq(c_rarg0, utf8_addr);
3220           }
3221         } else {
3222           // arg is still in the original location
3223           __ movq(c_rarg0, Address(rbp, reg2offset_in(src)));
3224         }
3225         Label done, convert;
3226 
3227         // see if the oop is NULL
3228         __ testq(c_rarg0, c_rarg0);
3229         __ jcc(Assembler::notEqual, convert);
3230 
3231         if (dst.first()->is_reg()) {
3232           // Save the ptr to the utf8 string in the original src loc or the tmp
3233           // dedicated to it
3234           __ movq(Address(rbp, src_off), c_rarg0);
3235         } else {
3236           __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg0);
3237         }
3238         __ jmp(done);
3239 
3240         __ bind(convert);
3241 
3242         __ lea(c_rarg1, utf8_addr);
3243         if (dst.first()->is_reg()) {
3244           __ movq(Address(rbp, src_off), c_rarg1);
3245         } else {
3246           __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg1);
3247         }
3248         // And do the conversion
3249         __ call(RuntimeAddress(
3250                 CAST_FROM_FN_PTR(address, SharedRuntime::get_utf)));
3251 
3252         __ bind(done);
3253       }
3254       if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3255         assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3256         ++c_arg; // skip over T_VOID to keep the loop indices in sync
3257       }
3258     }
3259     // The get_utf call killed all the c_arg registers
3260     live[c_rarg0->as_VMReg()->value()] = false;
3261     live[c_rarg1->as_VMReg()->value()] = false;
3262     live[c_rarg2->as_VMReg()->value()] = false;
3263     live[c_rarg3->as_VMReg()->value()] = false;
3264     live[c_rarg4->as_VMReg()->value()] = false;
3265     live[c_rarg5->as_VMReg()->value()] = false;
3266 
3267     live[c_farg0->as_VMReg()->value()] = false;
3268     live[c_farg1->as_VMReg()->value()] = false;
3269     live[c_farg2->as_VMReg()->value()] = false;
3270     live[c_farg3->as_VMReg()->value()] = false;
3271     live[c_farg4->as_VMReg()->value()] = false;
3272     live[c_farg5->as_VMReg()->value()] = false;
3273     live[c_farg6->as_VMReg()->value()] = false;
3274     live[c_farg7->as_VMReg()->value()] = false;
3275   }
3276 
3277   // Now we can finally move the register args to their desired locations
3278 
3279   rax_is_zero = false;
3280 
3281   for (j_arg = first_arg_to_pass, c_arg = 0 ;
3282        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3283 
3284     VMRegPair src = in_regs[j_arg];
3285     VMRegPair dst = out_regs[c_arg];
3286 
3287     // Only need to look for args destined for the integer registers (since we
3288     // convert float/double args to look like int/long outbound)
3289     if (dst.first()->is_reg()) {
3290       Register r =  dst.first()->as_Register();
3291 
3292       // Check if the java arg is unsupported and therefore useless
3293       bool useless =  in_sig_bt[j_arg] == T_ARRAY ||
3294                       (in_sig_bt[j_arg] == T_OBJECT &&
3295                        out_sig_bt[c_arg] != T_INT &&
3296                        out_sig_bt[c_arg] != T_ADDRESS &&
3297                        out_sig_bt[c_arg] != T_LONG);
3298 
3299 
3300       // If we're going to kill an existing arg save it first
3301       if (live[dst.first()->value()]) {
3302         // you can't kill yourself
3303         if (src.first() != dst.first()) {
3304           __ movq(Address(rbp, fp_offset[dst.first()->value()]), r);
3305         }
3306       }
3307       if (src.first()->is_reg()) {
3308         if (live[src.first()->value()] ) {
3309           if (in_sig_bt[j_arg] == T_FLOAT) {
3310             __ movdl(r, src.first()->as_XMMRegister());
3311           } else if (in_sig_bt[j_arg] == T_DOUBLE) {
3312             __ movdq(r, src.first()->as_XMMRegister());
3313           } else if (r != src.first()->as_Register()) {
3314             if (!useless) {
3315               __ movq(r, src.first()->as_Register());
3316             }
3317           }
3318         } else {
3319           // If the arg is an oop type we don't support, don't bother to store
3320           // it
3321           if (!useless) {
3322             if (in_sig_bt[j_arg] == T_DOUBLE ||
3323                 in_sig_bt[j_arg] == T_LONG  ||
3324                 in_sig_bt[j_arg] == T_OBJECT ) {
3325               __ movq(r, Address(rbp, fp_offset[src.first()->value()]));
3326             } else {
3327               __ movl(r, Address(rbp, fp_offset[src.first()->value()]));
3328             }
3329           }
3330         }
3331         live[src.first()->value()] = false;
3332       } else if (!useless) {
3333         // full sized move even for int should be ok
3334         __ movq(r, Address(rbp, reg2offset_in(src.first())));
3335       }
3336 
3337       // At this point r has the original java arg in the final location
3338       // (assuming it wasn't useless). If the java arg was an oop
3339       // we have a bit more to do
3340 
3341       if (in_sig_bt[j_arg] == T_ARRAY || in_sig_bt[j_arg] == T_OBJECT ) {
3342         if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3343           // need to unbox a one-word value
3344           Label skip;
3345           __ testq(r, r);
3346           __ jcc(Assembler::equal, skip);
3347           BasicType bt = out_sig_bt[c_arg];
3348           int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3349           Address src1(r, box_offset);
3350           if ( bt == T_LONG ) {
3351             __ movq(r, src1);
3352           } else {
3353             __ movl(r, src1);
3354           }
3355           __ bind(skip);
3356 
3357         } else if (out_sig_bt[c_arg] != T_ADDRESS) {
3358           // Convert the arg to NULL
3359           __ xorq(r, r);
3360         }
3361       }
3362 
3363       // dst can no longer be holding an input value
3364       live[dst.first()->value()] = false;
3365     }
3366     if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3367       assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3368       ++c_arg; // skip over T_VOID to keep the loop indices in sync
3369     }
3370   }
3371 
3372 
3373   // OK, now we are done. We need to place the nop that dtrace wants in order
3374   // to patch in the trap
3375   int patch_offset = ((intptr_t)__ pc()) - start;
3376 
3377   __ nop();
3378 
3379 
3380   // Return
3381 
3382   __ leave();
3383   __ ret(0);
3384 
3385   __ flush();
3386 
3387   nmethod *nm = nmethod::new_dtrace_nmethod(
3388       method, masm->code(), vep_offset, patch_offset, frame_complete,
3389       stack_slots / VMRegImpl::slots_per_word);
3390   return nm;
3391 
3392 }
3393 
3394 #endif // HAVE_DTRACE_H
3395 
3396 // This function returns the adjusted size (in number of words) of a c2i adapter
3397 // activation for use during deoptimization
3398 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3399   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3400 }
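// A worked example with hypothetical values: a callee with 2 parameters and
// 5 locals, with Interpreter::stackElementWords == 1 (one word per stack
// element on amd64), needs an adjustment of (5 - 2) * 1 = 3 extra words.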
3401 
3402 
3403 uint SharedRuntime::out_preserve_stack_slots() {
3404   return 0;
3405 }
3406 
3407 //------------------------------generate_deopt_blob----------------------------
3408 void SharedRuntime::generate_deopt_blob() {
3409   // Allocate space for the code
3410   ResourceMark rm;
3411   // Setup code generation tools
3412   CodeBuffer buffer("deopt_blob", 2048, 1024);
3413   MacroAssembler* masm = new MacroAssembler(&buffer);
3414   int frame_size_in_words;
3415   OopMap* map = NULL;
3416   OopMapSet *oop_maps = new OopMapSet();
3417 
3418   // -------------
3419   // This code enters when returning to a de-optimized nmethod.  A return
3420   // address has been pushed on the stack, and return values are in
3421   // registers.
3422   // If we are doing a normal deopt then we were called from the patched
3423   // nmethod from the point we returned to the nmethod. So the return
3424   // address on the stack is wrong by NativeCall::instruction_size
3425   // We will adjust the value so it looks like we have the original return
3426   // address on the stack (like when we eagerly deoptimized).
3427   // In the case of an exception pending when deoptimizing, we enter
3428   // with a return address on the stack that points after the call we patched
3429   // into the exception handler. We have the following register state from,
3430   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3431   //    rax: exception oop
3432   //    rbx: exception handler
3433   //    rdx: throwing pc
3434   // So in this case we simply jam rdx into the useless return address and
3435   // the stack looks just like we want.
3436   //
3437   // At this point we need to de-opt.  We save the argument return
3438   // registers.  We call the first C routine, fetch_unroll_info().  This
3439   // routine captures the return values and returns a structure which
3440   // describes the current frame size and the sizes of all replacement frames.
3441   // The current frame is compiled code and may contain many inlined
3442   // functions, each with their own JVM state.  We pop the current frame, then
3443   // push all the new frames.  Then we call the C routine unpack_frames() to
3444   // populate these frames.  Finally unpack_frames() returns us the new target
3445   // address.  Notice that callee-save registers are BLOWN here; they have
3446   // already been captured in the vframeArray at the time the return PC was
3447   // patched.
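  // As a hedged summary of the control flow generated below (pseudocode
  // only; the real UnrollBlock accessors are abbreviated):
  //
  //   UnrollBlock* info = Deoptimization::fetch_unroll_info(thread);
  //   pop(info->size_of_deoptimized_frame());
  //   for (int i = 0; i < info->number_of_frames(); i++)
  //     push_skeletal_frame(info->frame_sizes()[i], info->frame_pcs()[i]);
  //   Deoptimization::unpack_frames(thread, exec_mode);  // fill the skeletons
  //   // then return into the interpreter at the new target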
3448   address start = __ pc();
3449   Label cont;
3450 
3451   // Prolog for non exception case!
3452 
3453   // Save everything in sight.
3454   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3455 
3456   // Normal deoptimization.  Save exec mode for unpack_frames.
3457   __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3458   __ jmp(cont);
3459 
3460   int reexecute_offset = __ pc() - start;
3461 
3462   // Reexecute case
3463   // The return address is the pc that describes what bci to re-execute at
3464 
3465   // No need to update map as each call to save_live_registers will produce identical oopmap
3466   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3467 
3468   __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3469   __ jmp(cont);
3470 
3471   int exception_offset = __ pc() - start;
3472 
3473   // Prolog for exception case
3474 
3475   // all registers are dead at this entry point, except for rax and
3476   // rdx, which contain the exception oop and exception pc
3477   // respectively.  Set them in TLS and fall thru to the
3478   // unpack_with_exception_in_tls entry point.
3479 
3480   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3481   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3482 
3483   int exception_in_tls_offset = __ pc() - start;
3484 
3485   // new implementation because exception oop is now passed in JavaThread
3486 
3487   // Prolog for exception case
3488   // All registers must be preserved because they might be used by LinearScan
3489   // Exception oop and throwing PC are passed in JavaThread
3490   // tos: stack at point of call to method that threw the exception (i.e. only
3491   // args are on the stack, no return address)
3492 
3493   // make room on stack for the return address
3494   // It will be patched later with the throwing pc. The correct value is not
3495   // available now because loading it from memory would destroy registers.
3496   __ push(0);
3497 
3498   // Save everything in sight.
3499   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3500 
3501   // Now it is safe to overwrite any register
3502 
3503   // Deopt during an exception.  Save exec mode for unpack_frames.
3504   __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3505 
3506   // load throwing pc from JavaThread and patch it as the return address
3507   // of the current frame. Then clear the field in JavaThread
3508 
3509   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3510   __ movptr(Address(rbp, wordSize), rdx);
3511   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3512 
3513 #ifdef ASSERT
3514   // verify that there is really an exception oop in JavaThread
3515   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3516   __ verify_oop(rax);
3517 
3518   // verify that there is no pending exception
3519   Label no_pending_exception;
3520   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3521   __ testptr(rax, rax);
3522   __ jcc(Assembler::zero, no_pending_exception);
3523   __ stop("must not have pending exception here");
3524   __ bind(no_pending_exception);
3525 #endif
3526 
3527   __ bind(cont);
3528 
3529   // Call C code.  Need thread and this frame, but NOT official VM entry
3530   // crud.  We cannot block on this call, no GC can happen.
3531   //
3532   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3533 
3534   // fetch_unroll_info needs to call last_java_frame().
3535 
3536   __ set_last_Java_frame(noreg, noreg, NULL);
3537 #ifdef ASSERT
3538   { Label L;
3539     __ cmpptr(Address(r15_thread,
3540                     JavaThread::last_Java_fp_offset()),
3541             (int32_t)0);
3542     __ jcc(Assembler::equal, L);
3543     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3544     __ bind(L);
3545   }
3546 #endif // ASSERT
3547   __ mov(c_rarg0, r15_thread);
3548   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3549 
3550   // Need to have an oopmap that tells fetch_unroll_info where to
3551   // find any register it might need.
3552   oop_maps->add_gc_map(__ pc() - start, map);
3553 
3554   __ reset_last_Java_frame(false);
3555 
3556   // Load UnrollBlock* into rdi
3557   __ mov(rdi, rax);
3558 
3559   Label noException;
3560   __ cmpl(r14, Deoptimization::Unpack_exception);   // Was exception pending?
3561   __ jcc(Assembler::notEqual, noException);
3562   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3563   // QQQ this is useless; it was NULL above
3564   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3565   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3566   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3567 
3568   __ verify_oop(rax);
3569 
3570   // Overwrite the result registers with the exception results.
3571   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3572   // I think this is useless
3573   __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3574 
3575   __ bind(noException);
3576 
3577   // Only register save data is on the stack.
3578   // Now restore the result registers.  Everything else is either dead
3579   // or captured in the vframeArray.
3580   RegisterSaver::restore_result_registers(masm);
3581 
3582   // All of the register save area has been popped off the stack. Only the
3583   // return address remains.
3584 
3585   // Pop all the frames we must move/replace.
3586   //
3587   // Frame picture (youngest to oldest)
3588   // 1: self-frame (no frame link)
3589   // 2: deopting frame  (no frame link)
3590   // 3: caller of deopting frame (could be compiled/interpreted).
3591   //
3592   // Note: by leaving the return address of self-frame on the stack
3593   // and using the size of frame 2 to adjust the stack,
3594   // when we are done the return address to frame 3 will still be on the stack.
3595 
3596   // Pop deoptimized frame
3597   __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3598   __ addptr(rsp, rcx);
3599 
3600   // rsp should be pointing at the return address to the caller (3)
3601 
3602   // Pick up the initial fp we should save
3603   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3604   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3605 
3606 #ifdef ASSERT
3607   // Compilers generate code that bangs the stack by as much as the
3608   // interpreter would need. So this stack banging should never
3609   // trigger a fault. Verify that it does not on non-product builds.
3610   if (UseStackBanging) {
3611     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3612     __ bang_stack_size(rbx, rcx);
3613   }
3614 #endif
3615 
3616   // Load address of array of frame pcs into rcx
3617   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3618 
3619   // Trash the old pc
3620   __ addptr(rsp, wordSize);
3621 
3622   // Load address of array of frame sizes into rsi
3623   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3624 
3625   // Load counter into rdx
3626   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3627 
3628   // Now adjust the caller's stack to make up for the extra locals,
3629   // but record the original sp so that we can save it in the skeletal interpreter
3630   // frame; the stack walking of interpreter_sender will then get the unextended sp
3631   // value and not the "real" sp value.
3632 
3633   const Register sender_sp = r8;
3634 
3635   __ mov(sender_sp, rsp);
3636   __ movl(rbx, Address(rdi,
3637                        Deoptimization::UnrollBlock::
3638                        caller_adjustment_offset_in_bytes()));
3639   __ subptr(rsp, rbx);
3640 
3641   // Push interpreter frames in a loop
3642   Label loop;
3643   __ bind(loop);
3644   __ movptr(rbx, Address(rsi, 0));      // Load frame size
3645 #ifdef CC_INTERP
3646   __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
3647 #ifdef ASSERT
3648   __ push(0xDEADDEAD);                  // Make a recognizable pattern
3649   __ push(0xDEADDEAD);
3650 #else /* ASSERT */
3651   __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
3652 #endif /* ASSERT */
3653 #else
3654   __ subptr(rbx, 2*wordSize);           // We'll push pc and ebp by hand
3655 #endif // CC_INTERP
3656   __ pushptr(Address(rcx, 0));          // Save return address
3657   __ enter();                           // Save old & set new ebp
3658   __ subptr(rsp, rbx);                  // Prolog
3659 #ifdef CC_INTERP
3660   __ movptr(Address(rbp,
3661                   -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3662             sender_sp); // Make it walkable
3663 #else /* CC_INTERP */
3664   // This value is corrected by layout_activation_impl
3665   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3666   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3667 #endif /* CC_INTERP */
3668   __ mov(sender_sp, rsp);               // Pass sender_sp to next frame
3669   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
3670   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
3671   __ decrementl(rdx);                   // Decrement counter
3672   __ jcc(Assembler::notZero, loop);
3673   __ pushptr(Address(rcx, 0));          // Save final return address
3674 
3675   // Re-push self-frame
3676   __ enter();                           // Save old & set new ebp
3677 
3678   // Allocate a full sized register save area.
3679   // Return address and rbp are in place, so we allocate two fewer words.
3680   __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3681 
3682   // Restore frame locals after moving the frame
3683   __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3684   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3685 
3686   // Call C code.  Need thread but NOT official VM entry
3687   // crud.  We cannot block on this call, no GC can happen.  Call should
3688   // restore return values to their stack-slots with the new SP.
3689   //
3690   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3691 
3692   // Use rbp because the frames look interpreted now
3693   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3694   // Don't need the precise return PC here, just precise enough to point into this code blob.
3695   address the_pc = __ pc();
3696   __ set_last_Java_frame(noreg, rbp, the_pc);
3697 
3698   __ andptr(rsp, -(StackAlignmentInBytes));  // Fix stack alignment as required by ABI
3699   __ mov(c_rarg0, r15_thread);
3700   __ movl(c_rarg1, r14); // second arg: exec_mode
3701   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3702   // Revert SP alignment after call since we're going to do some SP relative addressing below
3703   __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3704 
3705   // Set an oopmap for the call site
3706   // Use the same PC we used for the last java frame
3707   oop_maps->add_gc_map(the_pc - start,
3708                        new OopMap( frame_size_in_words, 0 ));
3709 
3710   // Clear fp AND pc
3711   __ reset_last_Java_frame(true);
3712 
3713   // Collect return values
3714   __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3715   __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3716   // I think this is useless (throwing pc?)
3717   __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3718 
3719   // Pop self-frame.
3720   __ leave();                           // Epilog
3721 
3722   // Jump to interpreter
3723   __ ret(0);
3724 
3725   // Make sure all code is generated
3726   masm->flush();
3727 
3728   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3729   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3730 }
3731 
3732 #ifdef COMPILER2
3733 //------------------------------generate_uncommon_trap_blob--------------------
3734 void SharedRuntime::generate_uncommon_trap_blob() {
3735   // Allocate space for the code
3736   ResourceMark rm;
3737   // Setup code generation tools
3738   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3739   MacroAssembler* masm = new MacroAssembler(&buffer);
3740 
3741   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3742 
3743   address start = __ pc();
3744 
3745   if (UseRTMLocking) {
3746     // Abort RTM transaction before possible nmethod deoptimization.
3747     __ xabort(0);
3748   }
3749 
3750   // Push self-frame.  We get here with a return address on the
3751   // stack, so rsp is 8-byte aligned until we allocate our frame.
3752   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3753 
3754   // No callee saved registers. rbp is assumed implicitly saved
3755   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3756 
3757   // The compiler left unloaded_class_index in j_rarg0; move it to where the
3758   // runtime expects it.
3759   __ movl(c_rarg1, j_rarg0);
3760 
3761   __ set_last_Java_frame(noreg, noreg, NULL);
3762 
3763   // Call C code.  Need thread but NOT official VM entry
3764   // crud.  We cannot block on this call, no GC can happen.  Call should
3765   // capture callee-saved registers as well as return values.
3766   // Thread is in rdi already.
3767   //
3768   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3769 
3770   __ mov(c_rarg0, r15_thread);
3771   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3772 
3773   // Set an oopmap for the call site
3774   OopMapSet* oop_maps = new OopMapSet();
3775   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3776 
3777   // location of rbp is known implicitly by the frame sender code
3778 
3779   oop_maps->add_gc_map(__ pc() - start, map);
3780 
3781   __ reset_last_Java_frame(false);
3782 
3783   // Load UnrollBlock* into rdi
3784   __ mov(rdi, rax);
3785 
3786   // Pop all the frames we must move/replace.
3787   //
3788   // Frame picture (youngest to oldest)
3789   // 1: self-frame (no frame link)
3790   // 2: deopting frame  (no frame link)
3791   // 3: caller of deopting frame (could be compiled/interpreted).
3792 
3793   // Pop self-frame.  We have no frame, and must rely only on rax and rsp.
3794   __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3795 
3796   // Pop deoptimized frame (int)
3797   __ movl(rcx, Address(rdi,
3798                        Deoptimization::UnrollBlock::
3799                        size_of_deoptimized_frame_offset_in_bytes()));
3800   __ addptr(rsp, rcx);
3801 
3802   // rsp should be pointing at the return address to the caller (3)
3803 
3804   // Pick up the initial fp we should save
3805   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3806   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3807 
3808 #ifdef ASSERT
3809   // Compilers generate code that bangs the stack by as much as the
3810   // interpreter would need. So this stack banging should never
3811   // trigger a fault. Verify that it does not on non-product builds.
3812   if (UseStackBanging) {
3813     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3814     __ bang_stack_size(rbx, rcx);
3815   }
3816 #endif
3817 
3818   // Load address of array of frame pcs into rcx (address*)
3819   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3820 
3821   // Trash the return pc
3822   __ addptr(rsp, wordSize);
3823 
3824   // Load address of array of frame sizes into rsi (intptr_t*)
3825   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3826 
3827   // Counter
3828   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)
3829 
3830   // Now adjust the caller's stack to make up for the extra locals, but
3831   // record the original sp so that we can save it in the skeletal
3832   // interpreter frame; the stack walking of interpreter_sender
3833   // will then get the unextended sp value and not the "real" sp value.
3834 
3835   const Register sender_sp = r8;
3836 
3837   __ mov(sender_sp, rsp);
3838   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
3839   __ subptr(rsp, rbx);
3840 
3841   // Push interpreter frames in a loop
3842   Label loop;
3843   __ bind(loop);
3844   __ movptr(rbx, Address(rsi, 0)); // Load frame size
3845   __ subptr(rbx, 2 * wordSize);    // We'll push pc and rbp by hand
3846   __ pushptr(Address(rcx, 0));     // Save return address
3847   __ enter();                      // Save old & set new rbp
3848   __ subptr(rsp, rbx);             // Prolog
3849 #ifdef CC_INTERP
3850   __ movptr(Address(rbp,
3851                   -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3852             sender_sp); // Make it walkable
3853 #else // CC_INTERP
3854   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3855             sender_sp);            // Make it walkable
3856   // This value is corrected by layout_activation_impl
3857   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3858 #endif // CC_INTERP
3859   __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
3860   __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
3861   __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
3862   __ decrementl(rdx);              // Decrement counter
3863   __ jcc(Assembler::notZero, loop);
3864   __ pushptr(Address(rcx, 0));     // Save final return address
3865 
3866   // Re-push self-frame
3867   __ enter();                 // Save old & set new rbp
3868   __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3869                               // Prolog
3870 
3871   // Use rbp because the frames look interpreted now
3872   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3873   // Don't need the precise return PC here, just precise enough to point into this code blob.
3874   address the_pc = __ pc();
3875   __ set_last_Java_frame(noreg, rbp, the_pc);
3876 
3877   // Call C code.  Need thread but NOT official VM entry
3878   // crud.  We cannot block on this call, no GC can happen.  Call should
3879   // restore return values to their stack-slots with the new SP.
3880   // Thread is in rdi already.
3881   //
3882   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3883 
3884   __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3885   __ mov(c_rarg0, r15_thread);
3886   __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3887   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3888 
3889   // Set an oopmap for the call site
3890   // Use the same PC we used for the last java frame
3891   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3892 
3893   // Clear fp AND pc
3894   __ reset_last_Java_frame(true);
3895 
3896   // Pop self-frame.
3897   __ leave();                 // Epilog
3898 
3899   // Jump to interpreter
3900   __ ret(0);
3901 
3902   // Make sure all code is generated
3903   masm->flush();
3904 
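  // SimpleRuntimeFrame::framesize is counted in 32-bit slots, so the >> 1
  // below converts it to 64-bit words for UncommonTrapBlob::create.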
3905   _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
3906                                                  SimpleRuntimeFrame::framesize >> 1);
3907 }
3908 #endif // COMPILER2
3909 
3910 
3911 //------------------------------generate_handler_blob------
3912 //
3913 // Generate a special Compile2Runtime blob that saves all registers
3914 // and sets up the oopmap.
3915 //
3916 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3917   assert(StubRoutines::forward_exception_entry() != NULL,
3918          "must be generated before");
3919 
3920   ResourceMark rm;
3921   OopMapSet *oop_maps = new OopMapSet();
3922   OopMap* map;
3923 
3924   // Allocate space for the code.  Setup code generation tools.
3925   CodeBuffer buffer("handler_blob", 2048, 1024);
3926   MacroAssembler* masm = new MacroAssembler(&buffer);
3927 
3928   address start   = __ pc();
3929   address call_pc = NULL;
3930   int frame_size_in_words;
3931   bool cause_return = (poll_type == POLL_AT_RETURN);
3932   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3933 
3934   if (UseRTMLocking) {
3935     // Abort RTM transaction before calling runtime
3936     // because critical section will be large and will be
3937     // aborted anyway. Also nmethod could be deoptimized.
3938     __ xabort(0);
3939   }
3940 
3941   // Make room for return address (or push it again)
3942   if (!cause_return) {
3943     __ push(rbx);
3944   }
3945 
3946   // Save registers, fpu state, and flags
3947   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3948 
3949   // The following is basically a call_VM.  However, we need the precise
3950   // address of the call in order to generate an oopmap. Hence, we do all the
3951   // work ourselves.
3952 
3953   __ set_last_Java_frame(noreg, noreg, NULL);
3954 
3955   // The return address must always be correct so that the frame constructor
3956   // never sees an invalid pc.
3957 
3958   if (!cause_return) {
3959     // overwrite the dummy value we pushed on entry
3960     __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3961     __ movptr(Address(rbp, wordSize), c_rarg0);
3962   }
3963 
3964   // Do the call
3965   __ mov(c_rarg0, r15_thread);
3966   __ call(RuntimeAddress(call_ptr));
3967 
3968   // Set an oopmap for the call site.  This oopmap will map all
3969   // oop-registers and debug-info registers as callee-saved.  This
3970   // will allow deoptimization at this safepoint to find all possible
3971   // debug-info recordings, as well as let GC find all oops.
3972 
3973   oop_maps->add_gc_map( __ pc() - start, map);
3974 
3975   Label noException;
3976 
3977   __ reset_last_Java_frame(false);
3978 
3979   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3980   __ jcc(Assembler::equal, noException);
3981 
3982   // Exception pending
3983 
3984   RegisterSaver::restore_live_registers(masm, save_vectors);
3985 
3986   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3987 
3988   // No exception case
3989   __ bind(noException);
3990 
3991   // Normal exit, restore registers and exit.
3992   RegisterSaver::restore_live_registers(masm, save_vectors);
3993 
3994   __ ret(0);
3995 
3996   // Make sure all code is generated
3997   masm->flush();
3998 
3999   // Fill-out other meta info
4000   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
4001 }
4002 
4003 //
4004 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
4005 //
4006 // Generate a stub that calls into vm to find out the proper destination
4007 // of a java call. All the argument registers are live at this point
4008 // but since this is generic code we don't know what they are and the caller
4009 // must do any gc of the args.
4010 //
4011 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
4012   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
4013 
4014   // allocate space for the code
4015   ResourceMark rm;
4016 
4017   CodeBuffer buffer(name, 1000, 512);
4018   MacroAssembler* masm                = new MacroAssembler(&buffer);
4019 
4020   int frame_size_in_words;
4021 
4022   OopMapSet *oop_maps = new OopMapSet();
4023   OopMap* map = NULL;
4024 
4025   int start = __ offset();
4026 
4027   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
4028 
4029   int frame_complete = __ offset();
4030 
4031   __ set_last_Java_frame(noreg, noreg, NULL);
4032 
4033   __ mov(c_rarg0, r15_thread);
4034 
4035   __ call(RuntimeAddress(destination));
4036 
4037 
4038   // Set an oopmap for the call site.
4039   // We need this not only for callee-saved registers, but also for volatile
4040   // registers that the compiler might be keeping live across a safepoint.
4041 
4042   oop_maps->add_gc_map( __ offset() - start, map);
4043 
  // rax contains the address we are going to jump to, assuming no exception was installed
4045 
4046   // clear last_Java_sp
4047   __ reset_last_Java_frame(false);
4048   // check for pending exceptions
4049   Label pending;
4050   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
4051   __ jcc(Assembler::notEqual, pending);
4052 
4053   // get the returned Method*
4054   __ get_vm_result_2(rbx, r15_thread);
4055   __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
4056 
4057   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
4058 
4059   RegisterSaver::restore_live_registers(masm);
4060 
  // We are back to the original state on entry and ready to go.
4062 
4063   __ jmp(rax);
4064 
4065   // Pending exception after the safepoint
4066 
4067   __ bind(pending);
4068 
4069   RegisterSaver::restore_live_registers(masm);
4070 
4071   // exception pending => remove activation and forward to exception handler
4072 
4073   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
4074 
4075   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
4076   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
4077 
4078   // -------------
4079   // make sure all code is generated
4080   masm->flush();
4081 
  // Return the blob; the frame size passed to the stub is in words.
4084   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
4085 }
4086 
4087 
4088 //------------------------------Montgomery multiplication------------------------
4089 //
4090 
4091 #ifndef _WINDOWS
4092 
4093 #define ASM_SUBTRACT
4094 
4095 #ifdef ASM_SUBTRACT
4096 // Subtract 0:b from carry:a.  Return carry.
4097 static unsigned long
4098 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
4099   long i = 0, cnt = len;
4100   unsigned long tmp;
4101   asm volatile("clc; "
4102                "0: ; "
4103                "mov (%[b], %[i], 8), %[tmp]; "
4104                "sbb %[tmp], (%[a], %[i], 8); "
4105                "inc %[i]; dec %[cnt]; "
4106                "jne 0b; "
4107                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
4108                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
4109                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
4110                : "memory");
4111   return tmp;
4112 }
4113 #else // ASM_SUBTRACT
4114 typedef int __attribute__((mode(TI))) int128;
4115 
4116 // Subtract 0:b from carry:a.  Return carry.
4117 static unsigned long
4118 sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
4119   int128 tmp = 0;
4120   int i;
4121   for (i = 0; i < len; i++) {
4122     tmp += a[i];
4123     tmp -= b[i];
4124     a[i] = tmp;
4125     tmp >>= 64;
4126     assert(-1 <= tmp && tmp <= 0, "invariant");
4127   }
4128   return tmp + carry;
4129 }
4130 #endif // ! ASM_SUBTRACT
4131 
4132 // Multiply (unsigned) Long A by Long B, accumulating the double-
4133 // length result into the accumulator formed of T0, T1, and T2.
4134 #define MACC(A, B, T0, T1, T2)                                      \
4135 do {                                                                \
4136   unsigned long hi, lo;                                             \
4137   asm volatile("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4"   \
4138            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)      \
4139            : "r"(A), "a"(B) : "cc");                                \
4140  } while(0)
4141 
4142 // As above, but add twice the double-length result into the
4143 // accumulator.
4144 #define MACC2(A, B, T0, T1, T2)                                     \
4145 do {                                                                \
4146   unsigned long hi, lo;                                             \
4147   asm volatile("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4;"  \
4148            "add %%rax, %2; adc %%rdx, %3; adc $0, %4"               \
4149            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)      \
4150            : "r"(A), "a"(B) : "cc");                                \
4151  } while(0)
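
// For reference, a portable sketch of what MACC computes, assuming a
// compiler with unsigned __int128 (the non-asm sub() fallback above already
// relies on the equivalent mode(TI) type).  MACC2 is the same but adds the
// product twice.  macc_ref is an illustrative name only and is not compiled:
#if 0
static inline void macc_ref(unsigned long A, unsigned long B,
                            unsigned long &T0, unsigned long &T1,
                            unsigned long &T2) {
  unsigned __int128 p = (unsigned __int128)A * B;    // mul: rdx:rax = A*B
  unsigned __int128 s = (unsigned __int128)T0 + (unsigned long)p;
  T0 = (unsigned long)s;                             // add %rax, T0
  s = (s >> 64) + T1 + (unsigned long)(p >> 64);     // adc %rdx, T1
  T1 = (unsigned long)s;
  T2 += (unsigned long)(s >> 64);                    // adc $0, T2
}
#endif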
4152 
4153 // Fast Montgomery multiplication.  The derivation of the algorithm is
4154 // in  A Cryptographic Library for the Motorola DSP56000,
4155 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
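//
// In outline, with R = 2^(64*len) and inv * n[0] == -1 (mod 2^64): each
// outer iteration picks m[i] = t0 * inv (mod 2^64), which makes
// t0 + m[i]*n[0] == 0 (mod 2^64) -- hence the assert below -- so the low
// accumulator word can be shifted out.  After both loops the result m is
// congruent to a*b*R^-1 (mod n), and the trailing sub() loop reduces it
// into [0, n).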
4156 
4157 static void __attribute__((noinline))
4158 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
4159                     unsigned long m[], unsigned long inv, int len) {
4160   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4161   int i;
4162 
4163   assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
4164 
4165   for (i = 0; i < len; i++) {
4166     int j;
4167     for (j = 0; j < i; j++) {
4168       MACC(a[j], b[i-j], t0, t1, t2);
4169       MACC(m[j], n[i-j], t0, t1, t2);
4170     }
4171     MACC(a[i], b[0], t0, t1, t2);
4172     m[i] = t0 * inv;
4173     MACC(m[i], n[0], t0, t1, t2);
4174 
4175     assert(t0 == 0, "broken Montgomery multiply");
4176 
4177     t0 = t1; t1 = t2; t2 = 0;
4178   }
4179 
4180   for (i = len; i < 2*len; i++) {
4181     int j;
4182     for (j = i-len+1; j < len; j++) {
4183       MACC(a[j], b[i-j], t0, t1, t2);
4184       MACC(m[j], n[i-j], t0, t1, t2);
4185     }
4186     m[i-len] = t0;
4187     t0 = t1; t1 = t2; t2 = 0;
4188   }
4189 
4190   while (t0)
4191     t0 = sub(m, n, t0, len);
4192 }
4193 
4194 // Fast Montgomery squaring.  This uses asymptotically 25% fewer
4195 // multiplies so it should be up to 25% faster than Montgomery
4196 // multiplication.  However, its loop control is more complex and it
4197 // may actually run slower on some machines.
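//
// For example, with i == 3 the full multiplication would accumulate both
// a[0]*a[3] and a[3]*a[0] (and likewise a[1]*a[2] and a[2]*a[1]); the
// squaring loops compute each such cross product once and use MACC2 to add
// it at double weight, while the self products a[j]*a[j] (even i) are added
// once with MACC.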
4198 
4199 static void __attribute__((noinline))
4200 montgomery_square(unsigned long a[], unsigned long n[],
4201                   unsigned long m[], unsigned long inv, int len) {
4202   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4203   int i;
4204 
4205   assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
4206 
4207   for (i = 0; i < len; i++) {
4208     int j;
4209     int end = (i+1)/2;
4210     for (j = 0; j < end; j++) {
4211       MACC2(a[j], a[i-j], t0, t1, t2);
4212       MACC(m[j], n[i-j], t0, t1, t2);
4213     }
4214     if ((i & 1) == 0) {
4215       MACC(a[j], a[j], t0, t1, t2);
4216     }
4217     for (; j < i; j++) {
4218       MACC(m[j], n[i-j], t0, t1, t2);
4219     }
4220     m[i] = t0 * inv;
4221     MACC(m[i], n[0], t0, t1, t2);
4222 
4223     assert(t0 == 0, "broken Montgomery square");
4224 
4225     t0 = t1; t1 = t2; t2 = 0;
4226   }
4227 
4228   for (i = len; i < 2*len; i++) {
4229     int start = i-len+1;
4230     int end = start + (len - start)/2;
4231     int j;
4232     for (j = start; j < end; j++) {
4233       MACC2(a[j], a[i-j], t0, t1, t2);
4234       MACC(m[j], n[i-j], t0, t1, t2);
4235     }
4236     if ((i & 1) == 0) {
4237       MACC(a[j], a[j], t0, t1, t2);
4238     }
4239     for (; j < len; j++) {
4240       MACC(m[j], n[i-j], t0, t1, t2);
4241     }
4242     m[i-len] = t0;
4243     t0 = t1; t1 = t2; t2 = 0;
4244   }
4245 
4246   while (t0)
4247     t0 = sub(m, n, t0, len);
4248 }
4249 
4250 // Swap words in a longword.
4251 static unsigned long swap(unsigned long x) {
4252   return (x << 32) | (x >> 32);
4253 }
4254 
4255 // Copy len longwords from s to d, word-swapping as we go.  The
4256 // destination array is reversed.
4257 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
4258   d += len;
4259   while(len-- > 0) {
4260     d--;
4261     *d = swap(*s);
4262     s++;
4263   }
4264 }
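
// For example, with len == 2 and s[] == {s0, s1} the result is
// d[] == {swap(s1), swap(s0)}: the 32-bit halves of each longword are
// exchanged and the word order is reversed, converting between the
// big-endian jint layout of the Java arrays and the little-endian
// longword layout used here.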
4265 
4266 // The threshold at which squaring is advantageous was determined
4267 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
4268 #define MONTGOMERY_SQUARING_THRESHOLD 64
4269 
4270 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
4271                                         jint len, jlong inv,
4272                                         jint *m_ints) {
4273   assert(len % 2 == 0, "array length in montgomery_multiply must be even");
4274   int longwords = len/2;
4275 
  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer, i.e. 256
  // longwords, so the four scratch arrays below use a total of
  // 4 * 256 * 8 = 8192 bytes of stack space.
4279   int total_allocation = longwords * sizeof (unsigned long) * 4;
4280   guarantee(total_allocation <= 8192, "must be");
4281   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4282 
4283   // Local scratch arrays
4284   unsigned long
4285     *a = scratch + 0 * longwords,
4286     *b = scratch + 1 * longwords,
4287     *n = scratch + 2 * longwords,
4288     *m = scratch + 3 * longwords;
4289 
4290   reverse_words((unsigned long *)a_ints, a, longwords);
4291   reverse_words((unsigned long *)b_ints, b, longwords);
4292   reverse_words((unsigned long *)n_ints, n, longwords);
4293 
4294   ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
4295 
4296   reverse_words(m, (unsigned long *)m_ints, longwords);
4297 }
4298 
4299 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
4300                                       jint len, jlong inv,
4301                                       jint *m_ints) {
4302   assert(len % 2 == 0, "array length in montgomery_square must be even");
4303   int longwords = len/2;
4304 
  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer, i.e. 256
  // longwords, so the three scratch arrays below use a total of
  // 3 * 256 * 8 = 6144 bytes of stack space (the guarantee keeps the
  // same conservative 8K bound as montgomery_multiply).
4308   int total_allocation = longwords * sizeof (unsigned long) * 3;
4309   guarantee(total_allocation <= 8192, "must be");
4310   unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4311 
4312   // Local scratch arrays
4313   unsigned long
4314     *a = scratch + 0 * longwords,
4315     *n = scratch + 1 * longwords,
4316     *m = scratch + 2 * longwords;
4317 
4318   reverse_words((unsigned long *)a_ints, a, longwords);
4319   reverse_words((unsigned long *)n_ints, n, longwords);
4320 
  // montgomery_square fails to pass BigIntegerTest on Solaris amd64 on
  // JDK 7 and JDK 8, so fall back to montgomery_multiply there.
4323 #ifndef SOLARIS
4324   if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
4325 #else
4326   if (0) {
4327 #endif
4328     ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
4329   } else {
4330     ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
4331   }
4332 
4333   reverse_words(m, (unsigned long *)m_ints, longwords);
4334 }
4335 
#endif // !_WINDOWS
4337 
4338 #ifdef COMPILER2
4339 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
4340 //
4341 //------------------------------generate_exception_blob---------------------------
// Creates the exception blob at the end.
// Using the exception blob, this code is jumped to from a compiled method.
4344 // (see emit_exception_handler in x86_64.ad file)
4345 //
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-saved registers), unwind the frame, and jump to the
// exception handler of the caller if there is no Java-level handler
// for the nmethod.
4351 //
4352 // This code is entered with a jmp.
4353 //
4354 // Arguments:
4355 //   rax: exception oop
4356 //   rdx: exception pc
4357 //
4358 // Results:
4359 //   rax: exception oop
4360 //   rdx: exception pc in caller or ???
4361 //   destination: exception handler of caller
4362 //
4363 // Note: the exception pc MUST be at a call (precise debug information)
4364 //       Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
4365 //
4366 
4367 void OptoRuntime::generate_exception_blob() {
4368   assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
4369   assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
4370   assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
4371 
4372   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
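  // (framesize is counted in 32-bit stack slots, so a multiple of four
  // slots keeps rsp 16-byte aligned.)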
4373 
4374   // Allocate space for the code
4375   ResourceMark rm;
4376   // Setup code generation tools
4377   CodeBuffer buffer("exception_blob", 2048, 1024);
4378   MacroAssembler* masm = new MacroAssembler(&buffer);
4379 
4380 
4381   address start = __ pc();
4382 
4383   // Exception pc is 'return address' for stack walker
4384   __ push(rdx);
4385   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
4386 
4387   // Save callee-saved registers.  See x86_64.ad.
4388 
  // rbp is an implicitly saved callee-saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone.
4392 
4393   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
4394 
4395   // Store exception in Thread object. We cannot pass any arguments to the
4396   // handle_exception call, since we do not want to make any assumption
  // about the size of the frame in which the exception happened.
4398   // c_rarg0 is either rdi (Linux) or rcx (Windows).
4399   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
4400   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
4401 
4402   // This call does all the hard work.  It checks if an exception handler
4403   // exists in the method.
4404   // If so, it returns the handler address.
4405   // If not, it prepares for stack-unwinding, restoring the callee-save
4406   // registers of the frame being removed.
4407   //
4408   // address OptoRuntime::handle_exception_C(JavaThread* thread)
4409 
4410   // At a method handle call, the stack may not be properly aligned
4411   // when returning with an exception.
4412   address the_pc = __ pc();
4413   __ set_last_Java_frame(noreg, noreg, the_pc);
4414   __ mov(c_rarg0, r15_thread);
4415   __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
4416   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4417 
4418   // Set an oopmap for the call site.  This oopmap will only be used if we
4419   // are unwinding the stack.  Hence, all locations will be dead.
4420   // Callee-saved registers will be the same as the frame above (i.e.,
4421   // handle_exception_stub), since they were restored when we got the
4422   // exception.
4423 
4424   OopMapSet* oop_maps = new OopMapSet();
4425 
4426   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4427 
4428   __ reset_last_Java_frame(false);
4429 
4430   // Restore callee-saved registers
4431 
  // rbp is an implicitly saved callee-saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone.
4435 
4436   __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4437 
4438   __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4439   __ pop(rdx);                  // No need for exception pc anymore
4440 
4441   // rax: exception handler
4442 
4443   // We have a handler in rax (could be deopt blob).
4444   __ mov(r8, rax);
4445 
4446   // Get the exception oop
4447   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4448   // Get the exception pc in case we are deoptimized
4449   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4450 #ifdef ASSERT
4451   __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4452   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4453 #endif
4454   // Clear the exception oop so GC no longer processes it as a root.
4455   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4456 
4457   // rax: exception oop
4458   // r8:  exception handler
4459   // rdx: exception pc
4460   // Jump to handler
4461 
4462   __ jmp(r8);
4463 
4464   // Make sure all code is generated
4465   masm->flush();
4466 
4467   // Set exception blob
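  // framesize is counted in 32-bit slots; >> 1 converts it to the 64-bit
  // word count that the blob records as its frame size.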
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4469 }
4470 #endif // COMPILER2