1 /*
   2  * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/nativeInst.hpp"
  31 #include "code/vtableStubs.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/compiledICHolder.hpp"
  40 #include "oops/klass.inline.hpp"
  41 #include "prims/methodHandles.hpp"
  42 #include "runtime/jniHandles.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "runtime/vframeArray.hpp"
  48 #include "runtime/vm_version.hpp"
  49 #include "utilities/align.hpp"
  50 #include "vmreg_x86.inline.hpp"
  51 #ifdef COMPILER1
  52 #include "c1/c1_Runtime1.hpp"
  53 #endif
  54 #ifdef COMPILER2
  55 #include "opto/runtime.hpp"
  56 #endif
  57 
  58 #define __ masm->
  59 
  60 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  61 
  62 class RegisterSaver {
  63   // Capture info about frame layout
  64 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  65   enum layout {
  66                 fpu_state_off = 0,
  67                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  68                 st0_off, st0H_off,
  69                 st1_off, st1H_off,
  70                 st2_off, st2H_off,
  71                 st3_off, st3H_off,
  72                 st4_off, st4H_off,
  73                 st5_off, st5H_off,
  74                 st6_off, st6H_off,
  75                 st7_off, st7H_off,
  76                 xmm_off,
  77                 DEF_XMM_OFFS(0),
  78                 DEF_XMM_OFFS(1),
  79                 DEF_XMM_OFFS(2),
  80                 DEF_XMM_OFFS(3),
  81                 DEF_XMM_OFFS(4),
  82                 DEF_XMM_OFFS(5),
  83                 DEF_XMM_OFFS(6),
  84                 DEF_XMM_OFFS(7),
  85                 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
  86                 rdi_off,
  87                 rsi_off,
  88                 ignore_off,  // extra copy of rbp,
  89                 rsp_off,
  90                 rbx_off,
  91                 rdx_off,
  92                 rcx_off,
  93                 rax_off,
  94                 // The frame sender code expects that rbp will be in the "natural" place and
  95                 // will override any oopMap setting for it. We must therefore force the layout
  96                 // so that it agrees with the frame sender code.
  97                 rbp_off,
  98                 return_off,      // slot for return address
  99                 reg_save_size };
 100   enum { FPU_regs_live = flags_off - fpu_state_end };
 101 
 102   public:
 103 
 104   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
 105                                      int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
 106   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
 107 
 108   static int rax_offset() { return rax_off; }
 109   static int rbx_offset() { return rbx_off; }
 110 
 111   // Offsets into the register save area
 112   // Used by deoptimization when it is managing result register
 113   // values on its own
 114 
 115   static int raxOffset(void) { return rax_off; }
 116   static int rdxOffset(void) { return rdx_off; }
 117   static int rbxOffset(void) { return rbx_off; }
 118   static int xmm0Offset(void) { return xmm0_off; }
 119   // This really returns a slot in the fp save area, which one is not important
 120   static int fpResultOffset(void) { return st0_off; }
 121 
 122   // During deoptimization only the result register need to be restored
 123   // all the other values have already been extracted.
 124 
 125   static void restore_result_registers(MacroAssembler* masm);
 126 
 127 };
 128 
 129 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 130                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 131   int num_xmm_regs = XMMRegister::number_of_registers;
 132   int ymm_bytes = num_xmm_regs * 16;
 133   int zmm_bytes = num_xmm_regs * 32;
 134 #ifdef COMPILER2
 135   int opmask_state_bytes = KRegister::number_of_registers * 8;
 136   if (save_vectors) {
 137     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 138     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 139     // Save upper half of YMM registers
 140     int vect_bytes = ymm_bytes;
 141     if (UseAVX > 2) {
 142       // Save upper half of ZMM registers as well
 143       vect_bytes += zmm_bytes;
 144       additional_frame_words += opmask_state_bytes / wordSize;
 145     }
 146     additional_frame_words += vect_bytes / wordSize;
 147   }
 148 #else
 149   assert(!save_vectors, "vectors are generated only by C2");
 150 #endif
 151   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 152   int frame_words = frame_size_in_bytes / wordSize;
 153   *total_frame_words = frame_words;
 154 
 155   assert(FPUStateSizeInWords == 27, "update stack layout");
 156 
 157   // save registers, fpu state, and flags
 158   // We assume caller has already has return address slot on the stack
 159   // We push epb twice in this sequence because we want the real rbp,
 160   // to be under the return like a normal enter and we want to use pusha
 161   // We push by hand instead of using push.
 162   __ enter();
 163   __ pusha();
 164   __ pushf();
 165   __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
 166   __ push_FPU_state();          // Save FPU state & init
 167 
 168   if (verify_fpu) {
 169     // Some stubs may have non standard FPU control word settings so
 170     // only check and reset the value when it required to be the
 171     // standard value.  The safepoint blob in particular can be used
 172     // in methods which are using the 24 bit control word for
 173     // optimized float math.
 174 
 175 #ifdef ASSERT
 176     // Make sure the control word has the expected value
 177     Label ok;
 178     __ cmpw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
 179     __ jccb(Assembler::equal, ok);
 180     __ stop("corrupted control word detected");
 181     __ bind(ok);
 182 #endif
 183 
 184     // Reset the control word to guard against exceptions being unmasked
 185     // since fstp_d can cause FPU stack underflow exceptions.  Write it
 186     // into the on stack copy and then reload that to make sure that the
 187     // current and future values are correct.
 188     __ movw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
 189   }
 190 
 191   __ frstor(Address(rsp, 0));
 192   if (!verify_fpu) {
 193     // Set the control word so that exceptions are masked for the
 194     // following code.
 195     __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
 196   }
 197 
 198   int off = st0_off;
 199   int delta = st1_off - off;
 200 
 201   // Save the FPU registers in de-opt-able form
 202   for (int n = 0; n < FloatRegister::number_of_registers; n++) {
 203     __ fstp_d(Address(rsp, off*wordSize));
 204     off += delta;
 205   }
 206 
 207   off = xmm0_off;
 208   delta = xmm1_off - off;
 209   if(UseSSE == 1) {
 210     // Save the XMM state
 211     for (int n = 0; n < num_xmm_regs; n++) {
 212       __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
 213       off += delta;
 214     }
 215   } else if(UseSSE >= 2) {
 216     // Save whole 128bit (16 bytes) XMM registers
 217     for (int n = 0; n < num_xmm_regs; n++) {
 218       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 219       off += delta;
 220     }
 221   }
 222 
 223 #ifdef COMPILER2
 224   if (save_vectors) {
 225     __ subptr(rsp, ymm_bytes);
 226     // Save upper half of YMM registers
 227     for (int n = 0; n < num_xmm_regs; n++) {
 228       __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
 229     }
 230     if (UseAVX > 2) {
 231       __ subptr(rsp, zmm_bytes);
 232       // Save upper half of ZMM registers
 233       for (int n = 0; n < num_xmm_regs; n++) {
 234         __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
 235       }
 236       __ subptr(rsp, opmask_state_bytes);
 237       // Save opmask registers
 238       for (int n = 0; n < KRegister::number_of_registers; n++) {
 239         __ kmov(Address(rsp, n*8), as_KRegister(n));
 240       }
 241     }
 242   }
 243 #else
 244   assert(!save_vectors, "vectors are generated only by C2");
 245 #endif
 246 
 247   __ vzeroupper();
 248 
 249   // Set an oopmap for the call site.  This oopmap will map all
 250   // oop-registers and debug-info registers as callee-saved.  This
 251   // will allow deoptimization at this safepoint to find all possible
 252   // debug-info recordings, as well as let GC find all oops.
 253 
 254   OopMapSet *oop_maps = new OopMapSet();
 255   OopMap* map =  new OopMap( frame_words, 0 );
 256 
 257 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 258 #define NEXTREG(x) (x)->as_VMReg()->next()
 259 
 260   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 261   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 262   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 263   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 264   // rbp, location is known implicitly, no oopMap
 265   map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
 266   map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
 267 
 268   // %%% This is really a waste but we'll keep things as they were for now for the upper component
 269   off = st0_off;
 270   delta = st1_off - off;
 271   for (int n = 0; n < FloatRegister::number_of_registers; n++) {
 272     FloatRegister freg_name = as_FloatRegister(n);
 273     map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
 274     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
 275     off += delta;
 276   }
 277   off = xmm0_off;
 278   delta = xmm1_off - off;
 279   for (int n = 0; n < num_xmm_regs; n++) {
 280     XMMRegister xmm_name = as_XMMRegister(n);
 281     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 282     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
 283     off += delta;
 284   }
 285 #undef NEXTREG
 286 #undef STACK_OFFSET
 287 
 288   return map;
 289 }
 290 
 291 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 292   int opmask_state_bytes = 0;
 293   int additional_frame_bytes = 0;
 294   int num_xmm_regs = XMMRegister::number_of_registers;
 295   int ymm_bytes = num_xmm_regs * 16;
 296   int zmm_bytes = num_xmm_regs * 32;
 297   // Recover XMM & FPU state
 298 #ifdef COMPILER2
 299   if (restore_vectors) {
 300     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 301     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 302     // Save upper half of YMM registers
 303     additional_frame_bytes = ymm_bytes;
 304     if (UseAVX > 2) {
 305       // Save upper half of ZMM registers as well
 306       additional_frame_bytes += zmm_bytes;
 307       opmask_state_bytes = KRegister::number_of_registers * 8;
 308       additional_frame_bytes += opmask_state_bytes;
 309     }
 310   }
 311 #else
 312   assert(!restore_vectors, "vectors are generated only by C2");
 313 #endif
 314 
 315   int off = xmm0_off;
 316   int delta = xmm1_off - off;
 317 
 318   __ vzeroupper();
 319 
 320   if (UseSSE == 1) {
 321     // Restore XMM registers
 322     assert(additional_frame_bytes == 0, "");
 323     for (int n = 0; n < num_xmm_regs; n++) {
 324       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 325       off += delta;
 326     }
 327   } else if (UseSSE >= 2) {
 328     // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
 329     // ZMM because the movdqu instruction zeros the upper part of the XMM register.
 330     for (int n = 0; n < num_xmm_regs; n++) {
 331       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 332       off += delta;
 333     }
 334   }
 335 
 336   if (restore_vectors) {
 337     off = additional_frame_bytes - ymm_bytes;
 338     // Restore upper half of YMM registers.
 339     for (int n = 0; n < num_xmm_regs; n++) {
 340       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16+off));
 341     }
 342     if (UseAVX > 2) {
 343       // Restore upper half of ZMM registers.
 344       off = opmask_state_bytes;
 345       for (int n = 0; n < num_xmm_regs; n++) {
 346         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32+off));
 347       }
 348       for (int n = 0; n < KRegister::number_of_registers; n++) {
 349         __ kmov(as_KRegister(n), Address(rsp, n*8));
 350       }
 351     }
 352     __ addptr(rsp, additional_frame_bytes);
 353   }
 354 
 355   __ pop_FPU_state();
 356   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 357 
 358   __ popf();
 359   __ popa();
 360   // Get the rbp, described implicitly by the frame sender code (no oopMap)
 361   __ pop(rbp);
 362 }
 363 
 364 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 365 
 366   // Just restore result register. Only used by deoptimization. By
 367   // now any callee save register that needs to be restore to a c2
 368   // caller of the deoptee has been extracted into the vframeArray
 369   // and will be stuffed into the c2i adapter we create for later
 370   // restoration so only result registers need to be restored here.
 371   //
 372 
 373   __ frstor(Address(rsp, 0));      // Restore fpu state
 374 
 375   // Recover XMM & FPU state
 376   if( UseSSE == 1 ) {
 377     __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
 378   } else if( UseSSE >= 2 ) {
 379     __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
 380   }
 381   __ movptr(rax, Address(rsp, rax_off*wordSize));
 382   __ movptr(rdx, Address(rsp, rdx_off*wordSize));
 383   // Pop all of the register save are off the stack except the return address
 384   __ addptr(rsp, return_off * wordSize);
 385 }
 386 
 387 // Is vector's size (in bytes) bigger than a size saved by default?
 388 // 16 bytes XMM registers are saved by default using SSE2 movdqu instructions.
 389 // Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
 390 bool SharedRuntime::is_wide_vector(int size) {
 391   return size > 16;
 392 }
 393 
 394 // The java_calling_convention describes stack locations as ideal slots on
 395 // a frame with no abi restrictions. Since we must observe abi restrictions
 396 // (like the placement of the register window) the slots must be biased by
 397 // the following value.
 398 static int reg2offset_in(VMReg r) {
 399   // Account for saved rbp, and return address
 400   // This should really be in_preserve_stack_slots
 401   return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
 402 }
 403 
 404 static int reg2offset_out(VMReg r) {
 405   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 406 }
 407 
 408 // ---------------------------------------------------------------------------
 409 // Read the array of BasicTypes from a signature, and compute where the
 410 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 411 // quantities.  Values less than SharedInfo::stack0 are registers, those above
 412 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 413 // as framesizes are fixed.
 414 // VMRegImpl::stack0 refers to the first slot 0(sp).
 415 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.
 416 // Register up to Register::number_of_registers are the 32-bit
 417 // integer registers.
 418 
 419 // Pass first two oop/int args in registers ECX and EDX.
 420 // Pass first two float/double args in registers XMM0 and XMM1.
 421 // Doubles have precedence, so if you pass a mix of floats and doubles
 422 // the doubles will grab the registers before the floats will.
 423 
 424 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 425 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 426 // units regardless of build. Of course for i486 there is no 64 bit build
 427 
 428 
 429 // ---------------------------------------------------------------------------
 430 // The compiled Java calling convention.
 431 // Pass first two oop/int args in registers ECX and EDX.
 432 // Pass first two float/double args in registers XMM0 and XMM1.
 433 // Doubles have precedence, so if you pass a mix of floats and doubles
 434 // the doubles will grab the registers before the floats will.
 435 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 436                                            VMRegPair *regs,
 437                                            int total_args_passed) {
 438   uint    stack = 0;          // Starting stack position for args on stack
 439 
 440 
 441   // Pass first two oop/int args in registers ECX and EDX.
 442   uint reg_arg0 = 9999;
 443   uint reg_arg1 = 9999;
 444 
 445   // Pass first two float/double args in registers XMM0 and XMM1.
 446   // Doubles have precedence, so if you pass a mix of floats and doubles
 447   // the doubles will grab the registers before the floats will.
 448   // CNC - TURNED OFF FOR non-SSE.
 449   //       On Intel we have to round all doubles (and most floats) at
 450   //       call sites by storing to the stack in any case.
 451   // UseSSE=0 ==> Don't Use ==> 9999+0
 452   // UseSSE=1 ==> Floats only ==> 9999+1
 453   // UseSSE>=2 ==> Floats or doubles ==> 9999+2
 454   enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
 455   uint fargs = (UseSSE>=2) ? 2 : UseSSE;
 456   uint freg_arg0 = 9999+fargs;
 457   uint freg_arg1 = 9999+fargs;
 458 
 459   // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
 460   int i;
 461   for( i = 0; i < total_args_passed; i++) {
 462     if( sig_bt[i] == T_DOUBLE ) {
 463       // first 2 doubles go in registers
 464       if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
 465       else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
 466       else // Else double is passed low on the stack to be aligned.
 467         stack += 2;
 468     } else if( sig_bt[i] == T_LONG ) {
 469       stack += 2;
 470     }
 471   }
 472   int dstack = 0;             // Separate counter for placing doubles
 473 
 474   // Now pick where all else goes.
 475   for( i = 0; i < total_args_passed; i++) {
 476     // From the type and the argument number (count) compute the location
 477     switch( sig_bt[i] ) {
 478     case T_SHORT:
 479     case T_CHAR:
 480     case T_BYTE:
 481     case T_BOOLEAN:
 482     case T_INT:
 483     case T_ARRAY:
 484     case T_OBJECT:
 485     case T_PRIMITIVE_OBJECT:
 486     case T_ADDRESS:
 487       if( reg_arg0 == 9999 )  {
 488         reg_arg0 = i;
 489         regs[i].set1(rcx->as_VMReg());
 490       } else if( reg_arg1 == 9999 )  {
 491         reg_arg1 = i;
 492         regs[i].set1(rdx->as_VMReg());
 493       } else {
 494         regs[i].set1(VMRegImpl::stack2reg(stack++));
 495       }
 496       break;
 497     case T_FLOAT:
 498       if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
 499         freg_arg0 = i;
 500         regs[i].set1(xmm0->as_VMReg());
 501       } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
 502         freg_arg1 = i;
 503         regs[i].set1(xmm1->as_VMReg());
 504       } else {
 505         regs[i].set1(VMRegImpl::stack2reg(stack++));
 506       }
 507       break;
 508     case T_LONG:
 509       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 510       regs[i].set2(VMRegImpl::stack2reg(dstack));
 511       dstack += 2;
 512       break;
 513     case T_DOUBLE:
 514       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 515       if( freg_arg0 == (uint)i ) {
 516         regs[i].set2(xmm0->as_VMReg());
 517       } else if( freg_arg1 == (uint)i ) {
 518         regs[i].set2(xmm1->as_VMReg());
 519       } else {
 520         regs[i].set2(VMRegImpl::stack2reg(dstack));
 521         dstack += 2;
 522       }
 523       break;
 524     case T_VOID: regs[i].set_bad(); break;
 525       break;
 526     default:
 527       ShouldNotReachHere();
 528       break;
 529     }
 530   }
 531 
 532   // return value can be odd number of VMRegImpl stack slots make multiple of 2
 533   return align_up(stack, 2);
 534 }
 535 
 536 const uint SharedRuntime::java_return_convention_max_int = 1;
 537 const uint SharedRuntime::java_return_convention_max_float = 1;
 538 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
 539                                           VMRegPair *regs,
 540                                           int total_args_passed) {
 541   Unimplemented();
 542   return 0;
 543 }
 544 
 545 // Patch the callers callsite with entry to compiled code if it exists.
 546 static void patch_callers_callsite(MacroAssembler *masm) {
 547   Label L;
 548   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 549   __ jcc(Assembler::equal, L);
 550   // Schedule the branch target address early.
 551   // Call into the VM to patch the caller, then jump to compiled callee
 552   // rax, isn't live so capture return address while we easily can
 553   __ movptr(rax, Address(rsp, 0));
 554   __ pusha();
 555   __ pushf();
 556 
 557   if (UseSSE == 1) {
 558     __ subptr(rsp, 2*wordSize);
 559     __ movflt(Address(rsp, 0), xmm0);
 560     __ movflt(Address(rsp, wordSize), xmm1);
 561   }
 562   if (UseSSE >= 2) {
 563     __ subptr(rsp, 4*wordSize);
 564     __ movdbl(Address(rsp, 0), xmm0);
 565     __ movdbl(Address(rsp, 2*wordSize), xmm1);
 566   }
 567 #ifdef COMPILER2
 568   // C2 may leave the stack dirty if not in SSE2+ mode
 569   if (UseSSE >= 2) {
 570     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 571   } else {
 572     __ empty_FPU_stack();
 573   }
 574 #endif /* COMPILER2 */
 575 
 576   // VM needs caller's callsite
 577   __ push(rax);
 578   // VM needs target method
 579   __ push(rbx);
 580   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 581   __ addptr(rsp, 2*wordSize);
 582 
 583   if (UseSSE == 1) {
 584     __ movflt(xmm0, Address(rsp, 0));
 585     __ movflt(xmm1, Address(rsp, wordSize));
 586     __ addptr(rsp, 2*wordSize);
 587   }
 588   if (UseSSE >= 2) {
 589     __ movdbl(xmm0, Address(rsp, 0));
 590     __ movdbl(xmm1, Address(rsp, 2*wordSize));
 591     __ addptr(rsp, 4*wordSize);
 592   }
 593 
 594   __ popf();
 595   __ popa();
 596   __ bind(L);
 597 }
 598 
 599 
 600 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
 601   int next_off = st_off - Interpreter::stackElementSize;
 602   __ movdbl(Address(rsp, next_off), r);
 603 }
 604 
 605 static void gen_c2i_adapter(MacroAssembler *masm,
 606                             const GrowableArray<SigEntry>& sig_extended,
 607                             const VMRegPair *regs,
 608                             Label& skip_fixup,
 609                             address start,
 610                             OopMapSet*& oop_maps,
 611                             int& frame_complete,
 612                             int& frame_size_in_words) {
 613   // Before we get into the guts of the C2I adapter, see if we should be here
 614   // at all.  We've come from compiled code and are attempting to jump to the
 615   // interpreter, which means the caller made a static call to get here
 616   // (vcalls always get a compiled target if there is one).  Check for a
 617   // compiled target.  If there is one, we need to patch the caller's call.
 618   patch_callers_callsite(masm);
 619 
 620   __ bind(skip_fixup);
 621 
 622 #ifdef COMPILER2
 623   // C2 may leave the stack dirty if not in SSE2+ mode
 624   if (UseSSE >= 2) {
 625     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 626   } else {
 627     __ empty_FPU_stack();
 628   }
 629 #endif /* COMPILER2 */
 630 
 631   // Since all args are passed on the stack, total_args_passed * interpreter_
 632   // stack_element_size  is the
 633   // space we need.
 634   int extraspace = sig_extended.length() * Interpreter::stackElementSize;
 635 
 636   // Get return address
 637   __ pop(rax);
 638 
 639   // set senderSP value
 640   __ movptr(rsi, rsp);
 641 
 642   __ subptr(rsp, extraspace);
 643 
 644   // Now write the args into the outgoing interpreter space
 645   for (int i = 0; i < sig_extended.length(); i++) {
 646     if (sig_extended.at(i)._bt == T_VOID) {
 647       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
 648       continue;
 649     }
 650 
 651     // st_off points to lowest address on stack.
 652     int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
 653     int next_off = st_off - Interpreter::stackElementSize;
 654 
 655     // Say 4 args:
 656     // i   st_off
 657     // 0   12 T_LONG
 658     // 1    8 T_VOID
 659     // 2    4 T_OBJECT
 660     // 3    0 T_BOOL
 661     VMReg r_1 = regs[i].first();
 662     VMReg r_2 = regs[i].second();
 663     if (!r_1->is_valid()) {
 664       assert(!r_2->is_valid(), "");
 665       continue;
 666     }
 667 
 668     if (r_1->is_stack()) {
 669       // memory to memory use fpu stack top
 670       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 671 
 672       if (!r_2->is_valid()) {
 673         __ movl(rdi, Address(rsp, ld_off));
 674         __ movptr(Address(rsp, st_off), rdi);
 675       } else {
 676 
 677         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
 678         // st_off == MSW, st_off-wordSize == LSW
 679 
 680         __ movptr(rdi, Address(rsp, ld_off));
 681         __ movptr(Address(rsp, next_off), rdi);
 682         __ movptr(rdi, Address(rsp, ld_off + wordSize));
 683         __ movptr(Address(rsp, st_off), rdi);
 684       }
 685     } else if (r_1->is_Register()) {
 686       Register r = r_1->as_Register();
 687       if (!r_2->is_valid()) {
 688         __ movl(Address(rsp, st_off), r);
 689       } else {
 690         // long/double in gpr
 691         ShouldNotReachHere();
 692       }
 693     } else {
 694       assert(r_1->is_XMMRegister(), "");
 695       if (!r_2->is_valid()) {
 696         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 697       } else {
 698         assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type");
 699         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
 700       }
 701     }
 702   }
 703 
 704   // Schedule the branch target address early.
 705   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 706   // And repush original return address
 707   __ push(rax);
 708   __ jmp(rcx);
 709 }
 710 
 711 
 712 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
 713   int next_val_off = ld_off - Interpreter::stackElementSize;
 714   __ movdbl(r, Address(saved_sp, next_val_off));
 715 }
 716 
 717 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 718                         address code_start, address code_end,
 719                         Label& L_ok) {
 720   Label L_fail;
 721   __ lea(temp_reg, ExternalAddress(code_start));
 722   __ cmpptr(pc_reg, temp_reg);
 723   __ jcc(Assembler::belowEqual, L_fail);
 724   __ lea(temp_reg, ExternalAddress(code_end));
 725   __ cmpptr(pc_reg, temp_reg);
 726   __ jcc(Assembler::below, L_ok);
 727   __ bind(L_fail);
 728 }
 729 
 730 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 731                                     int comp_args_on_stack,
 732                                     const GrowableArray<SigEntry>& sig_extended,
 733                                     const VMRegPair *regs) {
 734 
 735   // Note: rsi contains the senderSP on entry. We must preserve it since
 736   // we may do a i2c -> c2i transition if we lose a race where compiled
 737   // code goes non-entrant while we get args ready.
 738 
 739   // Adapters can be frameless because they do not require the caller
 740   // to perform additional cleanup work, such as correcting the stack pointer.
 741   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 742   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 743   // even if a callee has modified the stack pointer.
 744   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 745   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 746   // up via the senderSP register).
 747   // In other words, if *either* the caller or callee is interpreted, we can
 748   // get the stack pointer repaired after a call.
 749   // This is why c2i and i2c adapters cannot be indefinitely composed.
 750   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 751   // both caller and callee would be compiled methods, and neither would
 752   // clean up the stack pointer changes performed by the two adapters.
 753   // If this happens, control eventually transfers back to the compiled
 754   // caller, but with an uncorrected stack, causing delayed havoc.
 755 
 756   // Pick up the return address
 757   __ movptr(rax, Address(rsp, 0));
 758 
 759   if (VerifyAdapterCalls &&
 760       (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
 761     // So, let's test for cascading c2i/i2c adapters right now.
 762     //  assert(Interpreter::contains($return_addr) ||
 763     //         StubRoutines::contains($return_addr),
 764     //         "i2c adapter must return to an interpreter frame");
 765     __ block_comment("verify_i2c { ");
 766     Label L_ok;
 767     if (Interpreter::code() != NULL)
 768       range_check(masm, rax, rdi,
 769                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 770                   L_ok);
 771     if (StubRoutines::code1() != NULL)
 772       range_check(masm, rax, rdi,
 773                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 774                   L_ok);
 775     if (StubRoutines::code2() != NULL)
 776       range_check(masm, rax, rdi,
 777                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 778                   L_ok);
 779     const char* msg = "i2c adapter must return to an interpreter frame";
 780     __ block_comment(msg);
 781     __ stop(msg);
 782     __ bind(L_ok);
 783     __ block_comment("} verify_i2ce ");
 784   }
 785 
 786   // Must preserve original SP for loading incoming arguments because
 787   // we need to align the outgoing SP for compiled code.
 788   __ movptr(rdi, rsp);
 789 
 790   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
 791   // in registers, we will occasionally have no stack args.
 792   int comp_words_on_stack = 0;
 793   if (comp_args_on_stack) {
 794     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
 795     // registers are below.  By subtracting stack0, we either get a negative
 796     // number (all values in registers) or the maximum stack slot accessed.
 797     // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
 798     // Convert 4-byte stack slots to words.
 799     comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
 800     // Round up to miminum stack alignment, in wordSize
 801     comp_words_on_stack = align_up(comp_words_on_stack, 2);
 802     __ subptr(rsp, comp_words_on_stack * wordSize);
 803   }
 804 
 805   // Align the outgoing SP
 806   __ andptr(rsp, -(StackAlignmentInBytes));
 807 
 808   // push the return address on the stack (note that pushing, rather
 809   // than storing it, yields the correct frame alignment for the callee)
 810   __ push(rax);
 811 
 812   // Put saved SP in another register
 813   const Register saved_sp = rax;
 814   __ movptr(saved_sp, rdi);
 815 
 816 
 817   // Will jump to the compiled code just as if compiled code was doing it.
 818   // Pre-load the register-jump target early, to schedule it better.
 819   __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 820 
 821   // Now generate the shuffle code.  Pick up all register args and move the
 822   // rest through the floating point stack top.
 823   for (int i = 0; i < sig_extended.length(); i++) {
 824     if (sig_extended.at(i)._bt == T_VOID) {
 825       // Longs and doubles are passed in native word order, but misaligned
 826       // in the 32-bit build.
 827       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
 828       continue;
 829     }
 830 
 831     // Pick up 0, 1 or 2 words from SP+offset.
 832 
 833     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 834             "scrambled load targets?");
 835     // Load in argument order going down.
 836     int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize;
 837     // Point to interpreter value (vs. tag)
 838     int next_off = ld_off - Interpreter::stackElementSize;
 839     //
 840     //
 841     //
 842     VMReg r_1 = regs[i].first();
 843     VMReg r_2 = regs[i].second();
 844     if (!r_1->is_valid()) {
 845       assert(!r_2->is_valid(), "");
 846       continue;
 847     }
 848     if (r_1->is_stack()) {
 849       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 850       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 851 
 852       // We can use rsi as a temp here because compiled code doesn't need rsi as an input
 853       // and if we end up going thru a c2i because of a miss a reasonable value of rsi
 854       // we be generated.
 855       if (!r_2->is_valid()) {
 856         // __ fld_s(Address(saved_sp, ld_off));
 857         // __ fstp_s(Address(rsp, st_off));
 858         __ movl(rsi, Address(saved_sp, ld_off));
 859         __ movptr(Address(rsp, st_off), rsi);
 860       } else {
 861         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 862         // are accessed as negative so LSW is at LOW address
 863 
 864         // ld_off is MSW so get LSW
 865         // st_off is LSW (i.e. reg.first())
 866         // __ fld_d(Address(saved_sp, next_off));
 867         // __ fstp_d(Address(rsp, st_off));
 868         //
 869         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 870         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 871         // So we must adjust where to pick up the data to match the interpreter.
 872         //
 873         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 874         // are accessed as negative so LSW is at LOW address
 875 
 876         // ld_off is MSW so get LSW
 877         __ movptr(rsi, Address(saved_sp, next_off));
 878         __ movptr(Address(rsp, st_off), rsi);
 879         __ movptr(rsi, Address(saved_sp, ld_off));
 880         __ movptr(Address(rsp, st_off + wordSize), rsi);
 881       }
 882     } else if (r_1->is_Register()) {  // Register argument
 883       Register r = r_1->as_Register();
 884       assert(r != rax, "must be different");
 885       if (r_2->is_valid()) {
 886         //
 887         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 888         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 889         // So we must adjust where to pick up the data to match the interpreter.
 890 
 891         // this can be a misaligned move
 892         __ movptr(r, Address(saved_sp, next_off));
 893         assert(r_2->as_Register() != rax, "need another temporary register");
 894         // Remember r_1 is low address (and LSB on x86)
 895         // So r_2 gets loaded from high address regardless of the platform
 896         __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
 897       } else {
 898         __ movl(r, Address(saved_sp, ld_off));
 899       }
 900     } else {
 901       assert(r_1->is_XMMRegister(), "");
 902       if (!r_2->is_valid()) {
 903         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 904       } else {
 905         move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
 906       }
 907     }
 908   }
 909 
 910   // 6243940 We might end up in handle_wrong_method if
 911   // the callee is deoptimized as we race thru here. If that
 912   // happens we don't want to take a safepoint because the
 913   // caller frame will look interpreted and arguments are now
 914   // "compiled" so it is much better to make this transition
 915   // invisible to the stack walking code. Unfortunately if
 916   // we try and find the callee by normal means a safepoint
 917   // is possible. So we stash the desired callee in the thread
 918   // and the vm will find there should this case occur.
 919 
 920   __ get_thread(rax);
 921   __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
 922 
 923   // move Method* to rax, in case we end up in an c2i adapter.
 924   // the c2i adapters expect Method* in rax, (c2) because c2's
 925   // resolve stubs return the result (the method) in rax,.
 926   // I'd love to fix this.
 927   __ mov(rax, rbx);
 928 
 929   __ jmp(rdi);
 930 }
 931 
 932 // ---------------------------------------------------------------
 933 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 934                                                             int comp_args_on_stack,
 935                                                             const GrowableArray<SigEntry>& sig_extended,
 936                                                             const VMRegPair *regs,
 937                                                             AdapterFingerPrint* fingerprint,
 938                                                             AdapterBlob*& new_adapter) {
 939   address i2c_entry = __ pc();
 940 
 941   gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
 942 
 943   // -------------------------------------------------------------------------
 944   // Generate a C2I adapter.  On entry we know rbx, holds the Method* during calls
 945   // to the interpreter.  The args start out packed in the compiled layout.  They
 946   // need to be unpacked into the interpreter layout.  This will almost always
 947   // require some stack space.  We grow the current (compiled) stack, then repack
 948   // the args.  We  finally end in a jump to the generic interpreter entry point.
 949   // On exit from the interpreter, the interpreter will restore our SP (lest the
 950   // compiled code, which relies solely on SP and not EBP, get sick).
 951 
 952   address c2i_unverified_entry = __ pc();
 953   Label skip_fixup;
 954 
 955   Register holder = rax;
 956   Register receiver = rcx;
 957   Register temp = rbx;
 958 
 959   {
 960 
 961     Label missed;
 962     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
 963     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 964     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
 965     __ jcc(Assembler::notEqual, missed);
 966     // Method might have been compiled since the call site was patched to
 967     // interpreted if that is the case treat it as a miss so we can get
 968     // the call site corrected.
 969     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 970     __ jcc(Assembler::equal, skip_fixup);
 971 
 972     __ bind(missed);
 973     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 974   }
 975 
 976   address c2i_entry = __ pc();
 977 
 978   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 979   bs->c2i_entry_barrier(masm);
 980 
 981   OopMapSet* oop_maps = NULL;
 982   int frame_complete = CodeOffsets::frame_never_safe;
 983   int frame_size_in_words = 0;
 984   gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
 985 
 986   __ flush();
 987   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
 988   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 989 }
 990 
 991 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 992                                          VMRegPair *regs,
 993                                          VMRegPair *regs2,
 994                                          int total_args_passed) {
 995   assert(regs2 == NULL, "not needed on x86");
 996 // We return the amount of VMRegImpl stack slots we need to reserve for all
 997 // the arguments NOT counting out_preserve_stack_slots.
 998 
 999   uint    stack = 0;        // All arguments on stack
1000 
1001   for( int i = 0; i < total_args_passed; i++) {
1002     // From the type and the argument number (count) compute the location
1003     switch( sig_bt[i] ) {
1004     case T_BOOLEAN:
1005     case T_CHAR:
1006     case T_FLOAT:
1007     case T_BYTE:
1008     case T_SHORT:
1009     case T_INT:
1010     case T_OBJECT:
1011     case T_PRIMITIVE_OBJECT:
1012     case T_ARRAY:
1013     case T_ADDRESS:
1014     case T_METADATA:
1015       regs[i].set1(VMRegImpl::stack2reg(stack++));
1016       break;
1017     case T_LONG:
1018     case T_DOUBLE: // The stack numbering is reversed from Java
1019       // Since C arguments do not get reversed, the ordering for
1020       // doubles on the stack must be opposite the Java convention
1021       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
1022       regs[i].set2(VMRegImpl::stack2reg(stack));
1023       stack += 2;
1024       break;
1025     case T_VOID: regs[i].set_bad(); break;
1026     default:
1027       ShouldNotReachHere();
1028       break;
1029     }
1030   }
1031   return stack;
1032 }
1033 
1034 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1035                                              uint num_bits,
1036                                              uint total_args_passed) {
1037   Unimplemented();
1038   return 0;
1039 }
1040 
1041 // A simple move of integer like type
1042 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1043   if (src.first()->is_stack()) {
1044     if (dst.first()->is_stack()) {
1045       // stack to stack
1046       // __ ld(FP, reg2offset(src.first()), L5);
1047       // __ st(L5, SP, reg2offset(dst.first()));
1048       __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1049       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1050     } else {
1051       // stack to reg
1052       __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
1053     }
1054   } else if (dst.first()->is_stack()) {
1055     // reg to stack
1056     // no need to sign extend on 64bit
1057     __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1058   } else {
1059     if (dst.first() != src.first()) {
1060       __ mov(dst.first()->as_Register(), src.first()->as_Register());
1061     }
1062   }
1063 }
1064 
1065 // An oop arg. Must pass a handle not the oop itself
1066 static void object_move(MacroAssembler* masm,
1067                         OopMap* map,
1068                         int oop_handle_offset,
1069                         int framesize_in_slots,
1070                         VMRegPair src,
1071                         VMRegPair dst,
1072                         bool is_receiver,
1073                         int* receiver_offset) {
1074 
1075   // Because of the calling conventions we know that src can be a
1076   // register or a stack location. dst can only be a stack location.
1077 
1078   assert(dst.first()->is_stack(), "must be stack");
1079   // must pass a handle. First figure out the location we use as a handle
1080 
1081   if (src.first()->is_stack()) {
1082     // Oop is already on the stack as an argument
1083     Register rHandle = rax;
1084     Label nil;
1085     __ xorptr(rHandle, rHandle);
1086     __ cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
1087     __ jcc(Assembler::equal, nil);
1088     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1089     __ bind(nil);
1090     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1091 
1092     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1093     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1094     if (is_receiver) {
1095       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1096     }
1097   } else {
1098     // Oop is in a register we must store it to the space we reserve
1099     // on the stack for oop_handles
1100     const Register rOop = src.first()->as_Register();
1101     const Register rHandle = rax;
1102     int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1103     int offset = oop_slot*VMRegImpl::stack_slot_size;
1104     Label skip;
1105     __ movptr(Address(rsp, offset), rOop);
1106     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1107     __ xorptr(rHandle, rHandle);
1108     __ cmpptr(rOop, NULL_WORD);
1109     __ jcc(Assembler::equal, skip);
1110     __ lea(rHandle, Address(rsp, offset));
1111     __ bind(skip);
1112     // Store the handle parameter
1113     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1114     if (is_receiver) {
1115       *receiver_offset = offset;
1116     }
1117   }
1118 }
1119 
1120 // A float arg may have to do float reg int reg conversion
1121 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1122   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1123 
1124   // Because of the calling convention we know that src is either a stack location
1125   // or an xmm register. dst can only be a stack location.
1126 
1127   assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1128 
1129   if (src.first()->is_stack()) {
1130     __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1131     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1132   } else {
1133     // reg to stack
1134     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1135   }
1136 }
1137 
1138 // A long move
1139 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1140 
1141   // The only legal possibility for a long_move VMRegPair is:
1142   // 1: two stack slots (possibly unaligned)
1143   // as neither the java  or C calling convention will use registers
1144   // for longs.
1145 
1146   if (src.first()->is_stack() && dst.first()->is_stack()) {
1147     assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1148     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1149     __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1150     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1151     __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1152   } else {
1153     ShouldNotReachHere();
1154   }
1155 }
1156 
1157 // A double move
1158 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1159 
1160   // The only legal possibilities for a double_move VMRegPair are:
1161   // The painful thing here is that like long_move a VMRegPair might be
1162 
1163   // Because of the calling convention we know that src is either
1164   //   1: a single physical register (xmm registers only)
1165   //   2: two stack slots (possibly unaligned)
1166   // dst can only be a pair of stack slots.
1167 
1168   assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1169 
1170   if (src.first()->is_stack()) {
1171     // source is all stack
1172     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1173     __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1174     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1175     __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1176   } else {
1177     // reg to stack
1178     // No worries about stack alignment
1179     __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1180   }
1181 }
1182 
1183 
1184 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1185   // We always ignore the frame_slots arg and just use the space just below frame pointer
1186   // which by this time is free to use
1187   switch (ret_type) {
1188   case T_FLOAT:
1189     __ fstp_s(Address(rbp, -wordSize));
1190     break;
1191   case T_DOUBLE:
1192     __ fstp_d(Address(rbp, -2*wordSize));
1193     break;
1194   case T_VOID:  break;
1195   case T_LONG:
1196     __ movptr(Address(rbp, -wordSize), rax);
1197     __ movptr(Address(rbp, -2*wordSize), rdx);
1198     break;
1199   default: {
1200     __ movptr(Address(rbp, -wordSize), rax);
1201     }
1202   }
1203 }
1204 
1205 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1206   // We always ignore the frame_slots arg and just use the space just below frame pointer
1207   // which by this time is free to use
1208   switch (ret_type) {
1209   case T_FLOAT:
1210     __ fld_s(Address(rbp, -wordSize));
1211     break;
1212   case T_DOUBLE:
1213     __ fld_d(Address(rbp, -2*wordSize));
1214     break;
1215   case T_LONG:
1216     __ movptr(rax, Address(rbp, -wordSize));
1217     __ movptr(rdx, Address(rbp, -2*wordSize));
1218     break;
1219   case T_VOID:  break;
1220   default: {
1221     __ movptr(rax, Address(rbp, -wordSize));
1222     }
1223   }
1224 }
1225 
1226 static void verify_oop_args(MacroAssembler* masm,
1227                             const methodHandle& method,
1228                             const BasicType* sig_bt,
1229                             const VMRegPair* regs) {
1230   Register temp_reg = rbx;  // not part of any compiled calling seq
1231   if (VerifyOops) {
1232     for (int i = 0; i < method->size_of_parameters(); i++) {
1233       if (is_reference_type(sig_bt[i])) {
1234         VMReg r = regs[i].first();
1235         assert(r->is_valid(), "bad oop arg");
1236         if (r->is_stack()) {
1237           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1238           __ verify_oop(temp_reg);
1239         } else {
1240           __ verify_oop(r->as_Register());
1241         }
1242       }
1243     }
1244   }
1245 }
1246 
1247 static void gen_special_dispatch(MacroAssembler* masm,
1248                                  const methodHandle& method,
1249                                  const BasicType* sig_bt,
1250                                  const VMRegPair* regs) {
1251   verify_oop_args(masm, method, sig_bt, regs);
1252   vmIntrinsics::ID iid = method->intrinsic_id();
1253 
1254   // Now write the args into the outgoing interpreter space
1255   bool     has_receiver   = false;
1256   Register receiver_reg   = noreg;
1257   int      member_arg_pos = -1;
1258   Register member_reg     = noreg;
1259   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1260   if (ref_kind != 0) {
1261     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1262     member_reg = rbx;  // known to be free at this point
1263     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1264   } else if (iid == vmIntrinsics::_invokeBasic) {
1265     has_receiver = true;
1266   } else {
1267     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1268   }
1269 
1270   if (member_reg != noreg) {
1271     // Load the member_arg into register, if necessary.
1272     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1273     VMReg r = regs[member_arg_pos].first();
1274     if (r->is_stack()) {
1275       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1276     } else {
1277       // no data motion is needed
1278       member_reg = r->as_Register();
1279     }
1280   }
1281 
1282   if (has_receiver) {
1283     // Make sure the receiver is loaded into a register.
1284     assert(method->size_of_parameters() > 0, "oob");
1285     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1286     VMReg r = regs[0].first();
1287     assert(r->is_valid(), "bad receiver arg");
1288     if (r->is_stack()) {
1289       // Porting note:  This assumes that compiled calling conventions always
1290       // pass the receiver oop in a register.  If this is not true on some
1291       // platform, pick a temp and load the receiver from stack.
1292       fatal("receiver always in a register");
1293       receiver_reg = rcx;  // known to be free at this point
1294       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1295     } else {
1296       // no data motion is needed
1297       receiver_reg = r->as_Register();
1298     }
1299   }
1300 
1301   // Figure out which address we are really jumping to:
1302   MethodHandles::generate_method_handle_dispatch(masm, iid,
1303                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1304 }
1305 
1306 // ---------------------------------------------------------------------------
1307 // Generate a native wrapper for a given method.  The method takes arguments
1308 // in the Java compiled code convention, marshals them to the native
1309 // convention (handlizes oops, etc), transitions to native, makes the call,
1310 // returns to java state (possibly blocking), unhandlizes any result and
1311 // returns.
1312 //
1313 // Critical native functions are a shorthand for the use of
1314 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1315 // functions.  The wrapper is expected to unpack the arguments before
1316 // passing them to the callee. Critical native functions leave the state _in_Java,
1317 // since they cannot stop for GC.
1318 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1319 // block and the check for pending exceptions it's impossible for them
1320 // to be thrown.
1321 //
1322 //
1323 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1324                                                 const methodHandle& method,
1325                                                 int compile_id,
1326                                                 BasicType* in_sig_bt,
1327                                                 VMRegPair* in_regs,
1328                                                 BasicType ret_type) {
1329   if (method->is_method_handle_intrinsic()) {
1330     vmIntrinsics::ID iid = method->intrinsic_id();
1331     intptr_t start = (intptr_t)__ pc();
1332     int vep_offset = ((intptr_t)__ pc()) - start;
1333     gen_special_dispatch(masm,
1334                          method,
1335                          in_sig_bt,
1336                          in_regs);
1337     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1338     __ flush();
1339     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1340     return nmethod::new_native_nmethod(method,
1341                                        compile_id,
1342                                        masm->code(),
1343                                        vep_offset,
1344                                        frame_complete,
1345                                        stack_slots / VMRegImpl::slots_per_word,
1346                                        in_ByteSize(-1),
1347                                        in_ByteSize(-1),
1348                                        (OopMapSet*)NULL);
1349   }
1350   address native_func = method->native_function();
1351   assert(native_func != NULL, "must have function");
1352 
1353   // An OopMap for lock (and class if static)
1354   OopMapSet *oop_maps = new OopMapSet();
1355 
1356   // We have received a description of where all the java arg are located
1357   // on entry to the wrapper. We need to convert these args to where
1358   // the jni function will expect them. To figure out where they go
1359   // we convert the java signature to a C signature by inserting
1360   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1361 
1362   const int total_in_args = method->size_of_parameters();
1363   int  total_c_args       = total_in_args + (method->is_static() ? 2 : 1);
1364 
1365   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1366   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1367   BasicType* in_elem_bt = NULL;
1368 
1369   int argc = 0;
1370   out_sig_bt[argc++] = T_ADDRESS;
1371   if (method->is_static()) {
1372     out_sig_bt[argc++] = T_OBJECT;
1373   }
1374 
1375   for (int i = 0; i < total_in_args ; i++ ) {
1376     out_sig_bt[argc++] = in_sig_bt[i];
1377   }
1378 
1379   // Now figure out where the args must be stored and how much stack space
1380   // they require.
1381   int out_arg_slots;
1382   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1383 
1384   // Compute framesize for the wrapper.  We need to handlize all oops in
1385   // registers a max of 2 on x86.
1386 
1387   // Calculate the total number of stack slots we will need.
1388 
1389   // First count the abi requirement plus all of the outgoing args
1390   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1391 
1392   // Now the space for the inbound oop handle area
1393   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1394 
1395   int oop_handle_offset = stack_slots;
1396   stack_slots += total_save_slots;
1397 
1398   // Now any space we need for handlizing a klass if static method
1399 
1400   int klass_slot_offset = 0;
1401   int klass_offset = -1;
1402   int lock_slot_offset = 0;
1403   bool is_static = false;
1404 
1405   if (method->is_static()) {
1406     klass_slot_offset = stack_slots;
1407     stack_slots += VMRegImpl::slots_per_word;
1408     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1409     is_static = true;
1410   }
1411 
1412   // Plus a lock if needed
1413 
1414   if (method->is_synchronized()) {
1415     lock_slot_offset = stack_slots;
1416     stack_slots += VMRegImpl::slots_per_word;
1417   }
1418 
1419   // Now a place (+2) to save return values or temp during shuffling
1420   // + 2 for return address (which we own) and saved rbp,
1421   stack_slots += 4;
1422 
1423   // Ok The space we have allocated will look like:
1424   //
1425   //
1426   // FP-> |                     |
1427   //      |---------------------|
1428   //      | 2 slots for moves   |
1429   //      |---------------------|
1430   //      | lock box (if sync)  |
1431   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1432   //      | klass (if static)   |
1433   //      |---------------------| <- klass_slot_offset
1434   //      | oopHandle area      |
1435   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1436   //      | outbound memory     |
1437   //      | based arguments     |
1438   //      |                     |
1439   //      |---------------------|
1440   //      |                     |
1441   // SP-> | out_preserved_slots |
1442   //
1443   //
1444   // ****************************************************************************
1445   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1446   // arguments off of the stack after the jni call. Before the call we can use
1447   // instructions that are SP relative. After the jni call we switch to FP
1448   // relative instructions instead of re-adjusting the stack on windows.
1449   // ****************************************************************************
1450 
1451 
1452   // Now compute actual number of stack words we need rounding to make
1453   // stack properly aligned.
1454   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1455 
1456   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1457 
1458   intptr_t start = (intptr_t)__ pc();
1459 
1460   // First thing make an ic check to see if we should even be here
1461 
1462   // We are free to use all registers as temps without saving them and
1463   // restoring them except rbp. rbp is the only callee save register
1464   // as far as the interpreter and the compiler(s) are concerned.
1465 
1466 
1467   const Register ic_reg = rax;
1468   const Register receiver = rcx;
1469   Label hit;
1470   Label exception_pending;
1471 
1472   __ verify_oop(receiver);
1473   __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
1474   __ jcc(Assembler::equal, hit);
1475 
1476   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1477 
1478   // verified entry must be aligned for code patching.
1479   // and the first 5 bytes must be in the same cache line
1480   // if we align at 8 then we will be sure 5 bytes are in the same line
1481   __ align(8);
1482 
1483   __ bind(hit);
1484 
1485   int vep_offset = ((intptr_t)__ pc()) - start;
1486 
1487 #ifdef COMPILER1
1488   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1489   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1490     inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1491    }
1492 #endif // COMPILER1
1493 
1494   // The instruction at the verified entry point must be 5 bytes or longer
1495   // because it can be patched on the fly by make_non_entrant. The stack bang
1496   // instruction fits that requirement.
1497 
1498   // Generate stack overflow check
1499   __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
1500 
1501   // Generate a new frame for the wrapper.
1502   __ enter();
1503   // -2 because return address is already present and so is saved rbp
1504   __ subptr(rsp, stack_size - 2*wordSize);
1505 
1506 
1507   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1508   bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */);
1509 
1510   // Frame is now completed as far as size and linkage.
1511   int frame_complete = ((intptr_t)__ pc()) - start;
1512 
1513   if (UseRTMLocking) {
1514     // Abort RTM transaction before calling JNI
1515     // because critical section will be large and will be
1516     // aborted anyway. Also nmethod could be deoptimized.
1517     __ xabort(0);
1518   }
1519 
1520   // Calculate the difference between rsp and rbp,. We need to know it
1521   // after the native call because on windows Java Natives will pop
1522   // the arguments and it is painful to do rsp relative addressing
1523   // in a platform independent way. So after the call we switch to
1524   // rbp, relative addressing.
1525 
1526   int fp_adjustment = stack_size - 2*wordSize;
1527 
1528 #ifdef COMPILER2
1529   // C2 may leave the stack dirty if not in SSE2+ mode
1530   if (UseSSE >= 2) {
1531     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1532   } else {
1533     __ empty_FPU_stack();
1534   }
1535 #endif /* COMPILER2 */
1536 
1537   // Compute the rbp, offset for any slots used after the jni call
1538 
1539   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1540 
1541   // We use rdi as a thread pointer because it is callee save and
1542   // if we load it once it is usable thru the entire wrapper
1543   const Register thread = rdi;
1544 
1545    // We use rsi as the oop handle for the receiver/klass
1546    // It is callee save so it survives the call to native
1547 
1548    const Register oop_handle_reg = rsi;
1549 
1550    __ get_thread(thread);
1551 
1552   //
1553   // We immediately shuffle the arguments so that any vm call we have to
1554   // make from here on out (sync slow path, jvmti, etc.) we will have
1555   // captured the oops from our caller and have a valid oopMap for
1556   // them.
1557 
1558   // -----------------
1559   // The Grand Shuffle
1560   //
1561   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1562   // and, if static, the class mirror instead of a receiver.  This pretty much
1563   // guarantees that register layout will not match (and x86 doesn't use reg
1564   // parms though amd does).  Since the native abi doesn't use register args
1565   // and the java conventions does we don't have to worry about collisions.
1566   // All of our moved are reg->stack or stack->stack.
1567   // We ignore the extra arguments during the shuffle and handle them at the
1568   // last moment. The shuffle is described by the two calling convention
1569   // vectors we have in our possession. We simply walk the java vector to
1570   // get the source locations and the c vector to get the destinations.
1571 
1572   int c_arg = method->is_static() ? 2 : 1;
1573 
1574   // Record rsp-based slot for receiver on stack for non-static methods
1575   int receiver_offset = -1;
1576 
1577   // This is a trick. We double the stack slots so we can claim
1578   // the oops in the caller's frame. Since we are sure to have
1579   // more args than the caller doubling is enough to make
1580   // sure we can capture all the incoming oop args from the
1581   // caller.
1582   //
1583   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1584 
1585   // Mark location of rbp,
1586   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1587 
1588   // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
1589   // Are free to temporaries if we have to do  stack to steck moves.
1590   // All inbound args are referenced based on rbp, and all outbound args via rsp.
1591 
1592   for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1593     switch (in_sig_bt[i]) {
1594       case T_ARRAY:
1595       case T_PRIMITIVE_OBJECT:
1596       case T_OBJECT:
1597         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1598                     ((i == 0) && (!is_static)),
1599                     &receiver_offset);
1600         break;
1601       case T_VOID:
1602         break;
1603 
1604       case T_FLOAT:
1605         float_move(masm, in_regs[i], out_regs[c_arg]);
1606           break;
1607 
1608       case T_DOUBLE:
1609         assert( i + 1 < total_in_args &&
1610                 in_sig_bt[i + 1] == T_VOID &&
1611                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1612         double_move(masm, in_regs[i], out_regs[c_arg]);
1613         break;
1614 
1615       case T_LONG :
1616         long_move(masm, in_regs[i], out_regs[c_arg]);
1617         break;
1618 
1619       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1620 
1621       default:
1622         simple_move32(masm, in_regs[i], out_regs[c_arg]);
1623     }
1624   }
1625 
1626   // Pre-load a static method's oop into rsi.  Used both by locking code and
1627   // the normal JNI call code.
1628   if (method->is_static()) {
1629 
1630     //  load opp into a register
1631     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1632 
1633     // Now handlize the static class mirror it's known not-null.
1634     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1635     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1636 
1637     // Now get the handle
1638     __ lea(oop_handle_reg, Address(rsp, klass_offset));
1639     // store the klass handle as second argument
1640     __ movptr(Address(rsp, wordSize), oop_handle_reg);
1641   }
1642 
1643   // Change state to native (we save the return address in the thread, since it might not
1644   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1645   // points into the right code segment. It does not have to be the correct return pc.
1646   // We use the same pc/oopMap repeatedly when we call out
1647 
1648   intptr_t the_pc = (intptr_t) __ pc();
1649   oop_maps->add_gc_map(the_pc - start, map);
1650 
1651   __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc, noreg);
1652 
1653 
1654   // We have all of the arguments setup at this point. We must not touch any register
1655   // argument registers at this point (what if we save/restore them there are no oop?
1656 
1657   {
1658     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1659     __ mov_metadata(rax, method());
1660     __ call_VM_leaf(
1661          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1662          thread, rax);
1663   }
1664 
1665   // RedefineClasses() tracing support for obsolete method entry
1666   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1667     __ mov_metadata(rax, method());
1668     __ call_VM_leaf(
1669          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1670          thread, rax);
1671   }
1672 
1673   // These are register definitions we need for locking/unlocking
1674   const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
1675   const Register obj_reg  = rcx;  // Will contain the oop
1676   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1677 
1678   Label slow_path_lock;
1679   Label lock_done;
1680 
1681   // Lock a synchronized method
1682   if (method->is_synchronized()) {
1683     Label count_mon;
1684 
1685     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1686 
1687     // Get the handle (the 2nd argument)
1688     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1689 
1690     // Get address of the box
1691 
1692     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1693 
1694     // Load the oop from the handle
1695     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1696 
1697     if (!UseHeavyMonitors) {
1698       // Load immediate 1 into swap_reg %rax,
1699       __ movptr(swap_reg, 1);
1700 
1701       // Load (object->mark() | 1) into swap_reg %rax,
1702       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1703 
1704       // Save (object->mark() | 1) into BasicLock's displaced header
1705       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1706 
1707       // src -> dest iff dest == rax, else rax, <- dest
1708       // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1709       __ lock();
1710       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1711       __ jcc(Assembler::equal, count_mon);
1712 
1713       // Test if the oopMark is an obvious stack pointer, i.e.,
1714       //  1) (mark & 3) == 0, and
1715       //  2) rsp <= mark < mark + os::pagesize()
1716       // These 3 tests can be done by evaluating the following
1717       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1718       // assuming both stack pointer and pagesize have their
1719       // least significant 2 bits clear.
1720       // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1721 
1722       __ subptr(swap_reg, rsp);
1723       __ andptr(swap_reg, 3 - os::vm_page_size());
1724 
1725       // Save the test result, for recursive case, the result is zero
1726       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1727       __ jcc(Assembler::notEqual, slow_path_lock);
1728     } else {
1729       __ jmp(slow_path_lock);
1730     }
1731     __ bind(count_mon);
1732     __ inc_held_monitor_count();
1733 
1734     // Slow path will re-enter here
1735     __ bind(lock_done);
1736   }
1737 
1738 
1739   // Finally just about ready to make the JNI call
1740 
1741   // get JNIEnv* which is first argument to native
1742   __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1743   __ movptr(Address(rsp, 0), rdx);
1744 
1745   // Now set thread in native
1746   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1747 
1748   __ call(RuntimeAddress(native_func));
1749 
1750   // Verify or restore cpu control state after JNI call
1751   __ restore_cpu_control_state_after_jni(noreg);
1752 
1753   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1754   // arguments off of the stack. We could just re-adjust the stack pointer here
1755   // and continue to do SP relative addressing but we instead switch to FP
1756   // relative addressing.
1757 
1758   // Unpack native results.
1759   switch (ret_type) {
1760   case T_BOOLEAN: __ c2bool(rax);            break;
1761   case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
1762   case T_BYTE   : __ sign_extend_byte (rax); break;
1763   case T_SHORT  : __ sign_extend_short(rax); break;
1764   case T_INT    : /* nothing to do */        break;
1765   case T_DOUBLE :
1766   case T_FLOAT  :
1767     // Result is in st0 we'll save as needed
1768     break;
1769   case T_ARRAY:                 // Really a handle
1770   case T_PRIMITIVE_OBJECT:           // Really a handle
1771   case T_OBJECT:                // Really a handle
1772       break; // can't de-handlize until after safepoint check
1773   case T_VOID: break;
1774   case T_LONG: break;
1775   default       : ShouldNotReachHere();
1776   }
1777 
1778   Label after_transition;
1779 
1780   // Switch thread to "native transition" state before reading the synchronization state.
1781   // This additional state is necessary because reading and testing the synchronization
1782   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1783   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1784   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1785   //     Thread A is resumed to finish this native method, but doesn't block here since it
1786   //     didn't see any synchronization is progress, and escapes.
1787   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1788 
1789   // Force this write out before the read below
1790   __ membar(Assembler::Membar_mask_bits(
1791             Assembler::LoadLoad | Assembler::LoadStore |
1792             Assembler::StoreLoad | Assembler::StoreStore));
1793 
1794   if (AlwaysRestoreFPU) {
1795     // Make sure the control word is correct.
1796     __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
1797   }
1798 
1799   // check for safepoint operation in progress and/or pending suspend requests
1800   { Label Continue, slow_path;
1801 
1802     __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
1803 
1804     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1805     __ jcc(Assembler::equal, Continue);
1806     __ bind(slow_path);
1807 
1808     // Don't use call_VM as it will see a possible pending exception and forward it
1809     // and never return here preventing us from clearing _last_native_pc down below.
1810     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1811     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1812     // by hand.
1813     //
1814     __ vzeroupper();
1815 
1816     save_native_result(masm, ret_type, stack_slots);
1817     __ push(thread);
1818     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1819                                               JavaThread::check_special_condition_for_native_trans)));
1820     __ increment(rsp, wordSize);
1821     // Restore any method result value
1822     restore_native_result(masm, ret_type, stack_slots);
1823     __ bind(Continue);
1824   }
1825 
1826   // change thread state
1827   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1828   __ bind(after_transition);
1829 
1830   Label reguard;
1831   Label reguard_done;
1832   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
1833   __ jcc(Assembler::equal, reguard);
1834 
1835   // slow path reguard  re-enters here
1836   __ bind(reguard_done);
1837 
1838   // Handle possible exception (will unlock if necessary)
1839 
1840   // native result if any is live
1841 
1842   // Unlock
1843   Label slow_path_unlock;
1844   Label unlock_done;
1845   if (method->is_synchronized()) {
1846 
1847     Label fast_done;
1848 
1849     // Get locked oop from the handle we passed to jni
1850     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1851 
1852     if (!UseHeavyMonitors) {
1853       Label not_recur;
1854       // Simple recursive lock?
1855       __ cmpptr(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
1856       __ jcc(Assembler::notEqual, not_recur);
1857       __ dec_held_monitor_count();
1858       __ jmpb(fast_done);
1859       __ bind(not_recur);
1860     }
1861 
1862     // Must save rax, if it is live now because cmpxchg must use it
1863     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1864       save_native_result(masm, ret_type, stack_slots);
1865     }
1866 
1867     if (!UseHeavyMonitors) {
1868       //  get old displaced header
1869       __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
1870 
1871       // get address of the stack lock
1872       __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1873 
1874       // Atomic swap old header if oop still contains the stack lock
1875       // src -> dest iff dest == rax, else rax, <- dest
1876       // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
1877       __ lock();
1878       __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1879       __ jcc(Assembler::notEqual, slow_path_unlock);
1880       __ dec_held_monitor_count();
1881     } else {
1882       __ jmp(slow_path_unlock);
1883     }
1884 
1885     // slow path re-enters here
1886     __ bind(unlock_done);
1887     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1888       restore_native_result(masm, ret_type, stack_slots);
1889     }
1890 
1891     __ bind(fast_done);
1892   }
1893 
1894   {
1895     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1896     // Tell dtrace about this method exit
1897     save_native_result(masm, ret_type, stack_slots);
1898     __ mov_metadata(rax, method());
1899     __ call_VM_leaf(
1900          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1901          thread, rax);
1902     restore_native_result(masm, ret_type, stack_slots);
1903   }
1904 
1905   // We can finally stop using that last_Java_frame we setup ages ago
1906 
1907   __ reset_last_Java_frame(thread, false);
1908 
1909   // Unbox oop result, e.g. JNIHandles::resolve value.
1910   if (is_reference_type(ret_type)) {
1911     __ resolve_jobject(rax /* value */,
1912                        thread /* thread */,
1913                        rcx /* tmp */);
1914   }
1915 
1916   if (CheckJNICalls) {
1917     // clear_pending_jni_exception_check
1918     __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
1919   }
1920 
1921   // reset handle block
1922   __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
1923   __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
1924 
1925   // Any exception pending?
1926   __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1927   __ jcc(Assembler::notEqual, exception_pending);
1928 
1929   // no exception, we're almost done
1930 
1931   // check that only result value is on FPU stack
1932   __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
1933 
1934   // Fixup floating pointer results so that result looks like a return from a compiled method
1935   if (ret_type == T_FLOAT) {
1936     if (UseSSE >= 1) {
1937       // Pop st0 and store as float and reload into xmm register
1938       __ fstp_s(Address(rbp, -4));
1939       __ movflt(xmm0, Address(rbp, -4));
1940     }
1941   } else if (ret_type == T_DOUBLE) {
1942     if (UseSSE >= 2) {
1943       // Pop st0 and store as double and reload into xmm register
1944       __ fstp_d(Address(rbp, -8));
1945       __ movdbl(xmm0, Address(rbp, -8));
1946     }
1947   }
1948 
1949   // Return
1950 
1951   __ leave();
1952   __ ret(0);
1953 
1954   // Unexpected paths are out of line and go here
1955 
1956   // Slow path locking & unlocking
1957   if (method->is_synchronized()) {
1958 
1959     // BEGIN Slow path lock
1960 
1961     __ bind(slow_path_lock);
1962 
1963     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
1964     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1965     __ push(thread);
1966     __ push(lock_reg);
1967     __ push(obj_reg);
1968     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1969     __ addptr(rsp, 3*wordSize);
1970 
1971 #ifdef ASSERT
1972     { Label L;
1973     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1974     __ jcc(Assembler::equal, L);
1975     __ stop("no pending exception allowed on exit from monitorenter");
1976     __ bind(L);
1977     }
1978 #endif
1979     __ jmp(lock_done);
1980 
1981     // END Slow path lock
1982 
1983     // BEGIN Slow path unlock
1984     __ bind(slow_path_unlock);
1985     __ vzeroupper();
1986     // Slow path unlock
1987 
1988     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1989       save_native_result(masm, ret_type, stack_slots);
1990     }
1991     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1992 
1993     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1994     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1995 
1996 
1997     // should be a peal
1998     // +wordSize because of the push above
1999     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2000     __ push(thread);
2001     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2002     __ push(rax);
2003 
2004     __ push(obj_reg);
2005     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2006     __ addptr(rsp, 3*wordSize);
2007 #ifdef ASSERT
2008     {
2009       Label L;
2010       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2011       __ jcc(Assembler::equal, L);
2012       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2013       __ bind(L);
2014     }
2015 #endif /* ASSERT */
2016 
2017     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2018 
2019     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2020       restore_native_result(masm, ret_type, stack_slots);
2021     }
2022     __ jmp(unlock_done);
2023     // END Slow path unlock
2024 
2025   }
2026 
2027   // SLOW PATH Reguard the stack if needed
2028 
2029   __ bind(reguard);
2030   __ vzeroupper();
2031   save_native_result(masm, ret_type, stack_slots);
2032   {
2033     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2034   }
2035   restore_native_result(masm, ret_type, stack_slots);
2036   __ jmp(reguard_done);
2037 
2038 
2039   // BEGIN EXCEPTION PROCESSING
2040 
2041   // Forward  the exception
2042   __ bind(exception_pending);
2043 
2044   // remove possible return value from FPU register stack
2045   __ empty_FPU_stack();
2046 
2047   // pop our frame
2048   __ leave();
2049   // and forward the exception
2050   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2051 
2052   __ flush();
2053 
2054   nmethod *nm = nmethod::new_native_nmethod(method,
2055                                             compile_id,
2056                                             masm->code(),
2057                                             vep_offset,
2058                                             frame_complete,
2059                                             stack_slots / VMRegImpl::slots_per_word,
2060                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2061                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2062                                             oop_maps);
2063 
2064   return nm;
2065 
2066 }
2067 
2068 // this function returns the adjust size (in number of words) to a c2i adapter
2069 // activation for use during deoptimization
2070 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2071   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2072 }
2073 
2074 
2075 // Number of stack slots between incoming argument block and the start of
2076 // a new frame.  The PROLOG must add this many slots to the stack.  The
2077 // EPILOG must remove this many slots.  Intel needs one slot for
2078 // return address and one for rbp, (must save rbp)
2079 uint SharedRuntime::in_preserve_stack_slots() {
2080   return 2+VerifyStackAtCalls;
2081 }
2082 
2083 uint SharedRuntime::out_preserve_stack_slots() {
2084   return 0;
2085 }
2086 
2087 //------------------------------generate_deopt_blob----------------------------
2088 void SharedRuntime::generate_deopt_blob() {
2089   // allocate space for the code
2090   ResourceMark rm;
2091   // setup code generation tools
2092   // note: the buffer code size must account for StackShadowPages=50
2093   CodeBuffer   buffer("deopt_blob", 1536, 1024);
2094   MacroAssembler* masm = new MacroAssembler(&buffer);
2095   int frame_size_in_words;
2096   OopMap* map = NULL;
2097   // Account for the extra args we place on the stack
2098   // by the time we call fetch_unroll_info
2099   const int additional_words = 2; // deopt kind, thread
2100 
2101   OopMapSet *oop_maps = new OopMapSet();
2102 
2103   // -------------
2104   // This code enters when returning to a de-optimized nmethod.  A return
2105   // address has been pushed on the stack, and return values are in
2106   // registers.
2107   // If we are doing a normal deopt then we were called from the patched
2108   // nmethod from the point we returned to the nmethod. So the return
2109   // address on the stack is wrong by NativeCall::instruction_size
2110   // We will adjust the value to it looks like we have the original return
2111   // address on the stack (like when we eagerly deoptimized).
2112   // In the case of an exception pending with deoptimized then we enter
2113   // with a return address on the stack that points after the call we patched
2114   // into the exception handler. We have the following register state:
2115   //    rax,: exception
2116   //    rbx,: exception handler
2117   //    rdx: throwing pc
2118   // So in this case we simply jam rdx into the useless return address and
2119   // the stack looks just like we want.
2120   //
2121   // At this point we need to de-opt.  We save the argument return
2122   // registers.  We call the first C routine, fetch_unroll_info().  This
2123   // routine captures the return values and returns a structure which
2124   // describes the current frame size and the sizes of all replacement frames.
2125   // The current frame is compiled code and may contain many inlined
2126   // functions, each with their own JVM state.  We pop the current frame, then
2127   // push all the new frames.  Then we call the C routine unpack_frames() to
2128   // populate these frames.  Finally unpack_frames() returns us the new target
2129   // address.  Notice that callee-save registers are BLOWN here; they have
2130   // already been captured in the vframeArray at the time the return PC was
2131   // patched.
2132   address start = __ pc();
2133   Label cont;
2134 
2135   // Prolog for non exception case!
2136 
2137   // Save everything in sight.
2138 
2139   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2140   // Normal deoptimization
2141   __ push(Deoptimization::Unpack_deopt);
2142   __ jmp(cont);
2143 
2144   int reexecute_offset = __ pc() - start;
2145 
2146   // Reexecute case
2147   // return address is the pc describes what bci to do re-execute at
2148 
2149   // No need to update map as each call to save_live_registers will produce identical oopmap
2150   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2151 
2152   __ push(Deoptimization::Unpack_reexecute);
2153   __ jmp(cont);
2154 
2155   int exception_offset = __ pc() - start;
2156 
2157   // Prolog for exception case
2158 
2159   // all registers are dead at this entry point, except for rax, and
2160   // rdx which contain the exception oop and exception pc
2161   // respectively.  Set them in TLS and fall thru to the
2162   // unpack_with_exception_in_tls entry point.
2163 
2164   __ get_thread(rdi);
2165   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2166   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2167 
2168   int exception_in_tls_offset = __ pc() - start;
2169 
2170   // new implementation because exception oop is now passed in JavaThread
2171 
2172   // Prolog for exception case
2173   // All registers must be preserved because they might be used by LinearScan
2174   // Exceptiop oop and throwing PC are passed in JavaThread
2175   // tos: stack at point of call to method that threw the exception (i.e. only
2176   // args are on the stack, no return address)
2177 
2178   // make room on stack for the return address
2179   // It will be patched later with the throwing pc. The correct value is not
2180   // available now because loading it from memory would destroy registers.
2181   __ push(0);
2182 
2183   // Save everything in sight.
2184 
2185   // No need to update map as each call to save_live_registers will produce identical oopmap
2186   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2187 
2188   // Now it is safe to overwrite any register
2189 
2190   // store the correct deoptimization type
2191   __ push(Deoptimization::Unpack_exception);
2192 
2193   // load throwing pc from JavaThread and patch it as the return address
2194   // of the current frame. Then clear the field in JavaThread
2195   __ get_thread(rdi);
2196   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2197   __ movptr(Address(rbp, wordSize), rdx);
2198   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2199 
2200 #ifdef ASSERT
2201   // verify that there is really an exception oop in JavaThread
2202   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2203   __ verify_oop(rax);
2204 
2205   // verify that there is no pending exception
2206   Label no_pending_exception;
2207   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2208   __ testptr(rax, rax);
2209   __ jcc(Assembler::zero, no_pending_exception);
2210   __ stop("must not have pending exception here");
2211   __ bind(no_pending_exception);
2212 #endif
2213 
2214   __ bind(cont);
2215 
2216   // Compiled code leaves the floating point stack dirty, empty it.
2217   __ empty_FPU_stack();
2218 
2219 
2220   // Call C code.  Need thread and this frame, but NOT official VM entry
2221   // crud.  We cannot block on this call, no GC can happen.
2222   __ get_thread(rcx);
2223   __ push(rcx);
2224   // fetch_unroll_info needs to call last_java_frame()
2225   __ set_last_Java_frame(rcx, noreg, noreg, NULL, noreg);
2226 
2227   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2228 
2229   // Need to have an oopmap that tells fetch_unroll_info where to
2230   // find any register it might need.
2231 
2232   oop_maps->add_gc_map( __ pc()-start, map);
2233 
2234   // Discard args to fetch_unroll_info
2235   __ pop(rcx);
2236   __ pop(rcx);
2237 
2238   __ get_thread(rcx);
2239   __ reset_last_Java_frame(rcx, false);
2240 
2241   // Load UnrollBlock into EDI
2242   __ mov(rdi, rax);
2243 
2244   // Move the unpack kind to a safe place in the UnrollBlock because
2245   // we are very short of registers
2246 
2247   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2248   // retrieve the deopt kind from the UnrollBlock.
2249   __ movl(rax, unpack_kind);
2250 
2251    Label noException;
2252   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2253   __ jcc(Assembler::notEqual, noException);
2254   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2255   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2256   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2257   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2258 
2259   __ verify_oop(rax);
2260 
2261   // Overwrite the result registers with the exception results.
2262   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2263   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2264 
2265   __ bind(noException);
2266 
2267   // Stack is back to only having register save data on the stack.
2268   // Now restore the result registers. Everything else is either dead or captured
2269   // in the vframeArray.
2270 
2271   RegisterSaver::restore_result_registers(masm);
2272 
2273   // Non standard control word may be leaked out through a safepoint blob, and we can
2274   // deopt at a poll point with the non standard control word. However, we should make
2275   // sure the control word is correct after restore_result_registers.
2276   __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
2277 
2278   // All of the register save area has been popped of the stack. Only the
2279   // return address remains.
2280 
2281   // Pop all the frames we must move/replace.
2282   //
2283   // Frame picture (youngest to oldest)
2284   // 1: self-frame (no frame link)
2285   // 2: deopting frame  (no frame link)
2286   // 3: caller of deopting frame (could be compiled/interpreted).
2287   //
2288   // Note: by leaving the return address of self-frame on the stack
2289   // and using the size of frame 2 to adjust the stack
2290   // when we are done the return to frame 3 will still be on the stack.
2291 
2292   // Pop deoptimized frame
2293   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2294 
2295   // sp should be pointing at the return address to the caller (3)
2296 
2297   // Pick up the initial fp we should save
2298   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2299   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2300 
2301 #ifdef ASSERT
2302   // Compilers generate code that bang the stack by as much as the
2303   // interpreter would need. So this stack banging should never
2304   // trigger a fault. Verify that it does not on non product builds.
2305   __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2306   __ bang_stack_size(rbx, rcx);
2307 #endif
2308 
2309   // Load array of frame pcs into ECX
2310   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2311 
2312   __ pop(rsi); // trash the old pc
2313 
2314   // Load array of frame sizes into ESI
2315   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2316 
2317   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2318 
2319   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2320   __ movl(counter, rbx);
2321 
2322   // Now adjust the caller's stack to make up for the extra locals
2323   // but record the original sp so that we can save it in the skeletal interpreter
2324   // frame and the stack walking of interpreter_sender will get the unextended sp
2325   // value and not the "real" sp value.
2326 
2327   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2328   __ movptr(sp_temp, rsp);
2329   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2330   __ subptr(rsp, rbx);
2331 
2332   // Push interpreter frames in a loop
2333   Label loop;
2334   __ bind(loop);
2335   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2336   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2337   __ pushptr(Address(rcx, 0));          // save return address
2338   __ enter();                           // save old & set new rbp,
2339   __ subptr(rsp, rbx);                  // Prolog!
2340   __ movptr(rbx, sp_temp);              // sender's sp
2341   // This value is corrected by layout_activation_impl
2342   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2343   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2344   __ movptr(sp_temp, rsp);              // pass to next frame
2345   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2346   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2347   __ decrementl(counter);             // decrement counter
2348   __ jcc(Assembler::notZero, loop);
2349   __ pushptr(Address(rcx, 0));          // save final return address
2350 
2351   // Re-push self-frame
2352   __ enter();                           // save old & set new rbp,
2353 
2354   //  Return address and rbp, are in place
2355   // We'll push additional args later. Just allocate a full sized
2356   // register save area
2357   __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2358 
2359   // Restore frame locals after moving the frame
2360   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2361   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2362   __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
2363   if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2364   if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2365 
2366   // Set up the args to unpack_frame
2367 
2368   __ pushl(unpack_kind);                     // get the unpack_kind value
2369   __ get_thread(rcx);
2370   __ push(rcx);
2371 
2372   // set last_Java_sp, last_Java_fp
2373   __ set_last_Java_frame(rcx, noreg, rbp, NULL, noreg);
2374 
2375   // Call C code.  Need thread but NOT official VM entry
2376   // crud.  We cannot block on this call, no GC can happen.  Call should
2377   // restore return values to their stack-slots with the new SP.
2378   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2379   // Set an oopmap for the call site
2380   oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2381 
2382   // rax, contains the return result type
2383   __ push(rax);
2384 
2385   __ get_thread(rcx);
2386   __ reset_last_Java_frame(rcx, false);
2387 
2388   // Collect return values
2389   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2390   __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2391 
2392   // Clear floating point stack before returning to interpreter
2393   __ empty_FPU_stack();
2394 
2395   // Check if we should push the float or double return value.
2396   Label results_done, yes_double_value;
2397   __ cmpl(Address(rsp, 0), T_DOUBLE);
2398   __ jcc (Assembler::zero, yes_double_value);
2399   __ cmpl(Address(rsp, 0), T_FLOAT);
2400   __ jcc (Assembler::notZero, results_done);
2401 
2402   // return float value as expected by interpreter
2403   if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2404   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2405   __ jmp(results_done);
2406 
2407   // return double value as expected by interpreter
2408   __ bind(yes_double_value);
2409   if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2410   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2411 
2412   __ bind(results_done);
2413 
2414   // Pop self-frame.
2415   __ leave();                              // Epilog!
2416 
2417   // Jump to interpreter
2418   __ ret(0);
2419 
2420   // -------------
2421   // make sure all code is generated
2422   masm->flush();
2423 
2424   _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2425   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2426 }
2427 
2428 
2429 #ifdef COMPILER2
2430 //------------------------------generate_uncommon_trap_blob--------------------
2431 void SharedRuntime::generate_uncommon_trap_blob() {
2432   // allocate space for the code
2433   ResourceMark rm;
2434   // setup code generation tools
2435   CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
2436   MacroAssembler* masm = new MacroAssembler(&buffer);
2437 
2438   enum frame_layout {
2439     arg0_off,      // thread                     sp + 0 // Arg location for
2440     arg1_off,      // unloaded_class_index       sp + 1 // calling C
2441     arg2_off,      // exec_mode                  sp + 2
2442     // The frame sender code expects that rbp will be in the "natural" place and
2443     // will override any oopMap setting for it. We must therefore force the layout
2444     // so that it agrees with the frame sender code.
2445     rbp_off,       // callee saved register      sp + 3
2446     return_off,    // slot for return address    sp + 4
2447     framesize
2448   };
2449 
2450   address start = __ pc();
2451 
2452   if (UseRTMLocking) {
2453     // Abort RTM transaction before possible nmethod deoptimization.
2454     __ xabort(0);
2455   }
2456 
2457   // Push self-frame.
2458   __ subptr(rsp, return_off*wordSize);     // Epilog!
2459 
2460   // rbp, is an implicitly saved callee saved register (i.e. the calling
2461   // convention will save restore it in prolog/epilog) Other than that
2462   // there are no callee save registers no that adapter frames are gone.
2463   __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2464 
2465   // Clear the floating point exception stack
2466   __ empty_FPU_stack();
2467 
2468   // set last_Java_sp
2469   __ get_thread(rdx);
2470   __ set_last_Java_frame(rdx, noreg, noreg, NULL, noreg);
2471 
2472   // Call C code.  Need thread but NOT official VM entry
2473   // crud.  We cannot block on this call, no GC can happen.  Call should
2474   // capture callee-saved registers as well as return values.
2475   __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2476   // argument already in ECX
2477   __ movl(Address(rsp, arg1_off*wordSize),rcx);
2478   __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2479   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2480 
2481   // Set an oopmap for the call site
2482   OopMapSet *oop_maps = new OopMapSet();
2483   OopMap* map =  new OopMap( framesize, 0 );
2484   // No oopMap for rbp, it is known implicitly
2485 
2486   oop_maps->add_gc_map( __ pc()-start, map);
2487 
2488   __ get_thread(rcx);
2489 
2490   __ reset_last_Java_frame(rcx, false);
2491 
2492   // Load UnrollBlock into EDI
2493   __ movptr(rdi, rax);
2494 
2495 #ifdef ASSERT
2496   { Label L;
2497     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
2498             (int32_t)Deoptimization::Unpack_uncommon_trap);
2499     __ jcc(Assembler::equal, L);
2500     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2501     __ bind(L);
2502   }
2503 #endif
2504 
2505   // Pop all the frames we must move/replace.
2506   //
2507   // Frame picture (youngest to oldest)
2508   // 1: self-frame (no frame link)
2509   // 2: deopting frame  (no frame link)
2510   // 3: caller of deopting frame (could be compiled/interpreted).
2511 
2512   // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
2513   __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!
2514 
2515   // Pop deoptimized frame
2516   __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2517   __ addptr(rsp, rcx);
2518 
2519   // sp should be pointing at the return address to the caller (3)
2520 
2521   // Pick up the initial fp we should save
2522   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2523   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2524 
2525 #ifdef ASSERT
2526   // Compilers generate code that bang the stack by as much as the
2527   // interpreter would need. So this stack banging should never
2528   // trigger a fault. Verify that it does not on non product builds.
2529   __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2530   __ bang_stack_size(rbx, rcx);
2531 #endif
2532 
2533   // Load array of frame pcs into ECX
2534   __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2535 
2536   __ pop(rsi); // trash the pc
2537 
2538   // Load array of frame sizes into ESI
2539   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2540 
2541   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2542 
2543   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2544   __ movl(counter, rbx);
2545 
2546   // Now adjust the caller's stack to make up for the extra locals
2547   // but record the original sp so that we can save it in the skeletal interpreter
2548   // frame and the stack walking of interpreter_sender will get the unextended sp
2549   // value and not the "real" sp value.
2550 
2551   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2552   __ movptr(sp_temp, rsp);
2553   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2554   __ subptr(rsp, rbx);
2555 
2556   // Push interpreter frames in a loop
2557   Label loop;
2558   __ bind(loop);
2559   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2560   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2561   __ pushptr(Address(rcx, 0));          // save return address
2562   __ enter();                           // save old & set new rbp,
2563   __ subptr(rsp, rbx);                  // Prolog!
2564   __ movptr(rbx, sp_temp);              // sender's sp
2565   // This value is corrected by layout_activation_impl
2566   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2567   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2568   __ movptr(sp_temp, rsp);              // pass to next frame
2569   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2570   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2571   __ decrementl(counter);             // decrement counter
2572   __ jcc(Assembler::notZero, loop);
2573   __ pushptr(Address(rcx, 0));            // save final return address
2574 
2575   // Re-push self-frame
2576   __ enter();                           // save old & set new rbp,
2577   __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
2578 
2579 
2580   // set last_Java_sp, last_Java_fp
2581   __ get_thread(rdi);
2582   __ set_last_Java_frame(rdi, noreg, rbp, NULL, noreg);
2583 
2584   // Call C code.  Need thread but NOT official VM entry
2585   // crud.  We cannot block on this call, no GC can happen.  Call should
2586   // restore return values to their stack-slots with the new SP.
2587   __ movptr(Address(rsp,arg0_off*wordSize),rdi);
2588   __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2589   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2590   // Set an oopmap for the call site
2591   oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
2592 
2593   __ get_thread(rdi);
2594   __ reset_last_Java_frame(rdi, true);
2595 
2596   // Pop self-frame.
2597   __ leave();     // Epilog!
2598 
2599   // Jump to interpreter
2600   __ ret(0);
2601 
2602   // -------------
2603   // make sure all code is generated
2604   masm->flush();
2605 
2606    _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
2607 }
2608 #endif // COMPILER2
2609 
2610 //------------------------------generate_handler_blob------
2611 //
2612 // Generate a special Compile2Runtime blob that saves all registers,
2613 // setup oopmap, and calls safepoint code to stop the compiled code for
2614 // a safepoint.
2615 //
2616 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2617 
2618   // Account for thread arg in our frame
2619   const int additional_words = 1;
2620   int frame_size_in_words;
2621 
2622   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2623 
2624   ResourceMark rm;
2625   OopMapSet *oop_maps = new OopMapSet();
2626   OopMap* map;
2627 
2628   // allocate space for the code
2629   // setup code generation tools
2630   CodeBuffer   buffer("handler_blob", 2048, 1024);
2631   MacroAssembler* masm = new MacroAssembler(&buffer);
2632 
2633   const Register java_thread = rdi; // callee-saved for VC++
2634   address start   = __ pc();
2635   address call_pc = NULL;
2636   bool cause_return = (poll_type == POLL_AT_RETURN);
2637   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
2638 
2639   if (UseRTMLocking) {
2640     // Abort RTM transaction before calling runtime
2641     // because critical section will be large and will be
2642     // aborted anyway. Also nmethod could be deoptimized.
2643     __ xabort(0);
2644   }
2645 
2646   // If cause_return is true we are at a poll_return and there is
2647   // the return address on the stack to the caller on the nmethod
2648   // that is safepoint. We can leave this return on the stack and
2649   // effectively complete the return and safepoint in the caller.
2650   // Otherwise we push space for a return address that the safepoint
2651   // handler will install later to make the stack walking sensible.
2652   if (!cause_return)
2653     __ push(rbx);  // Make room for return address (or push it again)
2654 
2655   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
2656 
2657   // The following is basically a call_VM. However, we need the precise
2658   // address of the call in order to generate an oopmap. Hence, we do all the
2659   // work ourselves.
2660 
2661   // Push thread argument and setup last_Java_sp
2662   __ get_thread(java_thread);
2663   __ push(java_thread);
2664   __ set_last_Java_frame(java_thread, noreg, noreg, NULL, noreg);
2665 
2666   // if this was not a poll_return then we need to correct the return address now.
2667   if (!cause_return) {
2668     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
2669     // Additionally, rbx is a callee saved register and we can look at it later to determine
2670     // if someone changed the return address for us!
2671     __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
2672     __ movptr(Address(rbp, wordSize), rbx);
2673   }
2674 
2675   // do the call
2676   __ call(RuntimeAddress(call_ptr));
2677 
2678   // Set an oopmap for the call site.  This oopmap will map all
2679   // oop-registers and debug-info registers as callee-saved.  This
2680   // will allow deoptimization at this safepoint to find all possible
2681   // debug-info recordings, as well as let GC find all oops.
2682 
2683   oop_maps->add_gc_map( __ pc() - start, map);
2684 
2685   // Discard arg
2686   __ pop(rcx);
2687 
2688   Label noException;
2689 
2690   // Clear last_Java_sp again
2691   __ get_thread(java_thread);
2692   __ reset_last_Java_frame(java_thread, false);
2693 
2694   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
2695   __ jcc(Assembler::equal, noException);
2696 
2697   // Exception pending
2698   RegisterSaver::restore_live_registers(masm, save_vectors);
2699 
2700   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2701 
2702   __ bind(noException);
2703 
2704   Label no_adjust, bail, not_special;
2705   if (!cause_return) {
2706     // If our stashed return pc was modified by the runtime we avoid touching it
2707     __ cmpptr(rbx, Address(rbp, wordSize));
2708     __ jccb(Assembler::notEqual, no_adjust);
2709 
2710     // Skip over the poll instruction.
2711     // See NativeInstruction::is_safepoint_poll()
2712     // Possible encodings:
2713     //      85 00       test   %eax,(%rax)
2714     //      85 01       test   %eax,(%rcx)
2715     //      85 02       test   %eax,(%rdx)
2716     //      85 03       test   %eax,(%rbx)
2717     //      85 06       test   %eax,(%rsi)
2718     //      85 07       test   %eax,(%rdi)
2719     //
2720     //      85 04 24    test   %eax,(%rsp)
2721     //      85 45 00    test   %eax,0x0(%rbp)
2722 
2723 #ifdef ASSERT
2724     __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
2725 #endif
2726     // rsp/rbp base encoding takes 3 bytes with the following register values:
2727     // rsp 0x04
2728     // rbp 0x05
2729     __ movzbl(rcx, Address(rbx, 1));
2730     __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
2731     __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
2732     __ cmpptr(rcx, 1);
2733     __ jcc(Assembler::above, not_special);
2734     __ addptr(rbx, 1);
2735     __ bind(not_special);
2736 #ifdef ASSERT
2737     // Verify the correct encoding of the poll we're about to skip.
2738     __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
2739     __ jcc(Assembler::notEqual, bail);
2740     // Mask out the modrm bits
2741     __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
2742     // rax encodes to 0, so if the bits are nonzero it's incorrect
2743     __ jcc(Assembler::notZero, bail);
2744 #endif
2745     // Adjust return pc forward to step over the safepoint poll instruction
2746     __ addptr(rbx, 2);
2747     __ movptr(Address(rbp, wordSize), rbx);
2748   }
2749 
2750   __ bind(no_adjust);
2751   // Normal exit, register restoring and exit
2752   RegisterSaver::restore_live_registers(masm, save_vectors);
2753 
2754   __ ret(0);
2755 
2756 #ifdef ASSERT
2757   __ bind(bail);
2758   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2759 #endif
2760 
2761   // make sure all code is generated
2762   masm->flush();
2763 
2764   // Fill-out other meta info
2765   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2766 }
2767 
2768 //
2769 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
2770 //
2771 // Generate a stub that calls into vm to find out the proper destination
2772 // of a java call. All the argument registers are live at this point
2773 // but since this is generic code we don't know what they are and the caller
2774 // must do any gc of the args.
2775 //
2776 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2777   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2778 
2779   // allocate space for the code
2780   ResourceMark rm;
2781 
2782   CodeBuffer buffer(name, 1000, 512);
2783   MacroAssembler* masm                = new MacroAssembler(&buffer);
2784 
2785   int frame_size_words;
2786   enum frame_layout {
2787                 thread_off,
2788                 extra_words };
2789 
2790   OopMapSet *oop_maps = new OopMapSet();
2791   OopMap* map = NULL;
2792 
2793   int start = __ offset();
2794 
2795   map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
2796 
2797   int frame_complete = __ offset();
2798 
2799   const Register thread = rdi;
2800   __ get_thread(rdi);
2801 
2802   __ push(thread);
2803   __ set_last_Java_frame(thread, noreg, rbp, NULL, noreg);
2804 
2805   __ call(RuntimeAddress(destination));
2806 
2807 
2808   // Set an oopmap for the call site.
2809   // We need this not only for callee-saved registers, but also for volatile
2810   // registers that the compiler might be keeping live across a safepoint.
2811 
2812   oop_maps->add_gc_map( __ offset() - start, map);
2813 
2814   // rax, contains the address we are going to jump to assuming no exception got installed
2815 
2816   __ addptr(rsp, wordSize);
2817 
2818   // clear last_Java_sp
2819   __ reset_last_Java_frame(thread, true);
2820   // check for pending exceptions
2821   Label pending;
2822   __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
2823   __ jcc(Assembler::notEqual, pending);
2824 
2825   // get the returned Method*
2826   __ get_vm_result_2(rbx, thread);
2827   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
2828 
2829   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
2830 
2831   RegisterSaver::restore_live_registers(masm);
2832 
2833   // We are back to the original state on entry and ready to go.
2834 
2835   __ jmp(rax);
2836 
2837   // Pending exception after the safepoint
2838 
2839   __ bind(pending);
2840 
2841   RegisterSaver::restore_live_registers(masm);
2842 
2843   // exception pending => remove activation and forward to exception handler
2844 
2845   __ get_thread(thread);
2846   __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
2847   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
2848   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2849 
2850   // -------------
2851   // make sure all code is generated
2852   masm->flush();
2853 
2854   // return the  blob
2855   // frame_size_words or bytes??
2856   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
2857 }
2858 
2859 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
2860   Unimplemented();
2861   return NULL;
2862 }