/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  // Capture info about frame layout
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off+FPUStateSizeInWords,
                st0_off, st0H_off,
                st1_off, st1H_off,
                st2_off, st2H_off,
                st3_off, st3H_off,
                st4_off, st4H_off,
                st5_off, st5H_off,
                st6_off, st6H_off,
                st7_off, st7H_off,
                xmm_off,
                DEF_XMM_OFFS(0),
                DEF_XMM_OFFS(1),
                DEF_XMM_OFFS(2),
                DEF_XMM_OFFS(3),
                DEF_XMM_OFFS(4),
                DEF_XMM_OFFS(5),
                DEF_XMM_OFFS(6),
                DEF_XMM_OFFS(7),
                flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
                rdi_off,
                rsi_off,
                ignore_off,  // extra copy of rbp,
                rsp_off,
                rbx_off,
                rdx_off,
                rcx_off,
                rax_off,
                // The frame sender code expects that rbp will be in the "natural" place and
                // will override any oopMap setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                rbp_off,
                return_off,      // slot for return address
                reg_save_size };
  enum { FPU_regs_live = flags_off - fpu_state_end };
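
  // A rough sketch (derived from the enum above; these are slot offsets) of
  // the save area, from low addresses at rsp up to the caller's return address:
  //
  //   rsp -> [FPU state, FPUStateSizeInWords words]
  //          [st0..st7, two words each]
  //          [xmm0..xmm7, 16 bytes each][alignment fill word]
  //          [flags]
  //          [rdi][rsi][rbp copy][rsp][rbx][rdx][rcx][rax]   <- pusha order
  //          [rbp]                                           <- from enter()
  //          [return address]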

  public:

  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                     int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  static int rax_offset() { return rax_off; }
  static int rbx_offset() { return rbx_off; }

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int raxOffset(void) { return rax_off; }
  static int rdxOffset(void) { return rdx_off; }
  static int rbxOffset(void) { return rbx_off; }
  static int xmm0Offset(void) { return xmm0_off; }
  // This really returns a slot in the fp save area, which one is not important
  static int fpResultOffset(void) { return st0_off; }

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                           int* total_frame_words, bool verify_fpu, bool save_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  int ymm_bytes = num_xmm_regs * 16;
  int zmm_bytes = num_xmm_regs * 32;
#ifdef COMPILER2
  int opmask_state_bytes = KRegisterImpl::number_of_registers * 8;
  if (save_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    // Save upper half of YMM registers
    int vect_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Save upper half of ZMM registers as well
      vect_bytes += zmm_bytes;
      additional_frame_words += opmask_state_bytes / wordSize;
    }
    additional_frame_words += vect_bytes / wordSize;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif
  int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
  int frame_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_words;

  assert(FPUStateSizeInWords == 27, "update stack layout");

  // save registers, fpu state, and flags
  // We assume the caller already has the return address slot on the stack.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter, and we want to
  // use pusha for the remaining registers (its rbp copy is ignored; see
  // ignore_off above).
  __ enter();
  __ pusha();
  __ pushf();
  __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
  __ push_FPU_state();          // Save FPU state & init

  if (verify_fpu) {
    // Some stubs may have non-standard FPU control word settings so
    // only check and reset the value when it is required to be the
    // standard value.  The safepoint blob in particular can be used
    // in methods which are using the 24-bit control word for
    // optimized float math.

#ifdef ASSERT
    // Make sure the control word has the expected value
    Label ok;
    __ cmpw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
    __ jccb(Assembler::equal, ok);
    __ stop("corrupted control word detected");
    __ bind(ok);
#endif

    // Reset the control word to guard against exceptions being unmasked
    // since fstp_d can cause FPU stack underflow exceptions.  Write it
    // into the on stack copy and then reload that to make sure that the
    // current and future values are correct.
    __ movw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
  }

  __ frstor(Address(rsp, 0));
  if (!verify_fpu) {
    // Set the control word so that exceptions are masked for the
    // following code.
    __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
  }

  int off = st0_off;
  int delta = st1_off - off;

  // Save the FPU registers in de-opt-able form
  for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
    __ fstp_d(Address(rsp, off*wordSize));
    off += delta;
  }

  off = xmm0_off;
  delta = xmm1_off - off;
  if(UseSSE == 1) {
    // Save the XMM state
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
      off += delta;
    }
  } else if(UseSSE >= 2) {
    // Save whole 128bit (16 bytes) XMM registers
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
      off += delta;
    }
  }

#ifdef COMPILER2
  if (save_vectors) {
    __ subptr(rsp, ymm_bytes);
    // Save upper half of YMM registers
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
    }
    if (UseAVX > 2) {
      __ subptr(rsp, zmm_bytes);
      // Save upper half of ZMM registers
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
      }
      __ subptr(rsp, opmask_state_bytes);
      // Save opmask registers
      for (int n = 0; n < KRegisterImpl::number_of_registers; n++) {
        __ kmov(Address(rsp, n*8), as_KRegister(n));
      }
    }
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif

  __ vzeroupper();

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map =  new OopMap( frame_words, 0 );

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
#define NEXTREG(x) (x)->as_VMReg()->next()

  map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
  // rbp, location is known implicitly, no oopMap
  map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());

  // %%% This is really a waste but we'll keep things as they were for now for the upper component
  off = st0_off;
  delta = st1_off - off;
  for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
    FloatRegister freg_name = as_FloatRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
    off += delta;
  }
  off = xmm0_off;
  delta = xmm1_off - off;
  for (int n = 0; n < num_xmm_regs; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
    off += delta;
  }
#undef NEXTREG
#undef STACK_OFFSET

  return map;
}
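
// A hedged sketch of how the runtime blob generators typically use this pair
// (illustrative only; the exact arguments vary by caller):
//
//   OopMapSet* oop_maps = new OopMapSet();
//   int frame_size_in_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
//   // ... emit the safepoint or runtime call ...
//   oop_maps->add_gc_map(__ pc() - start, map);
//   RegisterSaver::restore_live_registers(masm);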

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int opmask_state_bytes = 0;
  int additional_frame_bytes = 0;
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  int ymm_bytes = num_xmm_regs * 16;
  int zmm_bytes = num_xmm_regs * 32;
  // Recover XMM & FPU state
#ifdef COMPILER2
  if (restore_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    // Account for the saved upper halves of the YMM registers
    additional_frame_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Account for the saved upper halves of the ZMM registers as well
      additional_frame_bytes += zmm_bytes;
      opmask_state_bytes = KRegisterImpl::number_of_registers * 8;
      additional_frame_bytes += opmask_state_bytes;
    }
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif

  int off = xmm0_off;
  int delta = xmm1_off - off;

  __ vzeroupper();

  if (UseSSE == 1) {
    // Restore XMM registers
    assert(additional_frame_bytes == 0, "");
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
      off += delta;
    }
  } else if (UseSSE >= 2) {
    // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
    // ZMM because the movdqu instruction zeros the upper part of the XMM register.
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
      off += delta;
    }
  }

  if (restore_vectors) {
    off = additional_frame_bytes - ymm_bytes;
    // Restore upper half of YMM registers.
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16+off));
    }
    if (UseAVX > 2) {
      // Restore upper half of ZMM registers.
      off = opmask_state_bytes;
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32+off));
      }
      for (int n = 0; n < KRegisterImpl::number_of_registers; n++) {
        __ kmov(as_KRegister(n), Address(rsp, n*8));
      }
    }
    __ addptr(rsp, additional_frame_bytes);
  }

  __ pop_FPU_state();
  __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers

  __ popf();
  __ popa();
  // Get the rbp, described implicitly by the frame sender code (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result registers. Only used by deoptimization. By
  // now any callee-save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.
  //

  __ frstor(Address(rsp, 0));      // Restore fpu state

  // Recover XMM & FPU state
  if( UseSSE == 1 ) {
    __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
  } else if( UseSSE >= 2 ) {
    __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
  }
  __ movptr(rax, Address(rsp, rax_off*wordSize));
  __ movptr(rdx, Address(rsp, rdx_off*wordSize));
  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_off * wordSize);
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 16 bytes XMM registers are saved by default using SSE2 movdqu instructions.
// Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}
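
// For example, a 32-byte YMM vector (UseAVX > 0) is "wide" by this definition
// and needs the save_vectors paths above, while a 16-byte XMM vector is
// already covered by the default movdqu save.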

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp, and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Registers
// 0 up to RegisterImpl::number_of_registers are the 32-bit
// integer registers.

// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention.
// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {
  uint    stack = 0;          // Starting stack position for args on stack


  // Pass first two oop/int args in registers ECX and EDX.
  uint reg_arg0 = 9999;
  uint reg_arg1 = 9999;

  // Pass first two float/double args in registers XMM0 and XMM1.
  // Doubles have precedence, so if you pass a mix of floats and doubles
  // the doubles will grab the registers before the floats will.
  // CNC - TURNED OFF FOR non-SSE.
  //       On Intel we have to round all doubles (and most floats) at
  //       call sites by storing to the stack in any case.
  // UseSSE=0 ==> Don't Use ==> 9999+0
  // UseSSE=1 ==> Floats only ==> 9999+1
  // UseSSE>=2 ==> Floats or doubles ==> 9999+2
  enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
  uint fargs = (UseSSE>=2) ? 2 : UseSSE;
  uint freg_arg0 = 9999+fargs;
  uint freg_arg1 = 9999+fargs;

  // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
  int i;
  for( i = 0; i < total_args_passed; i++) {
    if( sig_bt[i] == T_DOUBLE ) {
      // first 2 doubles go in registers
      if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
      else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
      else // Else double is passed low on the stack to be aligned.
        stack += 2;
    } else if( sig_bt[i] == T_LONG ) {
      stack += 2;
    }
  }
  int dstack = 0;             // Separate counter for placing doubles

  // Now pick where all else goes.
  for( i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
    case T_INT:
    case T_ARRAY:
    case T_OBJECT:
    case T_ADDRESS:
      if( reg_arg0 == 9999 )  {
        reg_arg0 = i;
        regs[i].set1(rcx->as_VMReg());
      } else if( reg_arg1 == 9999 )  {
        reg_arg1 = i;
        regs[i].set1(rdx->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_FLOAT:
      if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
        freg_arg0 = i;
        regs[i].set1(xmm0->as_VMReg());
      } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
        freg_arg1 = i;
        regs[i].set1(xmm1->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(dstack));
      dstack += 2;
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
      if( freg_arg0 == (uint)i ) {
        regs[i].set2(xmm0->as_VMReg());
      } else if( freg_arg1 == (uint)i ) {
        regs[i].set2(xmm1->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(dstack));
        dstack += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
  return align_up(stack, 2);
}
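
// An illustrative assignment (assuming UseSSE >= 2) for the signature
// (Object, long, int, double), i.e. sig_bt = {T_OBJECT, T_LONG, T_VOID,
// T_INT, T_DOUBLE, T_VOID}:
//   Object -> ECX, long -> stack slots 0/1, int -> EDX, double -> XMM0
// and the function returns 2 (the long's two slots, already a multiple of 2).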

// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax, isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));
  __ pusha();
  __ pushf();

  if (UseSSE == 1) {
    __ subptr(rsp, 2*wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ movflt(Address(rsp, wordSize), xmm1);
  }
  if (UseSSE >= 2) {
    __ subptr(rsp, 4*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ movdbl(Address(rsp, 2*wordSize), xmm1);
  }
#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // VM needs caller's callsite
  __ push(rax);
  // VM needs target method
  __ push(rbx);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ addptr(rsp, 2*wordSize);

  if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
    __ movflt(xmm1, Address(rsp, wordSize));
    __ addptr(rsp, 2*wordSize);
  }
  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
    __ movdbl(xmm1, Address(rsp, 2*wordSize));
    __ addptr(rsp, 4*wordSize);
  }

  __ popf();
  __ popa();
  __ bind(L);
}


static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
  int next_off = st_off - Interpreter::stackElementSize;
  __ movdbl(Address(rsp, next_off), r);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ movptr(rsi, rsp);

  __ subptr(rsp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // st_off points to lowest address on stack.
    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   12 T_LONG
    // 1    8 T_VOID
    // 2    4 T_OBJECT
    // 3    0 T_BOOL
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }

    if (r_1->is_stack()) {
      // memory to memory, use rdi as a temp
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;

      if (!r_2->is_valid()) {
        __ movl(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rdi);
      } else {

        // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
        // st_off == MSW, st_off-wordSize == LSW

        __ movptr(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, next_off), rdi);
#ifndef _LP64
        __ movptr(rdi, Address(rsp, ld_off + wordSize));
        __ movptr(Address(rsp, st_off), rdi);
#else
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ movl(Address(rsp, st_off), r);
      } else {
        // long/double in gpr
        NOT_LP64(ShouldNotReachHere());
        // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movptr(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
        assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
        move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  // And repush original return address
  __ push(rax);
  __ jmp(rcx);
}
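
// To summarize the copy loop above: on entry the args are packed in the
// compiled convention (ECX/EDX/XMM0/XMM1 plus compiled stack slots); on exit
// rsp points at an interpreter-style argument area of total_args_passed
// stack elements (argument 0 at the highest address, as in the 4-arg table
// above), rsi holds the sender SP, the return address has been re-pushed,
// and control jumps to the interpreter entry point.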


static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
  int next_val_off = ld_off - Interpreter::stackElementSize;
  __ movdbl(r, Address(saved_sp, next_val_off));
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  // Note: rsi contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, rdi,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, rdi,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, rdi,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(rdi, rsp);

  // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.
    // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
    // Convert 4-byte stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize units
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }

  // Align the outgoing SP
  __ andptr(rsp, -(StackAlignmentInBytes));

  // push the return address on the stack (note that pushing, rather
  // than storing it, yields the correct frame alignment for the callee)
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, rdi);


  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use rsi as a temp here because compiled code doesn't need rsi as an input
      // and if we end up going through a c2i adapter because of a miss, a reasonable
      // value of rsi will be generated.
      if (!r_2->is_valid()) {
        // __ fld_s(Address(saved_sp, ld_off));
        // __ fstp_s(Address(rsp, st_off));
        __ movl(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), rsi);
      } else {
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        // st_off is LSW (i.e. reg.first())
        // __ fld_d(Address(saved_sp, next_off));
        // __ fstp_d(Address(rsp, st_off));
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movptr(rsi, Address(saved_sp, offset));
        __ movptr(Address(rsp, st_off), rsi);
#ifndef _LP64
        __ movptr(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off + wordSize), rsi);
#endif // _LP64
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.

        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movptr(r, Address(saved_sp, offset));
#ifndef _LP64
        assert(r_2->as_Register() != rax, "need another temporary register");
        // Remember r_1 is low address (and LSB on x86)
        // So r_2 gets loaded from high address regardless of the platform
        __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
#endif // _LP64
      } else {
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ get_thread(rax);
  __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);

  // move Method* to rax in case we end up in a c2i adapter.
  // the c2i adapters expect Method* in rax (c2) because c2's
  // resolve stubs return the result (the method) in rax.
  // I'd love to fix this.
  __ mov(rax, rbx);

  __ jmp(rdi);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not EBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register holder = rax;
  Register receiver = rcx;
  Register temp = rbx;

  {

    Label missed;
    __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ jcc(Assembler::notEqual, missed);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);

    __ bind(missed);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.

  uint    stack = 0;        // All arguments on stack

  for( int i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_FLOAT:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      regs[i].set1(VMRegImpl::stack2reg(stack++));
      break;
    case T_LONG:
    case T_DOUBLE: // The stack numbering is reversed from Java
      // Since C arguments do not get reversed, the ordering for
      // doubles on the stack must be opposite the Java convention
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(stack));
      stack += 2;
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
  return stack;
}
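
// An illustrative layout for a native signature (jint, jlong, jfloat), i.e.
// sig_bt = {T_INT, T_LONG, T_VOID, T_FLOAT}: the int lands in slot 0, the
// long in slots 1-2, the float in slot 3, and the function returns 4.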

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

// A simple move of integer like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      // __ ld(FP, reg2offset(src.first()), L5);
      // __ st(L5, SP, reg2offset(dst.first()));
      __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // no need to sign extend on 64bit
    __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Because of the calling conventions we know that src can be a
  // register or a stack location. dst can only be a stack location.

  assert(dst.first()->is_stack(), "must be stack");
  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack as an argument
    Register rHandle = rax;
    Label nil;
    __ xorptr(rHandle, rHandle);
    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, nil);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    __ bind(nil);
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);

    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
  } else {
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles
    const Register rOop = src.first()->as_Register();
    const Register rHandle = rax;
    int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ movptr(Address(rsp, offset), rOop);
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ xorptr(rHandle, rHandle);
    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip);
    __ lea(rHandle, Address(rsp, offset));
    __ bind(skip);
    // Store the handle parameter
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
    if (is_receiver) {
      *receiver_offset = offset;
    }
  }
}
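
// Note on the null checks above: a null oop must reach the native code as a
// NULL handle (rHandle == 0), not as the address of a stack slot containing
// NULL, matching the JNI convention for null references.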

// A float arg may have to do float-reg to int-reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // Because of the calling convention we know that src is either a stack location
  // or an xmm register. dst can only be a stack location.

  assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");

  if (src.first()->is_stack()) {
    __ movl(rax, Address(rbp, reg2offset_in(src.first())));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
  } else {
    // reg to stack
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The only legal possibility for a long_move VMRegPair is:
  // 1: two stack slots (possibly unaligned)
  // as neither the Java nor the C calling convention will use registers
  // for longs.

  if (src.first()->is_stack() && dst.first()->is_stack()) {
    assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    ShouldNotReachHere();
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Because of the calling convention we know that src is either
  //   1: a single physical register (xmm registers only)
  //   2: two stack slots (possibly unaligned)
  // dst can only be a pair of stack slots.

  assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");

  if (src.first()->is_stack()) {
    // source is all stack
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    // reg to stack
    // No worries about stack alignment
    __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ fstp_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fstp_d(Address(rbp, -2*wordSize));
    break;
  case T_VOID:  break;
  case T_LONG:
    __ movptr(Address(rbp, -wordSize), rax);
    NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
    break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ fld_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fld_d(Address(rbp, -2*wordSize));
    break;
  case T_LONG:
    __ movptr(rax, Address(rbp, -wordSize));
    NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    simple_move32(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  simple_move32(masm, tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  simple_move32(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  simple_move32(masm, tmp, body_arg);
  simple_move32(masm, tmp, length_arg);
  __ bind(done);
}
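
// A hedged usage sketch: for a critical native taking a jintArray, the
// wrapper would call unpack_array_argument(masm, in_reg, T_INT, body_arg,
// length_arg) so the callee receives (jint* body, jint length), or (NULL, 0)
// when the incoming array reference is null.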

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = rbx;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (is_reference_type(sig_bt[i])) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = rbx;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = rcx;  // known to be free at this point
      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
1359 //
1360 // Critical native functions are a shorthand for the use of
1361 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1362 // functions.  The wrapper is expected to unpack the arguments before
1363 // passing them to the callee. Critical native functions leave the state _thread_in_Java,
1364 // since they cannot stop for GC.
1365 // Some other parts of JNI setup are skipped, like the tear down of the JNI handle
1366 // block and the check for pending exceptions, since it's impossible for them
1367 // to be thrown.
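//
// For illustration (hypothetical Java method, not taken from this file):
// a critical native for
//     static native int sum(int[] a);
// is called roughly as
//     jint sum(jint a_length, jint* a_body);
// i.e. there is no JNIEnv*/jclass prefix and each T_ARRAY argument expands
// into an (int length, elem* body) pair, as set up below.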
1368 //
1369 //
1370 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1371                                                 const methodHandle& method,
1372                                                 int compile_id,
1373                                                 BasicType* in_sig_bt,
1374                                                 VMRegPair* in_regs,
1375                                                 BasicType ret_type,
1376                                                 address critical_entry) {
1377   if (method->is_method_handle_intrinsic()) {
1378     vmIntrinsics::ID iid = method->intrinsic_id();
1379     intptr_t start = (intptr_t)__ pc();
1380     int vep_offset = ((intptr_t)__ pc()) - start;
1381     gen_special_dispatch(masm,
1382                          method,
1383                          in_sig_bt,
1384                          in_regs);
1385     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1386     __ flush();
1387     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1388     return nmethod::new_native_nmethod(method,
1389                                        compile_id,
1390                                        masm->code(),
1391                                        vep_offset,
1392                                        frame_complete,
1393                                        stack_slots / VMRegImpl::slots_per_word,
1394                                        in_ByteSize(-1),
1395                                        in_ByteSize(-1),
1396                                        (OopMapSet*)NULL);
1397   }
1398   bool is_critical_native = true;
1399   address native_func = critical_entry;
1400   if (native_func == NULL) {
1401     native_func = method->native_function();
1402     is_critical_native = false;
1403   }
1404   assert(native_func != NULL, "must have function");
1405 
1406   // An OopMap for lock (and class if static)
1407   OopMapSet *oop_maps = new OopMapSet();
1408 
1409   // We have received a description of where all the java args are located
1410   // on entry to the wrapper. We need to convert these args to where
1411   // the jni function will expect them. To figure out where they go
1412   // we convert the java signature to a C signature by inserting
1413   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1414 
1415   const int total_in_args = method->size_of_parameters();
1416   int total_c_args = total_in_args;
1417   if (!is_critical_native) {
1418     total_c_args += 1;
1419     if (method->is_static()) {
1420       total_c_args++;
1421     }
1422   } else {
1423     for (int i = 0; i < total_in_args; i++) {
1424       if (in_sig_bt[i] == T_ARRAY) {
1425         total_c_args++;
1426       }
1427     }
1428   }
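  // Worked example (hypothetical): a non-critical static method gets two
  // hidden arguments, so total_c_args = total_in_args + 2 (JNIEnv* plus the
  // class mirror); a critical native with two array parameters also gets
  // total_c_args = total_in_args + 2, because each array expands into a
  // (length, body) pair.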
1429 
1430   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1431   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1432   BasicType* in_elem_bt = NULL;
1433 
1434   int argc = 0;
1435   if (!is_critical_native) {
1436     out_sig_bt[argc++] = T_ADDRESS;
1437     if (method->is_static()) {
1438       out_sig_bt[argc++] = T_OBJECT;
1439     }
1440 
1441     for (int i = 0; i < total_in_args ; i++ ) {
1442       out_sig_bt[argc++] = in_sig_bt[i];
1443     }
1444   } else {
1445     in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1446     SignatureStream ss(method->signature());
1447     for (int i = 0; i < total_in_args ; i++ ) {
1448       if (in_sig_bt[i] == T_ARRAY) {
1449         // Arrays are passed as int, elem* pair
1450         out_sig_bt[argc++] = T_INT;
1451         out_sig_bt[argc++] = T_ADDRESS;
1452         ss.skip_array_prefix(1);  // skip one '['
1453         assert(ss.is_primitive(), "primitive type expected");
1454         in_elem_bt[i] = ss.type();
1455       } else {
1456         out_sig_bt[argc++] = in_sig_bt[i];
1457         in_elem_bt[i] = T_VOID;
1458       }
1459       if (in_sig_bt[i] != T_VOID) {
1460         assert(in_sig_bt[i] == ss.type() ||
1461                in_sig_bt[i] == T_ARRAY, "must match");
1462         ss.next();
1463       }
1464     }
1465   }
1466 
1467   // Now figure out where the args must be stored and how much stack space
1468   // they require.
1469   int out_arg_slots;
1470   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1471 
1472   // Compute framesize for the wrapper.  We need to handlize all oops in
1473   // registers (a max of 2 on x86).
1474 
1475   // Calculate the total number of stack slots we will need.
1476 
1477   // First count the abi requirement plus all of the outgoing args
1478   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1479 
1480   // Now the space for the inbound oop handle area
1481   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1482   if (is_critical_native) {
1483     // Critical natives may have to call out so they need a save area
1484     // for register arguments.
1485     int double_slots = 0;
1486     int single_slots = 0;
1487     for ( int i = 0; i < total_in_args; i++) {
1488       if (in_regs[i].first()->is_Register()) {
1489         const Register reg = in_regs[i].first()->as_Register();
1490         switch (in_sig_bt[i]) {
1491           case T_ARRAY:  // critical array (uses 2 slots on LP64)
1492           case T_BOOLEAN:
1493           case T_BYTE:
1494           case T_SHORT:
1495           case T_CHAR:
1496           case T_INT:  single_slots++; break;
1497           case T_LONG: double_slots++; break;
1498           default:  ShouldNotReachHere();
1499         }
1500       } else if (in_regs[i].first()->is_XMMRegister()) {
1501         switch (in_sig_bt[i]) {
1502           case T_FLOAT:  single_slots++; break;
1503           case T_DOUBLE: double_slots++; break;
1504           default:  ShouldNotReachHere();
1505         }
1506       } else if (in_regs[i].first()->is_FloatRegister()) {
1507         ShouldNotReachHere();
1508       }
1509     }
1510     total_save_slots = double_slots * 2 + single_slots;
1511     // align the save area
1512     if (double_slots != 0) {
1513       stack_slots = align_up(stack_slots, 2);
1514     }
1515   }
1516 
1517   int oop_handle_offset = stack_slots;
1518   stack_slots += total_save_slots;
1519 
1520   // Now any space we need for handlizing a klass if static method
1521 
1522   int klass_slot_offset = 0;
1523   int klass_offset = -1;
1524   int lock_slot_offset = 0;
1525   bool is_static = false;
1526 
1527   if (method->is_static()) {
1528     klass_slot_offset = stack_slots;
1529     stack_slots += VMRegImpl::slots_per_word;
1530     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1531     is_static = true;
1532   }
1533 
1534   // Plus a lock if needed
1535 
1536   if (method->is_synchronized()) {
1537     lock_slot_offset = stack_slots;
1538     stack_slots += VMRegImpl::slots_per_word;
1539   }
1540 
1541   // Now a place (+2) to save return values or temps during shuffling,
1542   // plus 2 for the return address (which we own) and the saved rbp
1543   stack_slots += 4;
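  // Worked example (hypothetical, 32-bit: VMRegImpl::slots_per_word == 1): a
  // synchronized static native with out_arg_slots == 3 accumulates
  //   3 (outgoing args) + 2 (oop handle area) + 1 (klass) + 1 (lock) + 4 = 11
  // slots, which the alignment further down rounds up to 12, assuming
  // StackAlignmentInBytes == 16 (i.e. StackAlignmentInSlots == 4).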
1544 
1545   // OK, the space we have allocated will look like:
1546   //
1547   //
1548   // FP-> |                     |
1549   //      |---------------------|
1550   //      | 2 slots for moves   |
1551   //      |---------------------|
1552   //      | lock box (if sync)  |
1553   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1554   //      | klass (if static)   |
1555   //      |---------------------| <- klass_slot_offset
1556   //      | oopHandle area      |
1557   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1558   //      | outbound memory     |
1559   //      | based arguments     |
1560   //      |                     |
1561   //      |---------------------|
1562   //      |                     |
1563   // SP-> | out_preserved_slots |
1564   //
1565   //
1566   // ****************************************************************************
1567   // WARNING - on Windows Java natives use the stdcall calling convention and
1568   // pop the arguments off of the stack after the jni call. Before the call we
1569   // can use instructions that are SP relative. After the jni call we switch to
1570   // FP relative instructions instead of re-adjusting the stack on Windows.
1571   // ****************************************************************************
1572 
1573 
1574   // Now compute actual number of stack words we need rounding to make
1575   // stack properly aligned.
1576   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1577 
1578   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1579 
1580   intptr_t start = (intptr_t)__ pc();
1581 
1582   // First thing, make an ic check to see if we should even be here
1583 
1584   // We are free to use all registers as temps without saving them and
1585   // restoring them except rbp. rbp is the only callee save register
1586   // as far as the interpreter and the compiler(s) are concerned.
1587 
1588 
1589   const Register ic_reg = rax;
1590   const Register receiver = rcx;
1591   Label hit;
1592   Label exception_pending;
1593 
1594   __ verify_oop(receiver);
1595   __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
1596   __ jcc(Assembler::equal, hit);
1597 
1598   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1599 
1600   // The verified entry must be aligned for code patching,
1601   // and the first 5 bytes must be in the same cache line.
1602   // If we align at 8 then we can be sure the 5 bytes are in the same line.
1603   __ align(8);
1604 
1605   __ bind(hit);
1606 
1607   int vep_offset = ((intptr_t)__ pc()) - start;
1608 
1609 #ifdef COMPILER1
1610   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1611   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1612     inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1613   }
1614 #endif // COMPILER1
1615 
1616   // The instruction at the verified entry point must be 5 bytes or longer
1617   // because it can be patched on the fly by make_non_entrant. The stack bang
1618   // instruction fits that requirement.
1619 
1620   // Generate stack overflow check
1621   __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
1622 
1623   // Generate a new frame for the wrapper.
1624   __ enter();
1625   // -2 because return address is already present and so is saved rbp
1626   __ subptr(rsp, stack_size - 2*wordSize);
1627 
1628 
1629   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1630   bs->nmethod_entry_barrier(masm);
1631 
1632   // Frame is now completed as far as size and linkage.
1633   int frame_complete = ((intptr_t)__ pc()) - start;
1634 
1635   if (UseRTMLocking) {
1636     // Abort RTM transaction before calling JNI
1637     // because critical section will be large and will be
1638     // aborted anyway. Also nmethod could be deoptimized.
1639     __ xabort(0);
1640   }
1641 
1642   // Calculate the difference between rsp and rbp. We need to know it
1643   // after the native call because on Windows Java natives will pop
1644   // the arguments and it is painful to do rsp relative addressing
1645   // in a platform independent way. So after the call we switch to
1646   // rbp relative addressing.
1647 
1648   int fp_adjustment = stack_size - 2*wordSize;
1649 
1650 #ifdef COMPILER2
1651   // C2 may leave the stack dirty if not in SSE2+ mode
1652   if (UseSSE >= 2) {
1653     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1654   } else {
1655     __ empty_FPU_stack();
1656   }
1657 #endif /* COMPILER2 */
1658 
1659   // Compute the rbp offset for any slots used after the jni call
1660 
1661   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1662 
1663   // We use rdi as a thread pointer because it is callee save and
1664   // if we load it once it is usable thru the entire wrapper
1665   const Register thread = rdi;
1666 
1667   // We use rsi as the oop handle for the receiver/klass
1668   // It is callee save so it survives the call to native
1669 
1670   const Register oop_handle_reg = rsi;
1671 
1672   __ get_thread(thread);
1673 
1674   //
1675   // We immediately shuffle the arguments so that for any vm call we have to
1676   // make from here on out (sync slow path, jvmti, etc.) we will already have
1677   // captured the oops from our caller and have a valid oopMap for
1678   // them.
1679 
1680   // -----------------
1681   // The Grand Shuffle
1682   //
1683   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1684   // and, if static, the class mirror instead of a receiver.  This pretty much
1685   // guarantees that register layout will not match (and x86 doesn't use reg
1686   // parms though amd64 does).  Since the native abi doesn't use register args
1687   // and the java convention does we don't have to worry about collisions.
1688   // All of our moves are reg->stack or stack->stack.
1689   // We ignore the extra arguments during the shuffle and handle them at the
1690   // last moment. The shuffle is described by the two calling convention
1691   // vectors we have in our possession. We simply walk the java vector to
1692   // get the source locations and the c vector to get the destinations.
1693 
1694   int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
1695 
1696   // Record rsp-based slot for receiver on stack for non-static methods
1697   int receiver_offset = -1;
1698 
1699   // This is a trick. We double the stack slots so we can claim
1700   // the oops in the caller's frame. Since we are sure to have
1701   // more args than the caller, doubling is enough to make
1702   // sure we can capture all the incoming oop args from the
1703   // caller.
1704   //
1705   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1706 
1707   // Mark location of rbp
1708   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1709 
1710   // We know that we only have args in at most two integer registers (rcx, rdx). So rax and rbx
1711   // are free for temporaries if we have to do stack to stack moves.
1712   // All inbound args are referenced relative to rbp and all outbound args via rsp.
1713 
1714   for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1715     switch (in_sig_bt[i]) {
1716       case T_ARRAY:
1717         if (is_critical_native) {
1718           VMRegPair in_arg = in_regs[i];
1719           unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1720           c_arg++;
1721           break;
1722         }
1723       case T_OBJECT:
1724         assert(!is_critical_native, "no oop arguments");
1725         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1726                     ((i == 0) && (!is_static)),
1727                     &receiver_offset);
1728         break;
1729       case T_VOID:
1730         break;
1731 
1732       case T_FLOAT:
1733         float_move(masm, in_regs[i], out_regs[c_arg]);
1734         break;
1735 
1736       case T_DOUBLE:
1737         assert( i + 1 < total_in_args &&
1738                 in_sig_bt[i + 1] == T_VOID &&
1739                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1740         double_move(masm, in_regs[i], out_regs[c_arg]);
1741         break;
1742 
1743       case T_LONG :
1744         long_move(masm, in_regs[i], out_regs[c_arg]);
1745         break;
1746 
1747       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1748 
1749       default:
1750         simple_move32(masm, in_regs[i], out_regs[c_arg]);
1751     }
1752   }
1753 
1754   // Pre-load a static method's oop into rsi.  Used both by locking code and
1755   // the normal JNI call code.
1756   if (method->is_static() && !is_critical_native) {
1757 
1758     // load oop into a register
1759     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1760 
1761     // Now handlize the static class mirror; it's known not-null.
1762     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1763     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1764 
1765     // Now get the handle
1766     __ lea(oop_handle_reg, Address(rsp, klass_offset));
1767     // store the klass handle as second argument
1768     __ movptr(Address(rsp, wordSize), oop_handle_reg);
1769   }
1770 
1771   // Change state to native (we save the return address in the thread, since it might not
1772   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1773   // points into the right code segment. It does not have to be the correct return pc.
1774   // We use the same pc/oopMap repeatedly when we call out
1775 
1776   intptr_t the_pc = (intptr_t) __ pc();
1777   oop_maps->add_gc_map(the_pc - start, map);
1778 
1779   __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
1780 
1781 
1782   // We have all of the arguments setup at this point. We must not touch any
1783   // argument registers from here on (if we saved/restored them there would be no oopMap for them).
1784 
1785   {
1786     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1787     __ mov_metadata(rax, method());
1788     __ call_VM_leaf(
1789          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1790          thread, rax);
1791   }
1792 
1793   // RedefineClasses() tracing support for obsolete method entry
1794   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1795     __ mov_metadata(rax, method());
1796     __ call_VM_leaf(
1797          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1798          thread, rax);
1799   }
1800 
1801   // These are register definitions we need for locking/unlocking
1802   const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
1803   const Register obj_reg  = rcx;  // Will contain the oop
1804   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1805 
1806   Label slow_path_lock;
1807   Label lock_done;
1808 
1809   // Lock a synchronized method
1810   if (method->is_synchronized()) {
1811     assert(!is_critical_native, "unhandled");
1812 
1813 
1814     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1815 
1816     // Get the handle (the 2nd argument)
1817     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1818 
1819     // Get address of the box
1820 
1821     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1822 
1823     // Load the oop from the handle
1824     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1825 
1826     if (LockingMode == LM_MONITOR) {
1827       __ jmp(slow_path_lock);
1828     } else if (LockingMode == LM_LEGACY) {
1829       if (UseBiasedLocking) {
1830         // Note that oop_handle_reg is trashed during this call
1831         __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
1832       }
1833 
1834       // Load immediate 1 into swap_reg %rax
1835       __ movptr(swap_reg, 1);
1836 
1837       // Load (object->mark() | 1) into swap_reg %rax
1838       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1839 
1840       // Save (object->mark() | 1) into BasicLock's displaced header
1841       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1842 
1843       // src -> dest iff dest == rax, else rax <- dest
1844       // *obj_reg = lock_reg iff *obj_reg == rax, else rax = *(obj_reg)
1845       __ lock();
1846       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1847       __ jcc(Assembler::equal, lock_done);
1848 
1849       // Test if the oopMark is an obvious stack pointer, i.e.,
1850       //  1) (mark & 3) == 0, and
1851       //  2) rsp <= mark < rsp + os::vm_page_size()
1852       // These 3 tests can be done by evaluating the following
1853       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1854       // assuming both the stack pointer and the page size have their
1855       // least significant 2 bits clear.
1856       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
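      //
      // Illustrative arithmetic (assuming a 4096-byte page): 3 - 4096 ==
      // 0xfffff003, so the mask below keeps the two low (tag) bits and every
      // bit at or above the page size. The result is zero exactly when the
      // mark is 4-byte aligned and lies less than one page above rsp, i.e.
      // when it points into our own stack (the recursive-lock case).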
1857 
1858       __ subptr(swap_reg, rsp);
1859       __ andptr(swap_reg, 3 - os::vm_page_size());
1860 
1861       // Save the test result, for recursive case, the result is zero
1862       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1863       __ jcc(Assembler::notEqual, slow_path_lock);
1864     } else {
1865       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1866       // Load object header
1867       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1868       __ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
1869     }
1870     // Slow path will re-enter here
1871     __ bind(lock_done);
1872 
1873     if (UseBiasedLocking) {
1874       // Re-fetch oop_handle_reg as we trashed it above
1875       __ movptr(oop_handle_reg, Address(rsp, wordSize));
1876     }
1877   }
1878 
1879 
1880   // Finally just about ready to make the JNI call
1881 
1882   // get JNIEnv* which is first argument to native
1883   if (!is_critical_native) {
1884     __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1885     __ movptr(Address(rsp, 0), rdx);
1886 
1887     // Now set thread in native
1888     __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1889   }
1890 
1891   __ call(RuntimeAddress(native_func));
1892 
1893   // Verify or restore cpu control state after JNI call
1894   __ restore_cpu_control_state_after_jni();
1895 
1896   // WARNING - on Windows Java natives use the stdcall calling convention and pop
1897   // the arguments off of the stack. We could just re-adjust the stack pointer here
1898   // and continue to do SP relative addressing but we instead switch to FP
1899   // relative addressing.
1900 
1901   // Unpack native results.
1902   switch (ret_type) {
1903   case T_BOOLEAN: __ c2bool(rax);            break;
1904   case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
1905   case T_BYTE   : __ sign_extend_byte (rax); break;
1906   case T_SHORT  : __ sign_extend_short(rax); break;
1907   case T_INT    : /* nothing to do */        break;
1908   case T_DOUBLE :
1909   case T_FLOAT  :
1910     // Result is in st0 we'll save as needed
1911     break;
1912   case T_ARRAY:                 // Really a handle
1913   case T_OBJECT:                // Really a handle
1914       break; // can't de-handlize until after safepoint check
1915   case T_VOID: break;
1916   case T_LONG: break;
1917   default       : ShouldNotReachHere();
1918   }
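  // For example, a native method declared to return jboolean reports truth as
  // any nonzero byte in al; the c2bool normalization above reduces it to
  // exactly 0 or 1, as Java code expects.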
1919 
1920   Label after_transition;
1921 
1922   // If this is a critical native, check for a safepoint or suspend request after the call.
1923   // If a safepoint is needed, transition to native, then to native_trans to handle
1924   // safepoints like the native methods that are not critical natives.
1925   if (is_critical_native) {
1926     Label needs_safepoint;
1927     __ safepoint_poll(needs_safepoint, thread, false /* at_return */, false /* in_nmethod */);
1928     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1929     __ jcc(Assembler::equal, after_transition);
1930     __ bind(needs_safepoint);
1931   }
1932 
1933   // Switch thread to "native transition" state before reading the synchronization state.
1934   // This additional state is necessary because reading and testing the synchronization
1935   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1936   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1937   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1938   //     Thread A is resumed to finish this native method, but doesn't block here since it
1939   //     didn't see any synchronization in progress, and escapes.
1940   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1941 
1942   // Force this write out before the read below
1943   __ membar(Assembler::Membar_mask_bits(
1944             Assembler::LoadLoad | Assembler::LoadStore |
1945             Assembler::StoreLoad | Assembler::StoreStore));
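  // On x86 only StoreLoad reordering is possible in hardware, so this fence
  // is really about ordering the thread-state store above against the
  // safepoint-state load below; the other mask bits are no-ops on this
  // architecture.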
1946 
1947   if (AlwaysRestoreFPU) {
1948     // Make sure the control word is correct.
1949     __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
1950   }
1951 
1952   // check for safepoint operation in progress and/or pending suspend requests
1953   { Label Continue, slow_path;
1954 
1955     __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
1956 
1957     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1958     __ jcc(Assembler::equal, Continue);
1959     __ bind(slow_path);
1960 
1961     // Don't use call_VM as it will see a possible pending exception and forward it
1962     // and never return here preventing us from clearing _last_native_pc down below.
1963     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1964     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1965     // by hand.
1966     //
1967     __ vzeroupper();
1968 
1969     save_native_result(masm, ret_type, stack_slots);
1970     __ push(thread);
1971     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1972                                               JavaThread::check_special_condition_for_native_trans)));
1973     __ increment(rsp, wordSize);
1974     // Restore any method result value
1975     restore_native_result(masm, ret_type, stack_slots);
1976     __ bind(Continue);
1977   }
1978 
1979   // change thread state
1980   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1981   __ bind(after_transition);
1982 
1983   Label reguard;
1984   Label reguard_done;
1985   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
1986   __ jcc(Assembler::equal, reguard);
1987 
1988   // slow path reguard  re-enters here
1989   __ bind(reguard_done);
1990 
1991   // Handle possible exception (will unlock if necessary)
1992 
1993   // native result if any is live
1994 
1995   // Unlock
1996   Label slow_path_unlock;
1997   Label unlock_done;
1998   if (method->is_synchronized()) {
1999 
2000     Label done;
2001 
2002     // Get locked oop from the handle we passed to jni
2003     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2004 
2005     if (UseBiasedLocking) {
2006       __ biased_locking_exit(obj_reg, rbx, done);
2007     }
2008 
2009     if (LockingMode == LM_LEGACY) {
2010       // Simple recursive lock?
2011 
2012       __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2013       __ jcc(Assembler::equal, done);
2014     }
2015 
2016     // Must save rax if it is live now because cmpxchg must use it
2017     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2018       save_native_result(masm, ret_type, stack_slots);
2019     }
2020 
2021     if (LockingMode == LM_MONITOR) {
2022       __ jmp(slow_path_unlock);
2023     } else if (LockingMode == LM_LEGACY) {
2024       //  get old displaced header
2025       __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2026 
2027       // get address of the stack lock
2028       __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2029 
2030       // Atomic swap old header if oop still contains the stack lock
2031       // src -> dest iff dest == rax, else rax <- dest
2032       // *obj_reg = rbx iff *obj_reg == rax, else rax = *(obj_reg)
2033       __ lock();
2034       __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2035       __ jcc(Assembler::notEqual, slow_path_unlock);
2036     } else {
2037       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2038       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2039       __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
2040       __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
2041     }
2042 
2043     // slow path re-enters here
2044     __ bind(unlock_done);
2045     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2046       restore_native_result(masm, ret_type, stack_slots);
2047     }
2048 
2049     __ bind(done);
2050 
2051   }
2052 
2053   {
2054     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2055     // Tell dtrace about this method exit
2056     save_native_result(masm, ret_type, stack_slots);
2057     __ mov_metadata(rax, method());
2058     __ call_VM_leaf(
2059          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2060          thread, rax);
2061     restore_native_result(masm, ret_type, stack_slots);
2062   }
2063 
2064   // We can finally stop using that last_Java_frame we set up ages ago
2065 
2066   __ reset_last_Java_frame(thread, false);
2067 
2068   // Unbox oop result, e.g. JNIHandles::resolve value.
2069   if (is_reference_type(ret_type)) {
2070     __ resolve_jobject(rax /* value */,
2071                        thread /* thread */,
2072                        rcx /* tmp */);
2073   }
2074 
2075   if (CheckJNICalls) {
2076     // clear_pending_jni_exception_check
2077     __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2078   }
2079 
2080   if (!is_critical_native) {
2081     // reset handle block
2082     __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
2083     __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
2084 
2085     // Any exception pending?
2086     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2087     __ jcc(Assembler::notEqual, exception_pending);
2088   }
2089 
2090   // no exception, we're almost done
2091 
2092   // check that only result value is on FPU stack
2093   __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
2094 
2095   // Fixup floating point results so that the result looks like a return from a compiled method
2096   if (ret_type == T_FLOAT) {
2097     if (UseSSE >= 1) {
2098       // Pop st0 and store as float and reload into xmm register
2099       __ fstp_s(Address(rbp, -4));
2100       __ movflt(xmm0, Address(rbp, -4));
2101     }
2102   } else if (ret_type == T_DOUBLE) {
2103     if (UseSSE >= 2) {
2104       // Pop st0 and store as double and reload into xmm register
2105       __ fstp_d(Address(rbp, -8));
2106       __ movdbl(xmm0, Address(rbp, -8));
2107     }
2108   }
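  // Note: the native x86 C ABI returns floating point results in st0, while
  // compiled Java callers under UseSSE expect them in xmm0; the store/reload
  // through a scratch slot just below rbp bridges the two conventions.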
2109 
2110   // Return
2111 
2112   __ leave();
2113   __ ret(0);
2114 
2115   // Unexpected paths are out of line and go here
2116 
2117   // Slow path locking & unlocking
2118   if (method->is_synchronized()) {
2119 
2120     // BEGIN Slow path lock
2121 
2122     __ bind(slow_path_lock);
2123 
2124     // last_Java_frame is already set up. No exceptions, so do a vanilla call, not call_VM
2125     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2126     __ push(thread);
2127     __ push(lock_reg);
2128     __ push(obj_reg);
2129     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
2130     __ addptr(rsp, 3*wordSize);
2131 
2132 #ifdef ASSERT
2133     { Label L;
2134     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2135     __ jcc(Assembler::equal, L);
2136     __ stop("no pending exception allowed on exit from monitorenter");
2137     __ bind(L);
2138     }
2139 #endif
2140     __ jmp(lock_done);
2141 
2142     // END Slow path lock
2143 
2144     // BEGIN Slow path unlock
2145     __ bind(slow_path_unlock);
2146     __ vzeroupper();
2147     // Slow path unlock
2148 
2149     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2150       save_native_result(masm, ret_type, stack_slots);
2151     }
2152     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2153 
2154     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2155     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2156 
2157 
2158     // should be a peal
2159     // +wordSize because of the push above
2160     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2161     __ push(thread);
2162     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2163     __ push(rax);
2164 
2165     __ push(obj_reg);
2166     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2167     __ addptr(rsp, 3*wordSize);
2168 #ifdef ASSERT
2169     {
2170       Label L;
2171       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2172       __ jcc(Assembler::equal, L);
2173       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2174       __ bind(L);
2175     }
2176 #endif /* ASSERT */
2177 
2178     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2179 
2180     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2181       restore_native_result(masm, ret_type, stack_slots);
2182     }
2183     __ jmp(unlock_done);
2184     // END Slow path unlock
2185 
2186   }
2187 
2188   // SLOW PATH Reguard the stack if needed
2189 
2190   __ bind(reguard);
2191   __ vzeroupper();
2192   save_native_result(masm, ret_type, stack_slots);
2193   {
2194     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2195   }
2196   restore_native_result(masm, ret_type, stack_slots);
2197   __ jmp(reguard_done);
2198 
2199 
2200   // BEGIN EXCEPTION PROCESSING
2201 
2202   if (!is_critical_native) {
2203     // Forward the exception
2204     __ bind(exception_pending);
2205 
2206     // remove possible return value from FPU register stack
2207     __ empty_FPU_stack();
2208 
2209     // pop our frame
2210     __ leave();
2211     // and forward the exception
2212     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2213   }
2214 
2215   __ flush();
2216 
2217   nmethod *nm = nmethod::new_native_nmethod(method,
2218                                             compile_id,
2219                                             masm->code(),
2220                                             vep_offset,
2221                                             frame_complete,
2222                                             stack_slots / VMRegImpl::slots_per_word,
2223                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2224                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2225                                             oop_maps);
2226 
2227   return nm;
2228 
2229 }
2230 
2231 // This function returns the adjustment size (in number of words) to a c2i adapter
2232 // activation for use during deoptimization
2233 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
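  // Illustrative example (hypothetical numbers): a callee with 5 locals and
  // 2 parameters needs (5 - 2) * Interpreter::stackElementWords extra words,
  // since the caller's frame already provides space for the parameters but
  // not for the remaining locals.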
2234   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2235 }
2236 
2237 
2238 // Number of stack slots between incoming argument block and the start of
2239 // a new frame.  The PROLOG must add this many slots to the stack.  The
2240 // EPILOG must remove this many slots.  Intel needs one slot for
2241 // return address and one for rbp (must save rbp)
2242 uint SharedRuntime::in_preserve_stack_slots() {
2243   return 2+VerifyStackAtCalls;
2244 }
2245 
2246 uint SharedRuntime::out_preserve_stack_slots() {
2247   return 0;
2248 }
2249 
2250 //------------------------------generate_deopt_blob----------------------------
2251 void SharedRuntime::generate_deopt_blob() {
2252   // allocate space for the code
2253   ResourceMark rm;
2254   // setup code generation tools
2255   // note: the buffer code size must account for StackShadowPages=50
2256   CodeBuffer   buffer("deopt_blob", 1536, 1024);
2257   MacroAssembler* masm = new MacroAssembler(&buffer);
2258   int frame_size_in_words;
2259   OopMap* map = NULL;
2260   // Account for the extra args we place on the stack
2261   // by the time we call fetch_unroll_info
2262   const int additional_words = 2; // deopt kind, thread
2263 
2264   OopMapSet *oop_maps = new OopMapSet();
2265 
2266   // -------------
2267   // This code enters when returning to a de-optimized nmethod.  A return
2268   // address has been pushed on the stack, and return values are in
2269   // registers.
2270   // If we are doing a normal deopt then we were called from the patched
2271   // nmethod from the point we returned to the nmethod. So the return
2272   // address on the stack is wrong by NativeCall::instruction_size
2273   // We will adjust the value so that it looks like we have the original return
2274   // address on the stack (like when we eagerly deoptimized).
2275   // In the case of an exception pending when deoptimizing, we enter
2276   // with a return address on the stack that points after the call we patched
2277   // into the exception handler. We have the following register state:
2278   //    rax: exception
2279   //    rbx: exception handler
2280   //    rdx: throwing pc
2281   // So in this case we simply jam rdx into the useless return address and
2282   // the stack looks just like we want.
2283   //
2284   // At this point we need to de-opt.  We save the argument return
2285   // registers.  We call the first C routine, fetch_unroll_info().  This
2286   // routine captures the return values and returns a structure which
2287   // describes the current frame size and the sizes of all replacement frames.
2288   // The current frame is compiled code and may contain many inlined
2289   // functions, each with their own JVM state.  We pop the current frame, then
2290   // push all the new frames.  Then we call the C routine unpack_frames() to
2291   // populate these frames.  Finally unpack_frames() returns us the new target
2292   // address.  Notice that callee-save registers are BLOWN here; they have
2293   // already been captured in the vframeArray at the time the return PC was
2294   // patched.
2295   address start = __ pc();
2296   Label cont;
2297 
2298   // Prolog for non exception case!
2299 
2300   // Save everything in sight.
2301 
2302   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2303   // Normal deoptimization
2304   __ push(Deoptimization::Unpack_deopt);
2305   __ jmp(cont);
2306 
2307   int reexecute_offset = __ pc() - start;
2308 
2309   // Reexecute case
2310   // the return address is the pc that describes what bci to re-execute at
2311 
2312   // No need to update map as each call to save_live_registers will produce identical oopmap
2313   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2314 
2315   __ push(Deoptimization::Unpack_reexecute);
2316   __ jmp(cont);
2317 
2318   int exception_offset = __ pc() - start;
2319 
2320   // Prolog for exception case
2321 
2322   // all registers are dead at this entry point, except for rax, and
2323   // rdx which contain the exception oop and exception pc
2324   // respectively.  Set them in TLS and fall thru to the
2325   // unpack_with_exception_in_tls entry point.
2326 
2327   __ get_thread(rdi);
2328   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2329   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2330 
2331   int exception_in_tls_offset = __ pc() - start;
2332 
2333   // new implementation because exception oop is now passed in JavaThread
2334 
2335   // Prolog for exception case
2336   // All registers must be preserved because they might be used by LinearScan
2337   // Exception oop and throwing PC are passed in JavaThread
2338   // tos: stack at point of call to method that threw the exception (i.e. only
2339   // args are on the stack, no return address)
2340 
2341   // make room on stack for the return address
2342   // It will be patched later with the throwing pc. The correct value is not
2343   // available now because loading it from memory would destroy registers.
2344   __ push(0);
2345 
2346   // Save everything in sight.
2347 
2348   // No need to update map as each call to save_live_registers will produce identical oopmap
2349   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2350 
2351   // Now it is safe to overwrite any register
2352 
2353   // store the correct deoptimization type
2354   __ push(Deoptimization::Unpack_exception);
2355 
2356   // load throwing pc from JavaThread and patch it as the return address
2357   // of the current frame. Then clear the field in JavaThread
2358   __ get_thread(rdi);
2359   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2360   __ movptr(Address(rbp, wordSize), rdx);
2361   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2362 
2363 #ifdef ASSERT
2364   // verify that there is really an exception oop in JavaThread
2365   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2366   __ verify_oop(rax);
2367 
2368   // verify that there is no pending exception
2369   Label no_pending_exception;
2370   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2371   __ testptr(rax, rax);
2372   __ jcc(Assembler::zero, no_pending_exception);
2373   __ stop("must not have pending exception here");
2374   __ bind(no_pending_exception);
2375 #endif
2376 
2377   __ bind(cont);
2378 
2379   // Compiled code leaves the floating point stack dirty, empty it.
2380   __ empty_FPU_stack();
2381 
2382 
2383   // Call C code.  Need thread and this frame, but NOT official VM entry
2384   // crud.  We cannot block on this call, no GC can happen.
2385   __ get_thread(rcx);
2386   __ push(rcx);
2387   // fetch_unroll_info needs to call last_java_frame()
2388   __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2389 
2390   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2391 
2392   // Need to have an oopmap that tells fetch_unroll_info where to
2393   // find any register it might need.
2394 
2395   oop_maps->add_gc_map( __ pc()-start, map);
2396 
2397   // Discard args to fetch_unroll_info
2398   __ pop(rcx);
2399   __ pop(rcx);
2400 
2401   __ get_thread(rcx);
2402   __ reset_last_Java_frame(rcx, false);
2403 
2404   // Load UnrollBlock into EDI
2405   __ mov(rdi, rax);
2406 
2407   // Move the unpack kind to a safe place in the UnrollBlock because
2408   // we are very short of registers
2409 
2410   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2411   // retrieve the deopt kind from the UnrollBlock.
2412   __ movl(rax, unpack_kind);
2413 
2414   Label noException;
2415   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2416   __ jcc(Assembler::notEqual, noException);
2417   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2418   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2419   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2420   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2421 
2422   __ verify_oop(rax);
2423 
2424   // Overwrite the result registers with the exception results.
2425   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2426   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2427 
2428   __ bind(noException);
2429 
2430   // Stack is back to only having register save data on the stack.
2431   // Now restore the result registers. Everything else is either dead or captured
2432   // in the vframeArray.
2433 
2434   RegisterSaver::restore_result_registers(masm);
2435 
2436   // A non-standard control word may be leaked out through a safepoint blob, and we can
2437   // deopt at a poll point with the non-standard control word. However, we should make
2438   // sure the control word is correct after restore_result_registers.
2439   __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
2440 
2441   // All of the register save area has been popped off the stack. Only the
2442   // return address remains.
2443 
2444   // Pop all the frames we must move/replace.
2445   //
2446   // Frame picture (youngest to oldest)
2447   // 1: self-frame (no frame link)
2448   // 2: deopting frame  (no frame link)
2449   // 3: caller of deopting frame (could be compiled/interpreted).
2450   //
2451   // Note: by leaving the return address of self-frame on the stack
2452   // and using the size of frame 2 to adjust the stack
2453   // when we are done the return to frame 3 will still be on the stack.
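  //
  // Illustrative picture (hypothetical inlining): if b() was inlined into a()
  // in the deoptee, the single compiled frame (2) is replaced by two
  // interpreter frames, one each for a() and b(); the UnrollBlock supplies
  // one frame size and one pc per replacement frame for the loop below.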
2454 
2455   // Pop deoptimized frame
2456   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2457 
2458   // sp should be pointing at the return address to the caller (3)
2459 
2460   // Pick up the initial fp we should save
2461   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2462   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2463 
2464 #ifdef ASSERT
2465   // Compilers generate code that bangs the stack by as much as the
2466   // interpreter would need. So this stack banging should never
2467   // trigger a fault. Verify that it does not on non product builds.
2468   __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2469   __ bang_stack_size(rbx, rcx);
2470 #endif
2471 
2472   // Load array of frame pcs into ECX
2473   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2474 
2475   __ pop(rsi); // trash the old pc
2476 
2477   // Load array of frame sizes into ESI
2478   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2479 
2480   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2481 
2482   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2483   __ movl(counter, rbx);
2484 
2485   // Now adjust the caller's stack to make up for the extra locals
2486   // but record the original sp so that we can save it in the skeletal interpreter
2487   // frame and the stack walking of interpreter_sender will get the unextended sp
2488   // value and not the "real" sp value.
2489 
2490   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2491   __ movptr(sp_temp, rsp);
2492   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2493   __ subptr(rsp, rbx);
2494 
2495   // Push interpreter frames in a loop
2496   Label loop;
2497   __ bind(loop);
2498   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2499   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp by hand
2500   __ pushptr(Address(rcx, 0));          // save return address
2501   __ enter();                           // save old & set new rbp
2502   __ subptr(rsp, rbx);                  // Prolog!
2503   __ movptr(rbx, sp_temp);              // sender's sp
2504   // This value is corrected by layout_activation_impl
2505   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2506   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2507   __ movptr(sp_temp, rsp);              // pass to next frame
2508   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2509   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2510   __ decrementl(counter);             // decrement counter
2511   __ jcc(Assembler::notZero, loop);
2512   __ pushptr(Address(rcx, 0));          // save final return address
2513 
2514   // Re-push self-frame
2515   __ enter();                           // save old & set new rbp
2516 
2517   //  Return address and rbp are in place
2518   // We'll push additional args later. Just allocate a full sized
2519   // register save area
2520   __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2521 
2522   // Restore frame locals after moving the frame
2523   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2524   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2525   __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
2526   if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2527   if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2528 
2529   // Set up the args to unpack_frame
2530 
2531   __ pushl(unpack_kind);                     // get the unpack_kind value
2532   __ get_thread(rcx);
2533   __ push(rcx);
2534 
2535   // set last_Java_sp, last_Java_fp
2536   __ set_last_Java_frame(rcx, noreg, rbp, NULL);
2537 
2538   // Call C code.  Need thread but NOT official VM entry
2539   // crud.  We cannot block on this call, no GC can happen.  Call should
2540   // restore return values to their stack-slots with the new SP.
2541   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2542   // Set an oopmap for the call site
2543   oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2544 
2545   // rax contains the return result type
2546   __ push(rax);
2547 
2548   __ get_thread(rcx);
2549   __ reset_last_Java_frame(rcx, false);
2550 
2551   // Collect return values
2552   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2553   __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2554 
2555   // Clear floating point stack before returning to interpreter
2556   __ empty_FPU_stack();
2557 
2558   // Check if we should push the float or double return value.
2559   Label results_done, yes_double_value;
2560   __ cmpl(Address(rsp, 0), T_DOUBLE);
2561   __ jcc (Assembler::zero, yes_double_value);
2562   __ cmpl(Address(rsp, 0), T_FLOAT);
2563   __ jcc (Assembler::notZero, results_done);
2564 
2565   // return float value as expected by interpreter
2566   if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2567   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2568   __ jmp(results_done);
2569 
2570   // return double value as expected by interpreter
2571   __ bind(yes_double_value);
2572   if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2573   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2574 
2575   __ bind(results_done);
2576 
2577   // Pop self-frame.
2578   __ leave();                              // Epilog!
2579 
2580   // Jump to interpreter
2581   __ ret(0);
2582 
2583   // -------------
2584   // make sure all code is generated
2585   masm->flush();
2586 
2587   _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2588   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2589 }
2590 
2591 
2592 #ifdef COMPILER2
2593 //------------------------------generate_uncommon_trap_blob--------------------
2594 void SharedRuntime::generate_uncommon_trap_blob() {
2595   // allocate space for the code
2596   ResourceMark rm;
2597   // setup code generation tools
2598   CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
2599   MacroAssembler* masm = new MacroAssembler(&buffer);
2600 
2601   enum frame_layout {
2602     arg0_off,      // thread                     sp + 0 // Arg location for
2603     arg1_off,      // unloaded_class_index       sp + 1 // calling C
2604     arg2_off,      // exec_mode                  sp + 2
2605     // The frame sender code expects that rbp will be in the "natural" place and
2606     // will override any oopMap setting for it. We must therefore force the layout
2607     // so that it agrees with the frame sender code.
2608     rbp_off,       // callee saved register      sp + 3
2609     return_off,    // slot for return address    sp + 4
2610     framesize
2611   };
2612 
2613   address start = __ pc();
2614 
2615   if (UseRTMLocking) {
2616     // Abort RTM transaction before possible nmethod deoptimization.
2617     __ xabort(0);
2618   }
2619 
2620   // Push self-frame.
2621   __ subptr(rsp, return_off*wordSize);     // Prolog!
2622 
2623   // rbp is an implicitly saved callee saved register (i.e. the calling
2624   // convention will save/restore it in prolog/epilog). Other than that
2625   // there are no callee save registers now that adapter frames are gone.
2626   __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2627 
2628   // Clear the floating point exception stack
2629   __ empty_FPU_stack();
2630 
2631   // set last_Java_sp
2632   __ get_thread(rdx);
2633   __ set_last_Java_frame(rdx, noreg, noreg, NULL);
2634 
2635   // Call C code.  Need thread but NOT official VM entry
2636   // crud.  We cannot block on this call, no GC can happen.  Call should
2637   // capture callee-saved registers as well as return values.
2638   __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2639   // argument already in ECX
2640   __ movl(Address(rsp, arg1_off*wordSize),rcx);
2641   __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2642   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2643 
2644   // Set an oopmap for the call site
2645   OopMapSet *oop_maps = new OopMapSet();
2646   OopMap* map =  new OopMap( framesize, 0 );
2647   // No oopMap for rbp, it is known implicitly
2648 
2649   oop_maps->add_gc_map( __ pc()-start, map);
2650 
2651   __ get_thread(rcx);
2652 
2653   __ reset_last_Java_frame(rcx, false);
2654 
2655   // Load UnrollBlock into EDI
2656   __ movptr(rdi, rax);
2657 
2658 #ifdef ASSERT
2659   { Label L;
2660     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
2661             (int32_t)Deoptimization::Unpack_uncommon_trap);
2662     __ jcc(Assembler::equal, L);
2663     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2664     __ bind(L);
2665   }
2666 #endif
2667 
2668   // Pop all the frames we must move/replace.
2669   //
2670   // Frame picture (youngest to oldest)
2671   // 1: self-frame (no frame link)
2672   // 2: deopting frame  (no frame link)
2673   // 3: caller of deopting frame (could be compiled/interpreted).
2674 
2675   // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
2676   __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!
2677 
2678   // Pop deoptimized frame
2679   __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2680   __ addptr(rsp, rcx);
2681 
2682   // sp should be pointing at the return address to the caller (3)
2683 
2684   // Pick up the initial fp we should save
2685   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2686   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2687 
2688 #ifdef ASSERT
2689   // Compilers generate code that bangs the stack by as much as the
2690   // interpreter would need. So this stack banging should never
2691   // trigger a fault. Verify that it does not on non-product builds.
2692   __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2693   __ bang_stack_size(rbx, rcx);
2694 #endif
2695 
2696   // Load array of frame pcs into ECX
2697   __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2698 
2699   __ pop(rsi); // trash the pc
2700 
2701   // Load array of frame sizes into ESI
2702   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2703 
2704   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2705 
2706   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2707   __ movl(counter, rbx);
2708 
2709   // Now adjust the caller's stack to make up for the extra locals,
2710   // but record the original sp so that we can save it in the skeletal
2711   // interpreter frame; the interpreter_sender stack walk will then see
2712   // the unextended sp value rather than the "real" sp value.
2713 
2714   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2715   __ movptr(sp_temp, rsp);
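  // caller_adjustment is (roughly) the extra stack space the caller must
  // provide for the locals of the bottom skeletal interpreter frame.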
2716   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2717   __ subptr(rsp, rbx);
2718 
2719   // Push interpreter frames in a loop
2720   Label loop;
2721   __ bind(loop);
2722   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2723   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp by hand
2724   __ pushptr(Address(rcx, 0));          // save return address
2725   __ enter();                           // save old rbp and set new rbp
2726   __ subptr(rsp, rbx);                  // Prolog!
2727   __ movptr(rbx, sp_temp);              // sender's sp
2728   // This value is corrected by layout_activation_impl
2729   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2730   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2731   __ movptr(sp_temp, rsp);              // pass to next frame
2732   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2733   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2734   __ decrementl(counter);               // decrement counter
2735   __ jcc(Assembler::notZero, loop);
2736   __ pushptr(Address(rcx, 0));            // save final return address
2737 
2738   // Re-push self-frame
2739   __ enter();                           // save old rbp and set new rbp
2740   __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
2741 
2742 
2743   // set last_Java_sp, last_Java_fp
2744   __ get_thread(rdi);
2745   __ set_last_Java_frame(rdi, noreg, rbp, NULL);
2746 
2747   // Call C code.  Need thread but NOT official VM entry
2748   // crud.  We cannot block on this call, no GC can happen.  Call should
2749   // restore return values to their stack-slots with the new SP.
2750   __ movptr(Address(rsp,arg0_off*wordSize),rdi);
2751   __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2752   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
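  // unpack_frames() walks the skeletal frames pushed above and fills in
  // their interpreter state (locals, expression stack, bcp, and so on).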
2753   // Set an oopmap for the call site
2754   oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
2755 
2756   __ get_thread(rdi);
2757   __ reset_last_Java_frame(rdi, true);
2758 
2759   // Pop self-frame.
2760   __ leave();     // Epilog!
2761 
2762   // Jump to interpreter
2763   __ ret(0);
2764 
2765   // -------------
2766   // make sure all code is generated
2767   masm->flush();
2768 
2769   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
2770 }
2771 #endif // COMPILER2
2772 
2773 //------------------------------generate_handler_blob------
2774 //
2775 // Generate a special Compile2Runtime blob that saves all registers,
2776 // sets up an oopmap, and calls the safepoint code to stop the compiled
2777 // code at a safepoint.
2778 //
2779 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2780 
2781   // Account for thread arg in our frame
2782   const int additional_words = 1;
2783   int frame_size_in_words;
2784 
2785   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2786 
2787   ResourceMark rm;
2788   OopMapSet *oop_maps = new OopMapSet();
2789   OopMap* map;
2790 
2791   // allocate space for the code
2792   // setup code generation tools
2793   CodeBuffer   buffer("handler_blob", 1024, 512);
2794   MacroAssembler* masm = new MacroAssembler(&buffer);
2795 
2796   const Register java_thread = rdi; // callee-saved for VC++
2797   address start   = __ pc();
2798   address call_pc = NULL;
2799   bool cause_return = (poll_type == POLL_AT_RETURN);
2800   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
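  // A poll taken in a vectorized loop may have live values in the vector
  // registers, so the full vector state must be saved and restored as well.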
2801 
2802   if (UseRTMLocking) {
2803     // Abort RTM transaction before calling runtime
2804     // because critical section will be large and will be
2805     // aborted anyway. Also nmethod could be deoptimized.
2806     __ xabort(0);
2807   }
2808 
2809   // If cause_return is true we are at a poll_return, and the return
2810   // address to the caller of the safepointing nmethod is already on the
2811   // stack. We can leave this return address on the stack and
2812   // effectively complete the return and safepoint in the caller.
2813   // Otherwise we push space for a return address that the safepoint
2814   // handler will install later to make the stack walking sensible.
2815   if (!cause_return)
2816     __ push(rbx);  // Make room for return address (or push it again)
2817 
2818   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
2819 
2820   // The following is basically a call_VM. However, we need the precise
2821   // address of the call in order to generate an oopmap. Hence, we do all the
2822   // work ourselves.
2823 
2824   // Push thread argument and setup last_Java_sp
2825   __ get_thread(java_thread);
2826   __ push(java_thread);
2827   __ set_last_Java_frame(java_thread, noreg, noreg, NULL);
2828 
2829   // if this was not a poll_return then we need to correct the return address now.
2830   if (!cause_return) {
2831     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
2832     // Additionally, rbx is a callee saved register and we can look at it later to determine
2833     // if someone changed the return address for us!
2834     __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
2835     __ movptr(Address(rbp, wordSize), rbx);
2836   }
2837 
2838   // do the call
2839   __ call(RuntimeAddress(call_ptr));
2840 
2841   // Set an oopmap for the call site.  This oopmap will map all
2842   // oop-registers and debug-info registers as callee-saved.  This
2843   // will allow deoptimization at this safepoint to find all possible
2844   // debug-info recordings, as well as let GC find all oops.
2845 
2846   oop_maps->add_gc_map( __ pc() - start, map);
2847 
2848   // Discard arg
2849   __ pop(rcx);
2850 
2851   Label noException;
2852 
2853   // Clear last_Java_sp again
2854   __ get_thread(java_thread);
2855   __ reset_last_Java_frame(java_thread, false);
2856 
2857   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
2858   __ jcc(Assembler::equal, noException);
2859 
2860   // Exception pending
2861   RegisterSaver::restore_live_registers(masm, save_vectors);
2862 
2863   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2864 
2865   __ bind(noException);
2866 
2867   Label no_adjust, bail, not_special;
2868   if (!cause_return) {
2869     // If our stashed return pc was modified by the runtime we avoid touching it
2870     __ cmpptr(rbx, Address(rbp, wordSize));
2871     __ jccb(Assembler::notEqual, no_adjust);
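    // (The runtime may have installed a new return pc, e.g. to deoptimize
    // this nmethod; such a pc must be left in place, not stepped over.)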
2872 
2873     // Skip over the poll instruction.
2874     // See NativeInstruction::is_safepoint_poll()
2875     // Possible encodings:
2876     //      85 00       test   %eax,(%eax)
2877     //      85 01       test   %eax,(%ecx)
2878     //      85 02       test   %eax,(%edx)
2879     //      85 03       test   %eax,(%ebx)
2880     //      85 06       test   %eax,(%esi)
2881     //      85 07       test   %eax,(%edi)
2882     //
2883     //      85 04 24    test   %eax,(%esp)
2884     //      85 45 00    test   %eax,0x0(%ebp)
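    // rbx currently holds the faulting pc, i.e. the address of the poll
    // instruction itself; it is advanced past the 2- or 3-byte test below.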
2885 
2886 #ifdef ASSERT
2887     __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
2888 #endif
2889     // rsp/rbp base encoding takes 3 bytes with the following register values:
2890     // rsp 0x04
2891     // rbp 0x05
2892     __ movzbl(rcx, Address(rbx, 1));
2893     __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
2894     __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
2895     __ cmpptr(rcx, 1);
2896     __ jcc(Assembler::above, not_special);
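    // The 'above' branch is unsigned: base encodings below 0x04 wrapped
    // around in the subtraction, so only rsp and rbp fall through to get
    // the extra byte of their longer encoding skipped.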
2897     __ addptr(rbx, 1);
2898     __ bind(not_special);
2899 #ifdef ASSERT
2900     // Verify the correct encoding of the poll we're about to skip.
2901     __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
2902     __ jcc(Assembler::notEqual, bail);
2903     // Mask out the modrm bits
2904     __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
2905     // rax encodes to 0, so if the bits are nonzero it's incorrect
2906     __ jcc(Assembler::notZero, bail);
2907 #endif
2908     // Adjust return pc forward to step over the safepoint poll instruction
2909     __ addptr(rbx, 2);
2910     __ movptr(Address(rbp, wordSize), rbx);
2911   }
2912 
2913   __ bind(no_adjust);
2914   // Normal exit: restore registers and return
2915   RegisterSaver::restore_live_registers(masm, save_vectors);
2916 
2917   __ ret(0);
2918 
2919 #ifdef ASSERT
2920   __ bind(bail);
2921   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2922 #endif
2923 
2924   // make sure all code is generated
2925   masm->flush();
2926 
2927   // Fill-out other meta info
2928   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2929 }
2930 
2931 //
2932 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2933 //
2934 // Generate a stub that calls into the VM to find out the proper destination
2935 // of a Java call. All the argument registers are live at this point
2936 // but since this is generic code we don't know what they are and the
2937 // caller must do any GC of the args.
2938 //
2939 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2940   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2941 
2942   // allocate space for the code
2943   ResourceMark rm;
2944 
2945   CodeBuffer buffer(name, 1000, 512);
2946   MacroAssembler* masm = new MacroAssembler(&buffer);
2947 
2948   int frame_size_words;
2949   enum frame_layout {
2950                 thread_off,
2951                 extra_words };
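  // thread_off (== 0) is the slot for the thread argument pushed before the
  // call; extra_words (== 1) is the outgoing-arg word count for the frame.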
2952 
2953   OopMapSet *oop_maps = new OopMapSet();
2954   OopMap* map = NULL;
2955 
2956   int start = __ offset();
2957 
2958   map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
2959 
2960   int frame_complete = __ offset();
2961 
2962   const Register thread = rdi;
2963   __ get_thread(thread);
2964 
2965   __ push(thread);
2966   __ set_last_Java_frame(thread, noreg, rbp, NULL);
2967 
2968   __ call(RuntimeAddress(destination));
2969 
2970 
2971   // Set an oopmap for the call site.
2972   // We need this not only for callee-saved registers, but also for volatile
2973   // registers that the compiler might be keeping live across a safepoint.
2974 
2975   oop_maps->add_gc_map( __ offset() - start, map);
2976 
2977   // rax contains the address we are going to jump to, assuming no exception got installed
2978 
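  // Pop the thread argument pushed above.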
2979   __ addptr(rsp, wordSize);
2980 
2981   // clear last_Java_sp
2982   __ reset_last_Java_frame(thread, true);
2983   // check for pending exceptions
2984   Label pending;
2985   __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
2986   __ jcc(Assembler::notEqual, pending);
2987 
2988   // get the returned Method*
2989   __ get_vm_result_2(rbx, thread);
2990   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
2991 
2992   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
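  // Patch the Method* (rbx) and the jump target (rax) into their slots in
  // the save area so that restore_live_registers() reloads them below.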
2993 
2994   RegisterSaver::restore_live_registers(masm);
2995 
2996   // We are back to the original state on entry and ready to go.
2997 
2998   __ jmp(rax);
2999 
3000   // Pending exception after the safepoint
3001 
3002   __ bind(pending);
3003 
3004   RegisterSaver::restore_live_registers(masm);
3005 
3006   // exception pending => remove activation and forward to exception handler
3007 
3008   __ get_thread(thread);
3009   __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
3010   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
3011   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3012 
3013   // -------------
3014   // make sure all code is generated
3015   masm->flush();
3016 
3017   // return the blob
3018   // (new_runtime_stub expects the frame size in words)
3019   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3020 }
3021 
3022 #ifdef COMPILER2
3023 RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
3024                                                 int shadow_space_bytes,
3025                                                 const GrowableArray<VMReg>& input_registers,
3026                                                 const GrowableArray<VMReg>& output_registers) {
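  // Native invokers are not implemented on x86_32; only the 64-bit ports
  // provide this entry point.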
3027   ShouldNotCallThis();
3028   return nullptr;
3029 }
3030 #endif
--- EOF ---