1 /*
   2  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "code/nativeInst.hpp"
  31 #include "code/vtableStubs.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #include "prims/methodHandles.hpp"
  41 #include "runtime/jniHandles.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/signature.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/vframeArray.hpp"
  47 #include "runtime/vm_version.hpp"
  48 #include "utilities/align.hpp"
  49 #include "vmreg_x86.inline.hpp"
  50 #ifdef COMPILER1
  51 #include "c1/c1_Runtime1.hpp"
  52 #endif
  53 #ifdef COMPILER2
  54 #include "opto/runtime.hpp"
  55 #endif
  56 
  57 #define __ masm->
  58 
  59 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  60 
  61 class RegisterSaver {
  62   // Capture info about frame layout
  63 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  64   enum layout {
  65                 fpu_state_off = 0,
  66                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  67                 st0_off, st0H_off,
  68                 st1_off, st1H_off,
  69                 st2_off, st2H_off,
  70                 st3_off, st3H_off,
  71                 st4_off, st4H_off,
  72                 st5_off, st5H_off,
  73                 st6_off, st6H_off,
  74                 st7_off, st7H_off,
  75                 xmm_off,
  76                 DEF_XMM_OFFS(0),
  77                 DEF_XMM_OFFS(1),
  78                 DEF_XMM_OFFS(2),
  79                 DEF_XMM_OFFS(3),
  80                 DEF_XMM_OFFS(4),
  81                 DEF_XMM_OFFS(5),
  82                 DEF_XMM_OFFS(6),
  83                 DEF_XMM_OFFS(7),
  84                 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
  85                 rdi_off,
  86                 rsi_off,
  87                 ignore_off,  // extra copy of rbp,
  88                 rsp_off,
  89                 rbx_off,
  90                 rdx_off,
  91                 rcx_off,
  92                 rax_off,
  93                 // The frame sender code expects that rbp will be in the "natural" place and
  94                 // will override any oopMap setting for it. We must therefore force the layout
  95                 // so that it agrees with the frame sender code.
  96                 rbp_off,
  97                 return_off,      // slot for return address
  98                 reg_save_size };
  99   enum { FPU_regs_live = flags_off - fpu_state_end };
 100 
 101   public:
 102 
 103   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
 104                                      int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
 105   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
 106 
 107   static int rax_offset() { return rax_off; }
 108   static int rbx_offset() { return rbx_off; }
 109 
 110   // Offsets into the register save area
 111   // Used by deoptimization when it is managing result register
 112   // values on its own
 113 
 114   static int raxOffset(void) { return rax_off; }
 115   static int rdxOffset(void) { return rdx_off; }
 116   static int rbxOffset(void) { return rbx_off; }
 117   static int xmm0Offset(void) { return xmm0_off; }
 118   // This really returns a slot in the fp save area, which one is not important
 119   static int fpResultOffset(void) { return st0_off; }
 120 
 121   // During deoptimization only the result register need to be restored
 122   // all the other values have already been extracted.
 123 
 124   static void restore_result_registers(MacroAssembler* masm);
 125 
 126 };
 127 
 128 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 129                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 130   int num_xmm_regs = XMMRegister::number_of_registers;
 131   int ymm_bytes = num_xmm_regs * 16;
 132   int zmm_bytes = num_xmm_regs * 32;
 133 #ifdef COMPILER2
 134   int opmask_state_bytes = KRegister::number_of_registers * 8;
 135   if (save_vectors) {
 136     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 137     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 138     // Save upper half of YMM registers
 139     int vect_bytes = ymm_bytes;
 140     if (UseAVX > 2) {
 141       // Save upper half of ZMM registers as well
 142       vect_bytes += zmm_bytes;
 143       additional_frame_words += opmask_state_bytes / wordSize;
 144     }
 145     additional_frame_words += vect_bytes / wordSize;
 146   }
 147 #else
 148   assert(!save_vectors, "vectors are generated only by C2");
 149 #endif
 150   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 151   int frame_words = frame_size_in_bytes / wordSize;
 152   *total_frame_words = frame_words;
 153 
 154   assert(FPUStateSizeInWords == 27, "update stack layout");
 155 
 156   // save registers, fpu state, and flags
 157   // We assume caller has already has return address slot on the stack
 158   // We push epb twice in this sequence because we want the real rbp,
 159   // to be under the return like a normal enter and we want to use pusha
 160   // We push by hand instead of using push.
 161   __ enter();
 162   __ pusha();
 163   __ pushf();
 164   __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
 165   __ push_FPU_state();          // Save FPU state & init
 166 
 167   if (verify_fpu) {
 168     // Some stubs may have non standard FPU control word settings so
 169     // only check and reset the value when it required to be the
 170     // standard value.  The safepoint blob in particular can be used
 171     // in methods which are using the 24 bit control word for
 172     // optimized float math.
 173 
 174 #ifdef ASSERT
 175     // Make sure the control word has the expected value
 176     Label ok;
 177     __ cmpw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
 178     __ jccb(Assembler::equal, ok);
 179     __ stop("corrupted control word detected");
 180     __ bind(ok);
 181 #endif
 182 
 183     // Reset the control word to guard against exceptions being unmasked
 184     // since fstp_d can cause FPU stack underflow exceptions.  Write it
 185     // into the on stack copy and then reload that to make sure that the
 186     // current and future values are correct.
 187     __ movw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
 188   }
 189 
 190   __ frstor(Address(rsp, 0));
 191   if (!verify_fpu) {
 192     // Set the control word so that exceptions are masked for the
 193     // following code.
 194     __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
 195   }
 196 
 197   int off = st0_off;
 198   int delta = st1_off - off;
 199 
 200   // Save the FPU registers in de-opt-able form
 201   for (int n = 0; n < FloatRegister::number_of_registers; n++) {
 202     __ fstp_d(Address(rsp, off*wordSize));
 203     off += delta;
 204   }
 205 
 206   off = xmm0_off;
 207   delta = xmm1_off - off;
 208   if(UseSSE == 1) {
 209     // Save the XMM state
 210     for (int n = 0; n < num_xmm_regs; n++) {
 211       __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
 212       off += delta;
 213     }
 214   } else if(UseSSE >= 2) {
 215     // Save whole 128bit (16 bytes) XMM registers
 216     for (int n = 0; n < num_xmm_regs; n++) {
 217       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 218       off += delta;
 219     }
 220   }
 221 
 222 #ifdef COMPILER2
 223   if (save_vectors) {
 224     __ subptr(rsp, ymm_bytes);
 225     // Save upper half of YMM registers
 226     for (int n = 0; n < num_xmm_regs; n++) {
 227       __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
 228     }
 229     if (UseAVX > 2) {
 230       __ subptr(rsp, zmm_bytes);
 231       // Save upper half of ZMM registers
 232       for (int n = 0; n < num_xmm_regs; n++) {
 233         __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
 234       }
 235       __ subptr(rsp, opmask_state_bytes);
 236       // Save opmask registers
 237       for (int n = 0; n < KRegister::number_of_registers; n++) {
 238         __ kmov(Address(rsp, n*8), as_KRegister(n));
 239       }
 240     }
 241   }
 242 #else
 243   assert(!save_vectors, "vectors are generated only by C2");
 244 #endif
 245 
 246   __ vzeroupper();
 247 
 248   // Set an oopmap for the call site.  This oopmap will map all
 249   // oop-registers and debug-info registers as callee-saved.  This
 250   // will allow deoptimization at this safepoint to find all possible
 251   // debug-info recordings, as well as let GC find all oops.
 252 
 253   OopMapSet *oop_maps = new OopMapSet();
 254   OopMap* map =  new OopMap( frame_words, 0 );
 255 
 256 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 257 #define NEXTREG(x) (x)->as_VMReg()->next()
 258 
 259   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 260   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 261   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 262   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 263   // rbp, location is known implicitly, no oopMap
 264   map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
 265   map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
 266 
 267   // %%% This is really a waste but we'll keep things as they were for now for the upper component
 268   off = st0_off;
 269   delta = st1_off - off;
 270   for (int n = 0; n < FloatRegister::number_of_registers; n++) {
 271     FloatRegister freg_name = as_FloatRegister(n);
 272     map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
 273     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
 274     off += delta;
 275   }
 276   off = xmm0_off;
 277   delta = xmm1_off - off;
 278   for (int n = 0; n < num_xmm_regs; n++) {
 279     XMMRegister xmm_name = as_XMMRegister(n);
 280     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 281     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
 282     off += delta;
 283   }
 284 #undef NEXTREG
 285 #undef STACK_OFFSET
 286 
 287   return map;
 288 }
 289 
 290 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 291   int opmask_state_bytes = 0;
 292   int additional_frame_bytes = 0;
 293   int num_xmm_regs = XMMRegister::number_of_registers;
 294   int ymm_bytes = num_xmm_regs * 16;
 295   int zmm_bytes = num_xmm_regs * 32;
 296   // Recover XMM & FPU state
 297 #ifdef COMPILER2
 298   if (restore_vectors) {
 299     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 300     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 301     // Save upper half of YMM registers
 302     additional_frame_bytes = ymm_bytes;
 303     if (UseAVX > 2) {
 304       // Save upper half of ZMM registers as well
 305       additional_frame_bytes += zmm_bytes;
 306       opmask_state_bytes = KRegister::number_of_registers * 8;
 307       additional_frame_bytes += opmask_state_bytes;
 308     }
 309   }
 310 #else
 311   assert(!restore_vectors, "vectors are generated only by C2");
 312 #endif
 313 
 314   int off = xmm0_off;
 315   int delta = xmm1_off - off;
 316 
 317   __ vzeroupper();
 318 
 319   if (UseSSE == 1) {
 320     // Restore XMM registers
 321     assert(additional_frame_bytes == 0, "");
 322     for (int n = 0; n < num_xmm_regs; n++) {
 323       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 324       off += delta;
 325     }
 326   } else if (UseSSE >= 2) {
 327     // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
 328     // ZMM because the movdqu instruction zeros the upper part of the XMM register.
 329     for (int n = 0; n < num_xmm_regs; n++) {
 330       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 331       off += delta;
 332     }
 333   }
 334 
 335   if (restore_vectors) {
 336     off = additional_frame_bytes - ymm_bytes;
 337     // Restore upper half of YMM registers.
 338     for (int n = 0; n < num_xmm_regs; n++) {
 339       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16+off));
 340     }
 341     if (UseAVX > 2) {
 342       // Restore upper half of ZMM registers.
 343       off = opmask_state_bytes;
 344       for (int n = 0; n < num_xmm_regs; n++) {
 345         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32+off));
 346       }
 347       for (int n = 0; n < KRegister::number_of_registers; n++) {
 348         __ kmov(as_KRegister(n), Address(rsp, n*8));
 349       }
 350     }
 351     __ addptr(rsp, additional_frame_bytes);
 352   }
 353 
 354   __ pop_FPU_state();
 355   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 356 
 357   __ popf();
 358   __ popa();
 359   // Get the rbp, described implicitly by the frame sender code (no oopMap)
 360   __ pop(rbp);
 361 }
 362 
 363 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 364 
 365   // Just restore result register. Only used by deoptimization. By
 366   // now any callee save register that needs to be restore to a c2
 367   // caller of the deoptee has been extracted into the vframeArray
 368   // and will be stuffed into the c2i adapter we create for later
 369   // restoration so only result registers need to be restored here.
 370   //
 371 
 372   __ frstor(Address(rsp, 0));      // Restore fpu state
 373 
 374   // Recover XMM & FPU state
 375   if( UseSSE == 1 ) {
 376     __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
 377   } else if( UseSSE >= 2 ) {
 378     __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
 379   }
 380   __ movptr(rax, Address(rsp, rax_off*wordSize));
 381   __ movptr(rdx, Address(rsp, rdx_off*wordSize));
 382   // Pop all of the register save are off the stack except the return address
 383   __ addptr(rsp, return_off * wordSize);
 384 }
 385 
 386 // Is vector's size (in bytes) bigger than a size saved by default?
 387 // 16 bytes XMM registers are saved by default using SSE2 movdqu instructions.
 388 // Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
 389 bool SharedRuntime::is_wide_vector(int size) {
 390   return size > 16;
 391 }
 392 
 393 // The java_calling_convention describes stack locations as ideal slots on
 394 // a frame with no abi restrictions. Since we must observe abi restrictions
 395 // (like the placement of the register window) the slots must be biased by
 396 // the following value.
 397 static int reg2offset_in(VMReg r) {
 398   // Account for saved rbp, and return address
 399   // This should really be in_preserve_stack_slots
 400   return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
 401 }
 402 
 403 static int reg2offset_out(VMReg r) {
 404   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 405 }
 406 
 407 // ---------------------------------------------------------------------------
 408 // Read the array of BasicTypes from a signature, and compute where the
 409 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 410 // quantities.  Values less than SharedInfo::stack0 are registers, those above
 411 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 412 // as framesizes are fixed.
 413 // VMRegImpl::stack0 refers to the first slot 0(sp).
 414 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.
 415 // Register up to Register::number_of_registers are the 32-bit
 416 // integer registers.
 417 
 418 // Pass first two oop/int args in registers ECX and EDX.
 419 // Pass first two float/double args in registers XMM0 and XMM1.
 420 // Doubles have precedence, so if you pass a mix of floats and doubles
 421 // the doubles will grab the registers before the floats will.
 422 
 423 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 424 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 425 // units regardless of build. Of course for i486 there is no 64 bit build
 426 
 427 
 428 // ---------------------------------------------------------------------------
 429 // The compiled Java calling convention.
 430 // Pass first two oop/int args in registers ECX and EDX.
 431 // Pass first two float/double args in registers XMM0 and XMM1.
 432 // Doubles have precedence, so if you pass a mix of floats and doubles
 433 // the doubles will grab the registers before the floats will.
 434 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 435                                            VMRegPair *regs,
 436                                            int total_args_passed) {
 437   uint    stack = 0;          // Starting stack position for args on stack
 438 
 439 
 440   // Pass first two oop/int args in registers ECX and EDX.
 441   uint reg_arg0 = 9999;
 442   uint reg_arg1 = 9999;
 443 
 444   // Pass first two float/double args in registers XMM0 and XMM1.
 445   // Doubles have precedence, so if you pass a mix of floats and doubles
 446   // the doubles will grab the registers before the floats will.
 447   // CNC - TURNED OFF FOR non-SSE.
 448   //       On Intel we have to round all doubles (and most floats) at
 449   //       call sites by storing to the stack in any case.
 450   // UseSSE=0 ==> Don't Use ==> 9999+0
 451   // UseSSE=1 ==> Floats only ==> 9999+1
 452   // UseSSE>=2 ==> Floats or doubles ==> 9999+2
 453   enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
 454   uint fargs = (UseSSE>=2) ? 2 : UseSSE;
 455   uint freg_arg0 = 9999+fargs;
 456   uint freg_arg1 = 9999+fargs;
 457 
 458   // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
 459   int i;
 460   for( i = 0; i < total_args_passed; i++) {
 461     if( sig_bt[i] == T_DOUBLE ) {
 462       // first 2 doubles go in registers
 463       if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
 464       else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
 465       else // Else double is passed low on the stack to be aligned.
 466         stack += 2;
 467     } else if( sig_bt[i] == T_LONG ) {
 468       stack += 2;
 469     }
 470   }
 471   int dstack = 0;             // Separate counter for placing doubles
 472 
 473   // Now pick where all else goes.
 474   for( i = 0; i < total_args_passed; i++) {
 475     // From the type and the argument number (count) compute the location
 476     switch( sig_bt[i] ) {
 477     case T_SHORT:
 478     case T_CHAR:
 479     case T_BYTE:
 480     case T_BOOLEAN:
 481     case T_INT:
 482     case T_ARRAY:
 483     case T_OBJECT:
 484     case T_ADDRESS:
 485       if( reg_arg0 == 9999 )  {
 486         reg_arg0 = i;
 487         regs[i].set1(rcx->as_VMReg());
 488       } else if( reg_arg1 == 9999 )  {
 489         reg_arg1 = i;
 490         regs[i].set1(rdx->as_VMReg());
 491       } else {
 492         regs[i].set1(VMRegImpl::stack2reg(stack++));
 493       }
 494       break;
 495     case T_FLOAT:
 496       if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
 497         freg_arg0 = i;
 498         regs[i].set1(xmm0->as_VMReg());
 499       } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
 500         freg_arg1 = i;
 501         regs[i].set1(xmm1->as_VMReg());
 502       } else {
 503         regs[i].set1(VMRegImpl::stack2reg(stack++));
 504       }
 505       break;
 506     case T_LONG:
 507       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 508       regs[i].set2(VMRegImpl::stack2reg(dstack));
 509       dstack += 2;
 510       break;
 511     case T_DOUBLE:
 512       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 513       if( freg_arg0 == (uint)i ) {
 514         regs[i].set2(xmm0->as_VMReg());
 515       } else if( freg_arg1 == (uint)i ) {
 516         regs[i].set2(xmm1->as_VMReg());
 517       } else {
 518         regs[i].set2(VMRegImpl::stack2reg(dstack));
 519         dstack += 2;
 520       }
 521       break;
 522     case T_VOID: regs[i].set_bad(); break;
 523       break;
 524     default:
 525       ShouldNotReachHere();
 526       break;
 527     }
 528   }
 529 
 530   return stack;
 531 }
 532 
 533 // Patch the callers callsite with entry to compiled code if it exists.
 534 static void patch_callers_callsite(MacroAssembler *masm) {
 535   Label L;
 536   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 537   __ jcc(Assembler::equal, L);
 538   // Schedule the branch target address early.
 539   // Call into the VM to patch the caller, then jump to compiled callee
 540   // rax, isn't live so capture return address while we easily can
 541   __ movptr(rax, Address(rsp, 0));
 542   __ pusha();
 543   __ pushf();
 544 
 545   if (UseSSE == 1) {
 546     __ subptr(rsp, 2*wordSize);
 547     __ movflt(Address(rsp, 0), xmm0);
 548     __ movflt(Address(rsp, wordSize), xmm1);
 549   }
 550   if (UseSSE >= 2) {
 551     __ subptr(rsp, 4*wordSize);
 552     __ movdbl(Address(rsp, 0), xmm0);
 553     __ movdbl(Address(rsp, 2*wordSize), xmm1);
 554   }
 555 #ifdef COMPILER2
 556   // C2 may leave the stack dirty if not in SSE2+ mode
 557   if (UseSSE >= 2) {
 558     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 559   } else {
 560     __ empty_FPU_stack();
 561   }
 562 #endif /* COMPILER2 */
 563 
 564   // VM needs caller's callsite
 565   __ push(rax);
 566   // VM needs target method
 567   __ push(rbx);
 568   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 569   __ addptr(rsp, 2*wordSize);
 570 
 571   if (UseSSE == 1) {
 572     __ movflt(xmm0, Address(rsp, 0));
 573     __ movflt(xmm1, Address(rsp, wordSize));
 574     __ addptr(rsp, 2*wordSize);
 575   }
 576   if (UseSSE >= 2) {
 577     __ movdbl(xmm0, Address(rsp, 0));
 578     __ movdbl(xmm1, Address(rsp, 2*wordSize));
 579     __ addptr(rsp, 4*wordSize);
 580   }
 581 
 582   __ popf();
 583   __ popa();
 584   __ bind(L);
 585 }
 586 
 587 
 588 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
 589   int next_off = st_off - Interpreter::stackElementSize;
 590   __ movdbl(Address(rsp, next_off), r);
 591 }
 592 
 593 static void gen_c2i_adapter(MacroAssembler *masm,
 594                             int total_args_passed,
 595                             int comp_args_on_stack,
 596                             const BasicType *sig_bt,
 597                             const VMRegPair *regs,
 598                             Label& skip_fixup) {
 599   // Before we get into the guts of the C2I adapter, see if we should be here
 600   // at all.  We've come from compiled code and are attempting to jump to the
 601   // interpreter, which means the caller made a static call to get here
 602   // (vcalls always get a compiled target if there is one).  Check for a
 603   // compiled target.  If there is one, we need to patch the caller's call.
 604   patch_callers_callsite(masm);
 605 
 606   __ bind(skip_fixup);
 607 
 608 #ifdef COMPILER2
 609   // C2 may leave the stack dirty if not in SSE2+ mode
 610   if (UseSSE >= 2) {
 611     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 612   } else {
 613     __ empty_FPU_stack();
 614   }
 615 #endif /* COMPILER2 */
 616 
 617   // Since all args are passed on the stack, total_args_passed * interpreter_
 618   // stack_element_size  is the
 619   // space we need.
 620   int extraspace = total_args_passed * Interpreter::stackElementSize;
 621 
 622   // Get return address
 623   __ pop(rax);
 624 
 625   // set senderSP value
 626   __ movptr(rsi, rsp);
 627 
 628   __ subptr(rsp, extraspace);
 629 
 630   // Now write the args into the outgoing interpreter space
 631   for (int i = 0; i < total_args_passed; i++) {
 632     if (sig_bt[i] == T_VOID) {
 633       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 634       continue;
 635     }
 636 
 637     // st_off points to lowest address on stack.
 638     int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
 639     int next_off = st_off - Interpreter::stackElementSize;
 640 
 641     // Say 4 args:
 642     // i   st_off
 643     // 0   12 T_LONG
 644     // 1    8 T_VOID
 645     // 2    4 T_OBJECT
 646     // 3    0 T_BOOL
 647     VMReg r_1 = regs[i].first();
 648     VMReg r_2 = regs[i].second();
 649     if (!r_1->is_valid()) {
 650       assert(!r_2->is_valid(), "");
 651       continue;
 652     }
 653 
 654     if (r_1->is_stack()) {
 655       // memory to memory use fpu stack top
 656       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 657 
 658       if (!r_2->is_valid()) {
 659         __ movl(rdi, Address(rsp, ld_off));
 660         __ movptr(Address(rsp, st_off), rdi);
 661       } else {
 662 
 663         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
 664         // st_off == MSW, st_off-wordSize == LSW
 665 
 666         __ movptr(rdi, Address(rsp, ld_off));
 667         __ movptr(Address(rsp, next_off), rdi);
 668         __ movptr(rdi, Address(rsp, ld_off + wordSize));
 669         __ movptr(Address(rsp, st_off), rdi);
 670       }
 671     } else if (r_1->is_Register()) {
 672       Register r = r_1->as_Register();
 673       if (!r_2->is_valid()) {
 674         __ movl(Address(rsp, st_off), r);
 675       } else {
 676         // long/double in gpr
 677         ShouldNotReachHere();
 678       }
 679     } else {
 680       assert(r_1->is_XMMRegister(), "");
 681       if (!r_2->is_valid()) {
 682         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 683       } else {
 684         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
 685         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
 686       }
 687     }
 688   }
 689 
 690   // Schedule the branch target address early.
 691   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 692   // And repush original return address
 693   __ push(rax);
 694   __ jmp(rcx);
 695 }
 696 
 697 
 698 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
 699   int next_val_off = ld_off - Interpreter::stackElementSize;
 700   __ movdbl(r, Address(saved_sp, next_val_off));
 701 }
 702 
 703 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 704                         address code_start, address code_end,
 705                         Label& L_ok) {
 706   Label L_fail;
 707   __ lea(temp_reg, ExternalAddress(code_start));
 708   __ cmpptr(pc_reg, temp_reg);
 709   __ jcc(Assembler::belowEqual, L_fail);
 710   __ lea(temp_reg, ExternalAddress(code_end));
 711   __ cmpptr(pc_reg, temp_reg);
 712   __ jcc(Assembler::below, L_ok);
 713   __ bind(L_fail);
 714 }
 715 
 716 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 717                                     int total_args_passed,
 718                                     int comp_args_on_stack,
 719                                     const BasicType *sig_bt,
 720                                     const VMRegPair *regs) {
 721   // Note: rsi contains the senderSP on entry. We must preserve it since
 722   // we may do a i2c -> c2i transition if we lose a race where compiled
 723   // code goes non-entrant while we get args ready.
 724 
 725   // Adapters can be frameless because they do not require the caller
 726   // to perform additional cleanup work, such as correcting the stack pointer.
 727   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 728   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 729   // even if a callee has modified the stack pointer.
 730   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 731   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 732   // up via the senderSP register).
 733   // In other words, if *either* the caller or callee is interpreted, we can
 734   // get the stack pointer repaired after a call.
 735   // This is why c2i and i2c adapters cannot be indefinitely composed.
 736   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 737   // both caller and callee would be compiled methods, and neither would
 738   // clean up the stack pointer changes performed by the two adapters.
 739   // If this happens, control eventually transfers back to the compiled
 740   // caller, but with an uncorrected stack, causing delayed havoc.
 741 
 742   // Pick up the return address
 743   __ movptr(rax, Address(rsp, 0));
 744 
 745   if (VerifyAdapterCalls &&
 746       (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
 747     // So, let's test for cascading c2i/i2c adapters right now.
 748     //  assert(Interpreter::contains($return_addr) ||
 749     //         StubRoutines::contains($return_addr),
 750     //         "i2c adapter must return to an interpreter frame");
 751     __ block_comment("verify_i2c { ");
 752     Label L_ok;
 753     if (Interpreter::code() != nullptr) {
 754       range_check(masm, rax, rdi,
 755                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 756                   L_ok);
 757     }
 758     if (StubRoutines::initial_stubs_code() != nullptr) {
 759       range_check(masm, rax, rdi,
 760                   StubRoutines::initial_stubs_code()->code_begin(),
 761                   StubRoutines::initial_stubs_code()->code_end(),
 762                   L_ok);
 763     }
 764     if (StubRoutines::final_stubs_code() != nullptr) {
 765       range_check(masm, rax, rdi,
 766                   StubRoutines::final_stubs_code()->code_begin(),
 767                   StubRoutines::final_stubs_code()->code_end(),
 768                   L_ok);
 769     }
 770     const char* msg = "i2c adapter must return to an interpreter frame";
 771     __ block_comment(msg);
 772     __ stop(msg);
 773     __ bind(L_ok);
 774     __ block_comment("} verify_i2ce ");
 775   }
 776 
 777   // Must preserve original SP for loading incoming arguments because
 778   // we need to align the outgoing SP for compiled code.
 779   __ movptr(rdi, rsp);
 780 
 781   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
 782   // in registers, we will occasionally have no stack args.
 783   int comp_words_on_stack = 0;
 784   if (comp_args_on_stack) {
 785     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
 786     // registers are below.  By subtracting stack0, we either get a negative
 787     // number (all values in registers) or the maximum stack slot accessed.
 788     // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
 789     // Convert 4-byte stack slots to words.
 790     comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
 791     // Round up to miminum stack alignment, in wordSize
 792     comp_words_on_stack = align_up(comp_words_on_stack, 2);
 793     __ subptr(rsp, comp_words_on_stack * wordSize);
 794   }
 795 
 796   // Align the outgoing SP
 797   __ andptr(rsp, -(StackAlignmentInBytes));
 798 
 799   // push the return address on the stack (note that pushing, rather
 800   // than storing it, yields the correct frame alignment for the callee)
 801   __ push(rax);
 802 
 803   // Put saved SP in another register
 804   const Register saved_sp = rax;
 805   __ movptr(saved_sp, rdi);
 806 
 807 
 808   // Will jump to the compiled code just as if compiled code was doing it.
 809   // Pre-load the register-jump target early, to schedule it better.
 810   __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 811 
 812   // Now generate the shuffle code.  Pick up all register args and move the
 813   // rest through the floating point stack top.
 814   for (int i = 0; i < total_args_passed; i++) {
 815     if (sig_bt[i] == T_VOID) {
 816       // Longs and doubles are passed in native word order, but misaligned
 817       // in the 32-bit build.
 818       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 819       continue;
 820     }
 821 
 822     // Pick up 0, 1 or 2 words from SP+offset.
 823 
 824     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 825             "scrambled load targets?");
 826     // Load in argument order going down.
 827     int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
 828     // Point to interpreter value (vs. tag)
 829     int next_off = ld_off - Interpreter::stackElementSize;
 830     //
 831     //
 832     //
 833     VMReg r_1 = regs[i].first();
 834     VMReg r_2 = regs[i].second();
 835     if (!r_1->is_valid()) {
 836       assert(!r_2->is_valid(), "");
 837       continue;
 838     }
 839     if (r_1->is_stack()) {
 840       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 841       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 842 
 843       // We can use rsi as a temp here because compiled code doesn't need rsi as an input
 844       // and if we end up going thru a c2i because of a miss a reasonable value of rsi
 845       // we be generated.
 846       if (!r_2->is_valid()) {
 847         // __ fld_s(Address(saved_sp, ld_off));
 848         // __ fstp_s(Address(rsp, st_off));
 849         __ movl(rsi, Address(saved_sp, ld_off));
 850         __ movptr(Address(rsp, st_off), rsi);
 851       } else {
 852         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 853         // are accessed as negative so LSW is at LOW address
 854 
 855         // ld_off is MSW so get LSW
 856         // st_off is LSW (i.e. reg.first())
 857         // __ fld_d(Address(saved_sp, next_off));
 858         // __ fstp_d(Address(rsp, st_off));
 859         //
 860         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 861         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 862         // So we must adjust where to pick up the data to match the interpreter.
 863         //
 864         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 865         // are accessed as negative so LSW is at LOW address
 866 
 867         // ld_off is MSW so get LSW
 868         __ movptr(rsi, Address(saved_sp, next_off));
 869         __ movptr(Address(rsp, st_off), rsi);
 870         __ movptr(rsi, Address(saved_sp, ld_off));
 871         __ movptr(Address(rsp, st_off + wordSize), rsi);
 872       }
 873     } else if (r_1->is_Register()) {  // Register argument
 874       Register r = r_1->as_Register();
 875       assert(r != rax, "must be different");
 876       if (r_2->is_valid()) {
 877         //
 878         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 879         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 880         // So we must adjust where to pick up the data to match the interpreter.
 881 
 882         // this can be a misaligned move
 883         __ movptr(r, Address(saved_sp, next_off));
 884         assert(r_2->as_Register() != rax, "need another temporary register");
 885         // Remember r_1 is low address (and LSB on x86)
 886         // So r_2 gets loaded from high address regardless of the platform
 887         __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
 888       } else {
 889         __ movl(r, Address(saved_sp, ld_off));
 890       }
 891     } else {
 892       assert(r_1->is_XMMRegister(), "");
 893       if (!r_2->is_valid()) {
 894         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 895       } else {
 896         move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
 897       }
 898     }
 899   }
 900 
 901   // 6243940 We might end up in handle_wrong_method if
 902   // the callee is deoptimized as we race thru here. If that
 903   // happens we don't want to take a safepoint because the
 904   // caller frame will look interpreted and arguments are now
 905   // "compiled" so it is much better to make this transition
 906   // invisible to the stack walking code. Unfortunately if
 907   // we try and find the callee by normal means a safepoint
 908   // is possible. So we stash the desired callee in the thread
 909   // and the vm will find there should this case occur.
 910 
 911   __ get_thread(rax);
 912   __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
 913 
 914   // move Method* to rax, in case we end up in an c2i adapter.
 915   // the c2i adapters expect Method* in rax, (c2) because c2's
 916   // resolve stubs return the result (the method) in rax,.
 917   // I'd love to fix this.
 918   __ mov(rax, rbx);
 919 
 920   __ jmp(rdi);
 921 }
 922 
 923 // ---------------------------------------------------------------
 924 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 925                                                             int total_args_passed,
 926                                                             int comp_args_on_stack,
 927                                                             const BasicType *sig_bt,
 928                                                             const VMRegPair *regs,
 929                                                             AdapterFingerPrint* fingerprint) {
 930   address i2c_entry = __ pc();
 931 
 932   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 933 
 934   // -------------------------------------------------------------------------
 935   // Generate a C2I adapter.  On entry we know rbx, holds the Method* during calls
 936   // to the interpreter.  The args start out packed in the compiled layout.  They
 937   // need to be unpacked into the interpreter layout.  This will almost always
 938   // require some stack space.  We grow the current (compiled) stack, then repack
 939   // the args.  We  finally end in a jump to the generic interpreter entry point.
 940   // On exit from the interpreter, the interpreter will restore our SP (lest the
 941   // compiled code, which relies solely on SP and not EBP, get sick).
 942 
 943   address c2i_unverified_entry = __ pc();
 944   Label skip_fixup;
 945 
 946   Register data = rax;
 947   Register receiver = rcx;
 948   Register temp = rbx;
 949 
 950   {
 951     __ ic_check(1 /* end_alignment */);
 952     __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
 953     // Method might have been compiled since the call site was patched to
 954     // interpreted if that is the case treat it as a miss so we can get
 955     // the call site corrected.
 956     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 957     __ jcc(Assembler::equal, skip_fixup);
 958   }
 959 
 960   address c2i_entry = __ pc();
 961 
 962   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 963   bs->c2i_entry_barrier(masm);
 964 
 965   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 966 
 967   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 968 }
 969 
 970 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 971                                          VMRegPair *regs,
 972                                          int total_args_passed) {
 973 
 974 // We return the amount of VMRegImpl stack slots we need to reserve for all
 975 // the arguments NOT counting out_preserve_stack_slots.
 976 
 977   uint    stack = 0;        // All arguments on stack
 978 
 979   for( int i = 0; i < total_args_passed; i++) {
 980     // From the type and the argument number (count) compute the location
 981     switch( sig_bt[i] ) {
 982     case T_BOOLEAN:
 983     case T_CHAR:
 984     case T_FLOAT:
 985     case T_BYTE:
 986     case T_SHORT:
 987     case T_INT:
 988     case T_OBJECT:
 989     case T_ARRAY:
 990     case T_ADDRESS:
 991     case T_METADATA:
 992       regs[i].set1(VMRegImpl::stack2reg(stack++));
 993       break;
 994     case T_LONG:
 995     case T_DOUBLE: // The stack numbering is reversed from Java
 996       // Since C arguments do not get reversed, the ordering for
 997       // doubles on the stack must be opposite the Java convention
 998       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 999       regs[i].set2(VMRegImpl::stack2reg(stack));
1000       stack += 2;
1001       break;
1002     case T_VOID: regs[i].set_bad(); break;
1003     default:
1004       ShouldNotReachHere();
1005       break;
1006     }
1007   }
1008   return stack;
1009 }
1010 
1011 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1012                                              uint num_bits,
1013                                              uint total_args_passed) {
1014   Unimplemented();
1015   return 0;
1016 }
1017 
1018 // A simple move of integer like type
1019 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1020   if (src.first()->is_stack()) {
1021     if (dst.first()->is_stack()) {
1022       // stack to stack
1023       // __ ld(FP, reg2offset(src.first()), L5);
1024       // __ st(L5, SP, reg2offset(dst.first()));
1025       __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1026       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1027     } else {
1028       // stack to reg
1029       __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
1030     }
1031   } else if (dst.first()->is_stack()) {
1032     // reg to stack
1033     // no need to sign extend on 64bit
1034     __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1035   } else {
1036     if (dst.first() != src.first()) {
1037       __ mov(dst.first()->as_Register(), src.first()->as_Register());
1038     }
1039   }
1040 }
1041 
1042 // An oop arg. Must pass a handle not the oop itself
1043 static void object_move(MacroAssembler* masm,
1044                         OopMap* map,
1045                         int oop_handle_offset,
1046                         int framesize_in_slots,
1047                         VMRegPair src,
1048                         VMRegPair dst,
1049                         bool is_receiver,
1050                         int* receiver_offset) {
1051 
1052   // Because of the calling conventions we know that src can be a
1053   // register or a stack location. dst can only be a stack location.
1054 
1055   assert(dst.first()->is_stack(), "must be stack");
1056   // must pass a handle. First figure out the location we use as a handle
1057 
1058   if (src.first()->is_stack()) {
1059     // Oop is already on the stack as an argument
1060     Register rHandle = rax;
1061     Label nil;
1062     __ xorptr(rHandle, rHandle);
1063     __ cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
1064     __ jcc(Assembler::equal, nil);
1065     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1066     __ bind(nil);
1067     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1068 
1069     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1070     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1071     if (is_receiver) {
1072       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1073     }
1074   } else {
1075     // Oop is in a register we must store it to the space we reserve
1076     // on the stack for oop_handles
1077     const Register rOop = src.first()->as_Register();
1078     const Register rHandle = rax;
1079     int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1080     int offset = oop_slot*VMRegImpl::stack_slot_size;
1081     Label skip;
1082     __ movptr(Address(rsp, offset), rOop);
1083     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1084     __ xorptr(rHandle, rHandle);
1085     __ cmpptr(rOop, NULL_WORD);
1086     __ jcc(Assembler::equal, skip);
1087     __ lea(rHandle, Address(rsp, offset));
1088     __ bind(skip);
1089     // Store the handle parameter
1090     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1091     if (is_receiver) {
1092       *receiver_offset = offset;
1093     }
1094   }
1095 }
1096 
1097 // A float arg may have to do float reg int reg conversion
1098 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1099   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1100 
1101   // Because of the calling convention we know that src is either a stack location
1102   // or an xmm register. dst can only be a stack location.
1103 
1104   assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1105 
1106   if (src.first()->is_stack()) {
1107     __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1108     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1109   } else {
1110     // reg to stack
1111     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1112   }
1113 }
1114 
1115 // A long move
1116 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1117 
1118   // The only legal possibility for a long_move VMRegPair is:
1119   // 1: two stack slots (possibly unaligned)
1120   // as neither the java  or C calling convention will use registers
1121   // for longs.
1122 
1123   if (src.first()->is_stack() && dst.first()->is_stack()) {
1124     assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1125     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1126     __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1127     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1128     __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1129   } else {
1130     ShouldNotReachHere();
1131   }
1132 }
1133 
1134 // A double move
1135 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1136 
1137   // The only legal possibilities for a double_move VMRegPair are:
1138   // The painful thing here is that like long_move a VMRegPair might be
1139 
1140   // Because of the calling convention we know that src is either
1141   //   1: a single physical register (xmm registers only)
1142   //   2: two stack slots (possibly unaligned)
1143   // dst can only be a pair of stack slots.
1144 
1145   assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1146 
1147   if (src.first()->is_stack()) {
1148     // source is all stack
1149     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1150     __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1151     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1152     __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1153   } else {
1154     // reg to stack
1155     // No worries about stack alignment
1156     __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1157   }
1158 }
1159 
1160 
1161 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1162   // We always ignore the frame_slots arg and just use the space just below frame pointer
1163   // which by this time is free to use
1164   switch (ret_type) {
1165   case T_FLOAT:
1166     __ fstp_s(Address(rbp, -wordSize));
1167     break;
1168   case T_DOUBLE:
1169     __ fstp_d(Address(rbp, -2*wordSize));
1170     break;
1171   case T_VOID:  break;
1172   case T_LONG:
1173     __ movptr(Address(rbp, -wordSize), rax);
1174     __ movptr(Address(rbp, -2*wordSize), rdx);
1175     break;
1176   default: {
1177     __ movptr(Address(rbp, -wordSize), rax);
1178     }
1179   }
1180 }
1181 
1182 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1183   // We always ignore the frame_slots arg and just use the space just below frame pointer
1184   // which by this time is free to use
1185   switch (ret_type) {
1186   case T_FLOAT:
1187     __ fld_s(Address(rbp, -wordSize));
1188     break;
1189   case T_DOUBLE:
1190     __ fld_d(Address(rbp, -2*wordSize));
1191     break;
1192   case T_LONG:
1193     __ movptr(rax, Address(rbp, -wordSize));
1194     __ movptr(rdx, Address(rbp, -2*wordSize));
1195     break;
1196   case T_VOID:  break;
1197   default: {
1198     __ movptr(rax, Address(rbp, -wordSize));
1199     }
1200   }
1201 }
1202 
1203 static void verify_oop_args(MacroAssembler* masm,
1204                             const methodHandle& method,
1205                             const BasicType* sig_bt,
1206                             const VMRegPair* regs) {
1207   Register temp_reg = rbx;  // not part of any compiled calling seq
1208   if (VerifyOops) {
1209     for (int i = 0; i < method->size_of_parameters(); i++) {
1210       if (is_reference_type(sig_bt[i])) {
1211         VMReg r = regs[i].first();
1212         assert(r->is_valid(), "bad oop arg");
1213         if (r->is_stack()) {
1214           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1215           __ verify_oop(temp_reg);
1216         } else {
1217           __ verify_oop(r->as_Register());
1218         }
1219       }
1220     }
1221   }
1222 }
1223 
1224 static void gen_special_dispatch(MacroAssembler* masm,
1225                                  const methodHandle& method,
1226                                  const BasicType* sig_bt,
1227                                  const VMRegPair* regs) {
1228   verify_oop_args(masm, method, sig_bt, regs);
1229   vmIntrinsics::ID iid = method->intrinsic_id();
1230 
1231   // Now write the args into the outgoing interpreter space
1232   bool     has_receiver   = false;
1233   Register receiver_reg   = noreg;
1234   int      member_arg_pos = -1;
1235   Register member_reg     = noreg;
1236   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1237   if (ref_kind != 0) {
1238     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1239     member_reg = rbx;  // known to be free at this point
1240     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1241   } else if (iid == vmIntrinsics::_invokeBasic) {
1242     has_receiver = true;
1243   } else {
1244     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1245   }
1246 
1247   if (member_reg != noreg) {
1248     // Load the member_arg into register, if necessary.
1249     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1250     VMReg r = regs[member_arg_pos].first();
1251     if (r->is_stack()) {
1252       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1253     } else {
1254       // no data motion is needed
1255       member_reg = r->as_Register();
1256     }
1257   }
1258 
1259   if (has_receiver) {
1260     // Make sure the receiver is loaded into a register.
1261     assert(method->size_of_parameters() > 0, "oob");
1262     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1263     VMReg r = regs[0].first();
1264     assert(r->is_valid(), "bad receiver arg");
1265     if (r->is_stack()) {
1266       // Porting note:  This assumes that compiled calling conventions always
1267       // pass the receiver oop in a register.  If this is not true on some
1268       // platform, pick a temp and load the receiver from stack.
1269       fatal("receiver always in a register");
1270       receiver_reg = rcx;  // known to be free at this point
1271       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1272     } else {
1273       // no data motion is needed
1274       receiver_reg = r->as_Register();
1275     }
1276   }
1277 
1278   // Figure out which address we are really jumping to:
1279   MethodHandles::generate_method_handle_dispatch(masm, iid,
1280                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1281 }
1282 
1283 // ---------------------------------------------------------------------------
1284 // Generate a native wrapper for a given method.  The method takes arguments
1285 // in the Java compiled code convention, marshals them to the native
1286 // convention (handlizes oops, etc), transitions to native, makes the call,
1287 // returns to java state (possibly blocking), unhandlizes any result and
1288 // returns.
1289 //
1290 // Critical native functions are a shorthand for the use of
1291 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1292 // functions.  The wrapper is expected to unpack the arguments before
1293 // passing them to the callee. Critical native functions leave the state _in_Java,
1294 // since they cannot stop for GC.
1295 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1296 // block and the check for pending exceptions it's impossible for them
1297 // to be thrown.
1298 //
1299 //
1300 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1301                                                 const methodHandle& method,
1302                                                 int compile_id,
1303                                                 BasicType* in_sig_bt,
1304                                                 VMRegPair* in_regs,
1305                                                 BasicType ret_type) {
1306   if (method->is_method_handle_intrinsic()) {
1307     vmIntrinsics::ID iid = method->intrinsic_id();
1308     intptr_t start = (intptr_t)__ pc();
1309     int vep_offset = ((intptr_t)__ pc()) - start;
1310     gen_special_dispatch(masm,
1311                          method,
1312                          in_sig_bt,
1313                          in_regs);
1314     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1315     __ flush();
1316     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1317     return nmethod::new_native_nmethod(method,
1318                                        compile_id,
1319                                        masm->code(),
1320                                        vep_offset,
1321                                        frame_complete,
1322                                        stack_slots / VMRegImpl::slots_per_word,
1323                                        in_ByteSize(-1),
1324                                        in_ByteSize(-1),
1325                                        (OopMapSet*)nullptr);
1326   }
1327   address native_func = method->native_function();
1328   assert(native_func != nullptr, "must have function");
1329 
1330   // An OopMap for lock (and class if static)
1331   OopMapSet *oop_maps = new OopMapSet();
1332 
1333   // We have received a description of where all the java arg are located
1334   // on entry to the wrapper. We need to convert these args to where
1335   // the jni function will expect them. To figure out where they go
1336   // we convert the java signature to a C signature by inserting
1337   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1338 
1339   const int total_in_args = method->size_of_parameters();
1340   int  total_c_args       = total_in_args + (method->is_static() ? 2 : 1);
1341 
1342   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1343   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1344   BasicType* in_elem_bt = nullptr;
1345 
1346   int argc = 0;
1347   out_sig_bt[argc++] = T_ADDRESS;
1348   if (method->is_static()) {
1349     out_sig_bt[argc++] = T_OBJECT;
1350   }
1351 
1352   for (int i = 0; i < total_in_args ; i++ ) {
1353     out_sig_bt[argc++] = in_sig_bt[i];
1354   }
1355 
1356   // Now figure out where the args must be stored and how much stack space
1357   // they require.
1358   int out_arg_slots;
1359   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1360 
1361   // Compute framesize for the wrapper.  We need to handlize all oops in
1362   // registers a max of 2 on x86.
1363 
1364   // Calculate the total number of stack slots we will need.
1365 
1366   // First count the abi requirement plus all of the outgoing args
1367   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1368 
1369   // Now the space for the inbound oop handle area
1370   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1371 
1372   int oop_handle_offset = stack_slots;
1373   stack_slots += total_save_slots;
1374 
1375   // Now any space we need for handlizing a klass if static method
1376 
1377   int klass_slot_offset = 0;
1378   int klass_offset = -1;
1379   int lock_slot_offset = 0;
1380   bool is_static = false;
1381 
1382   if (method->is_static()) {
1383     klass_slot_offset = stack_slots;
1384     stack_slots += VMRegImpl::slots_per_word;
1385     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1386     is_static = true;
1387   }
1388 
1389   // Plus a lock if needed
1390 
1391   if (method->is_synchronized()) {
1392     lock_slot_offset = stack_slots;
1393     stack_slots += VMRegImpl::slots_per_word;
1394   }
1395 
1396   // Now a place (+2) to save return values or temp during shuffling
1397   // + 2 for return address (which we own) and saved rbp,
1398   stack_slots += 4;
1399 
1400   // Ok The space we have allocated will look like:
1401   //
1402   //
1403   // FP-> |                     |
1404   //      |---------------------|
1405   //      | 2 slots for moves   |
1406   //      |---------------------|
1407   //      | lock box (if sync)  |
1408   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1409   //      | klass (if static)   |
1410   //      |---------------------| <- klass_slot_offset
1411   //      | oopHandle area      |
1412   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1413   //      | outbound memory     |
1414   //      | based arguments     |
1415   //      |                     |
1416   //      |---------------------|
1417   //      |                     |
1418   // SP-> | out_preserved_slots |
1419   //
1420   //
1421   // ****************************************************************************
1422   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1423   // arguments off of the stack after the jni call. Before the call we can use
1424   // instructions that are SP relative. After the jni call we switch to FP
1425   // relative instructions instead of re-adjusting the stack on windows.
1426   // ****************************************************************************
1427 
1428 
1429   // Now compute actual number of stack words we need rounding to make
1430   // stack properly aligned.
1431   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1432 
1433   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1434 
1435   intptr_t start = (intptr_t)__ pc();
1436 
1437   // First thing make an ic check to see if we should even be here
1438 
1439   // We are free to use all registers as temps without saving them and
1440   // restoring them except rbp. rbp is the only callee save register
1441   // as far as the interpreter and the compiler(s) are concerned.
1442 
1443 
1444   const Register receiver = rcx;
1445   Label exception_pending;
1446 
1447   __ verify_oop(receiver);
1448   // verified entry must be aligned for code patching.
1449   __ ic_check(8 /* end_alignment */);
1450 
1451   int vep_offset = ((intptr_t)__ pc()) - start;
1452 
1453 #ifdef COMPILER1
1454   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1455   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1456     inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1457    }
1458 #endif // COMPILER1
1459 
1460   // The instruction at the verified entry point must be 5 bytes or longer
1461   // because it can be patched on the fly by make_non_entrant. The stack bang
1462   // instruction fits that requirement.
1463 
1464   // Generate stack overflow check
1465   __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
1466 
1467   // Generate a new frame for the wrapper.
1468   __ enter();
1469   // -2 because return address is already present and so is saved rbp
1470   __ subptr(rsp, stack_size - 2*wordSize);
1471 
1472 
1473   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1474   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
1475 
1476   // Frame is now completed as far as size and linkage.
1477   int frame_complete = ((intptr_t)__ pc()) - start;
1478 
1479   if (UseRTMLocking) {
1480     // Abort RTM transaction before calling JNI
1481     // because critical section will be large and will be
1482     // aborted anyway. Also nmethod could be deoptimized.
1483     __ xabort(0);
1484   }
1485 
1486   // Calculate the difference between rsp and rbp,. We need to know it
1487   // after the native call because on windows Java Natives will pop
1488   // the arguments and it is painful to do rsp relative addressing
1489   // in a platform independent way. So after the call we switch to
1490   // rbp, relative addressing.
1491 
1492   int fp_adjustment = stack_size - 2*wordSize;
1493 
1494 #ifdef COMPILER2
1495   // C2 may leave the stack dirty if not in SSE2+ mode
1496   if (UseSSE >= 2) {
1497     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1498   } else {
1499     __ empty_FPU_stack();
1500   }
1501 #endif /* COMPILER2 */
1502 
1503   // Compute the rbp, offset for any slots used after the jni call
1504 
1505   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1506 
1507   // We use rdi as a thread pointer because it is callee save and
1508   // if we load it once it is usable thru the entire wrapper
1509   const Register thread = rdi;
1510 
1511    // We use rsi as the oop handle for the receiver/klass
1512    // It is callee save so it survives the call to native
1513 
1514    const Register oop_handle_reg = rsi;
1515 
1516    __ get_thread(thread);
1517 
1518   //
1519   // We immediately shuffle the arguments so that any vm call we have to
1520   // make from here on out (sync slow path, jvmti, etc.) we will have
1521   // captured the oops from our caller and have a valid oopMap for
1522   // them.
1523 
1524   // -----------------
1525   // The Grand Shuffle
1526   //
1527   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1528   // and, if static, the class mirror instead of a receiver.  This pretty much
1529   // guarantees that register layout will not match (and x86 doesn't use reg
1530   // parms though amd does).  Since the native abi doesn't use register args
1531   // and the java conventions does we don't have to worry about collisions.
1532   // All of our moved are reg->stack or stack->stack.
1533   // We ignore the extra arguments during the shuffle and handle them at the
1534   // last moment. The shuffle is described by the two calling convention
1535   // vectors we have in our possession. We simply walk the java vector to
1536   // get the source locations and the c vector to get the destinations.
1537 
1538   int c_arg = method->is_static() ? 2 : 1;
1539 
1540   // Record rsp-based slot for receiver on stack for non-static methods
1541   int receiver_offset = -1;
1542 
1543   // This is a trick. We double the stack slots so we can claim
1544   // the oops in the caller's frame. Since we are sure to have
1545   // more args than the caller doubling is enough to make
1546   // sure we can capture all the incoming oop args from the
1547   // caller.
1548   //
1549   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1550 
1551   // Mark location of rbp,
1552   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1553 
1554   // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
1555   // Are free to temporaries if we have to do  stack to steck moves.
1556   // All inbound args are referenced based on rbp, and all outbound args via rsp.
1557 
1558   for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1559     switch (in_sig_bt[i]) {
1560       case T_ARRAY:
1561       case T_OBJECT:
1562         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1563                     ((i == 0) && (!is_static)),
1564                     &receiver_offset);
1565         break;
1566       case T_VOID:
1567         break;
1568 
1569       case T_FLOAT:
1570         float_move(masm, in_regs[i], out_regs[c_arg]);
1571           break;
1572 
1573       case T_DOUBLE:
1574         assert( i + 1 < total_in_args &&
1575                 in_sig_bt[i + 1] == T_VOID &&
1576                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1577         double_move(masm, in_regs[i], out_regs[c_arg]);
1578         break;
1579 
1580       case T_LONG :
1581         long_move(masm, in_regs[i], out_regs[c_arg]);
1582         break;
1583 
1584       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1585 
1586       default:
1587         simple_move32(masm, in_regs[i], out_regs[c_arg]);
1588     }
1589   }
1590 
1591   // Pre-load a static method's oop into rsi.  Used both by locking code and
1592   // the normal JNI call code.
1593   if (method->is_static()) {
1594 
1595     //  load opp into a register
1596     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1597 
1598     // Now handlize the static class mirror it's known not-null.
1599     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1600     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1601 
1602     // Now get the handle
1603     __ lea(oop_handle_reg, Address(rsp, klass_offset));
1604     // store the klass handle as second argument
1605     __ movptr(Address(rsp, wordSize), oop_handle_reg);
1606   }
1607 
1608   // Change state to native (we save the return address in the thread, since it might not
1609   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1610   // points into the right code segment. It does not have to be the correct return pc.
1611   // We use the same pc/oopMap repeatedly when we call out
1612 
1613   intptr_t the_pc = (intptr_t) __ pc();
1614   oop_maps->add_gc_map(the_pc - start, map);
1615 
1616   __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc, noreg);
1617 
1618 
1619   // We have all of the arguments setup at this point. We must not touch any register
1620   // argument registers at this point (what if we save/restore them there are no oop?
1621 
1622   {
1623     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1624     __ mov_metadata(rax, method());
1625     __ call_VM_leaf(
1626          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1627          thread, rax);
1628   }
1629 
1630   // RedefineClasses() tracing support for obsolete method entry
1631   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1632     __ mov_metadata(rax, method());
1633     __ call_VM_leaf(
1634          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1635          thread, rax);
1636   }
1637 
1638   // These are register definitions we need for locking/unlocking
1639   const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
1640   const Register obj_reg  = rcx;  // Will contain the oop
1641   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1642 
1643   Label slow_path_lock;
1644   Label lock_done;
1645 
1646   // Lock a synchronized method
1647   if (method->is_synchronized()) {
1648     Label count_mon;
1649 
1650     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1651 
1652     // Get the handle (the 2nd argument)
1653     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1654 
1655     // Get address of the box
1656 
1657     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1658 
1659     // Load the oop from the handle
1660     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1661 
1662     if (LockingMode == LM_MONITOR) {
1663       __ jmp(slow_path_lock);
1664     } else if (LockingMode == LM_LEGACY) {
1665       // Load immediate 1 into swap_reg %rax,
1666       __ movptr(swap_reg, 1);
1667 
1668       // Load (object->mark() | 1) into swap_reg %rax,
1669       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1670 
1671       // Save (object->mark() | 1) into BasicLock's displaced header
1672       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1673 
1674       // src -> dest iff dest == rax, else rax, <- dest
1675       // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1676       __ lock();
1677       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1678       __ jcc(Assembler::equal, count_mon);
1679 
1680       // Test if the oopMark is an obvious stack pointer, i.e.,
1681       //  1) (mark & 3) == 0, and
1682       //  2) rsp <= mark < mark + os::pagesize()
1683       // These 3 tests can be done by evaluating the following
1684       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1685       // assuming both stack pointer and pagesize have their
1686       // least significant 2 bits clear.
1687       // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1688 
1689       __ subptr(swap_reg, rsp);
1690       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
1691 
1692       // Save the test result, for recursive case, the result is zero
1693       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1694       __ jcc(Assembler::notEqual, slow_path_lock);
1695     } else {
1696       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1697       __ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
1698     }
1699     __ bind(count_mon);
1700     __ inc_held_monitor_count();
1701 
1702     // Slow path will re-enter here
1703     __ bind(lock_done);
1704   }
1705 
1706 
1707   // Finally just about ready to make the JNI call
1708 
1709   // get JNIEnv* which is first argument to native
1710   __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1711   __ movptr(Address(rsp, 0), rdx);
1712 
1713   // Now set thread in native
1714   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1715 
1716   __ call(RuntimeAddress(native_func));
1717 
1718   // Verify or restore cpu control state after JNI call
1719   __ restore_cpu_control_state_after_jni(noreg);
1720 
1721   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1722   // arguments off of the stack. We could just re-adjust the stack pointer here
1723   // and continue to do SP relative addressing but we instead switch to FP
1724   // relative addressing.
1725 
1726   // Unpack native results.
1727   switch (ret_type) {
1728   case T_BOOLEAN: __ c2bool(rax);            break;
1729   case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
1730   case T_BYTE   : __ sign_extend_byte (rax); break;
1731   case T_SHORT  : __ sign_extend_short(rax); break;
1732   case T_INT    : /* nothing to do */        break;
1733   case T_DOUBLE :
1734   case T_FLOAT  :
1735     // Result is in st0 we'll save as needed
1736     break;
1737   case T_ARRAY:                 // Really a handle
1738   case T_OBJECT:                // Really a handle
1739       break; // can't de-handlize until after safepoint check
1740   case T_VOID: break;
1741   case T_LONG: break;
1742   default       : ShouldNotReachHere();
1743   }
1744 
1745   Label after_transition;
1746 
1747   // Switch thread to "native transition" state before reading the synchronization state.
1748   // This additional state is necessary because reading and testing the synchronization
1749   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1750   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1751   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1752   //     Thread A is resumed to finish this native method, but doesn't block here since it
1753   //     didn't see any synchronization is progress, and escapes.
1754   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1755 
1756   // Force this write out before the read below
1757   if (!UseSystemMemoryBarrier) {
1758     __ membar(Assembler::Membar_mask_bits(
1759               Assembler::LoadLoad | Assembler::LoadStore |
1760               Assembler::StoreLoad | Assembler::StoreStore));
1761   }
1762 
1763   if (AlwaysRestoreFPU) {
1764     // Make sure the control word is correct.
1765     __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
1766   }
1767 
1768   // check for safepoint operation in progress and/or pending suspend requests
1769   { Label Continue, slow_path;
1770 
1771     __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
1772 
1773     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1774     __ jcc(Assembler::equal, Continue);
1775     __ bind(slow_path);
1776 
1777     // Don't use call_VM as it will see a possible pending exception and forward it
1778     // and never return here preventing us from clearing _last_native_pc down below.
1779     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1780     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1781     // by hand.
1782     //
1783     __ vzeroupper();
1784 
1785     save_native_result(masm, ret_type, stack_slots);
1786     __ push(thread);
1787     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1788                                               JavaThread::check_special_condition_for_native_trans)));
1789     __ increment(rsp, wordSize);
1790     // Restore any method result value
1791     restore_native_result(masm, ret_type, stack_slots);
1792     __ bind(Continue);
1793   }
1794 
1795   // change thread state
1796   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1797   __ bind(after_transition);
1798 
1799   Label reguard;
1800   Label reguard_done;
1801   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
1802   __ jcc(Assembler::equal, reguard);
1803 
1804   // slow path reguard  re-enters here
1805   __ bind(reguard_done);
1806 
1807   // Handle possible exception (will unlock if necessary)
1808 
1809   // native result if any is live
1810 
1811   // Unlock
1812   Label slow_path_unlock;
1813   Label unlock_done;
1814   if (method->is_synchronized()) {
1815 
1816     Label fast_done;
1817 
1818     // Get locked oop from the handle we passed to jni
1819     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1820 
1821     if (LockingMode == LM_LEGACY) {
1822       Label not_recur;
1823       // Simple recursive lock?
1824       __ cmpptr(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
1825       __ jcc(Assembler::notEqual, not_recur);
1826       __ dec_held_monitor_count();
1827       __ jmpb(fast_done);
1828       __ bind(not_recur);
1829     }
1830 
1831     // Must save rax, if it is live now because cmpxchg must use it
1832     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1833       save_native_result(masm, ret_type, stack_slots);
1834     }
1835 
1836     if (LockingMode == LM_MONITOR) {
1837       __ jmp(slow_path_unlock);
1838     } else if (LockingMode == LM_LEGACY) {
1839       //  get old displaced header
1840       __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
1841 
1842       // get address of the stack lock
1843       __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1844 
1845       // Atomic swap old header if oop still contains the stack lock
1846       // src -> dest iff dest == rax, else rax, <- dest
1847       // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
1848       __ lock();
1849       __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1850       __ jcc(Assembler::notEqual, slow_path_unlock);
1851       __ dec_held_monitor_count();
1852     } else {
1853       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1854       __ lightweight_unlock(obj_reg, swap_reg, thread, lock_reg, slow_path_unlock);
1855       __ dec_held_monitor_count();
1856     }
1857 
1858     // slow path re-enters here
1859     __ bind(unlock_done);
1860     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1861       restore_native_result(masm, ret_type, stack_slots);
1862     }
1863 
1864     __ bind(fast_done);
1865   }
1866 
1867   {
1868     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1869     // Tell dtrace about this method exit
1870     save_native_result(masm, ret_type, stack_slots);
1871     __ mov_metadata(rax, method());
1872     __ call_VM_leaf(
1873          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1874          thread, rax);
1875     restore_native_result(masm, ret_type, stack_slots);
1876   }
1877 
1878   // We can finally stop using that last_Java_frame we setup ages ago
1879 
1880   __ reset_last_Java_frame(thread, false);
1881 
1882   // Unbox oop result, e.g. JNIHandles::resolve value.
1883   if (is_reference_type(ret_type)) {
1884     __ resolve_jobject(rax /* value */,
1885                        thread /* thread */,
1886                        rcx /* tmp */);
1887   }
1888 
1889   if (CheckJNICalls) {
1890     // clear_pending_jni_exception_check
1891     __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
1892   }
1893 
1894   // reset handle block
1895   __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
1896   __ movl(Address(rcx, JNIHandleBlock::top_offset()), NULL_WORD);
1897 
1898   // Any exception pending?
1899   __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1900   __ jcc(Assembler::notEqual, exception_pending);
1901 
1902   // no exception, we're almost done
1903 
1904   // check that only result value is on FPU stack
1905   __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
1906 
1907   // Fixup floating pointer results so that result looks like a return from a compiled method
1908   if (ret_type == T_FLOAT) {
1909     if (UseSSE >= 1) {
1910       // Pop st0 and store as float and reload into xmm register
1911       __ fstp_s(Address(rbp, -4));
1912       __ movflt(xmm0, Address(rbp, -4));
1913     }
1914   } else if (ret_type == T_DOUBLE) {
1915     if (UseSSE >= 2) {
1916       // Pop st0 and store as double and reload into xmm register
1917       __ fstp_d(Address(rbp, -8));
1918       __ movdbl(xmm0, Address(rbp, -8));
1919     }
1920   }
1921 
1922   // Return
1923 
1924   __ leave();
1925   __ ret(0);
1926 
1927   // Unexpected paths are out of line and go here
1928 
1929   // Slow path locking & unlocking
1930   if (method->is_synchronized()) {
1931 
1932     // BEGIN Slow path lock
1933 
1934     __ bind(slow_path_lock);
1935 
1936     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
1937     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1938     __ push(thread);
1939     __ push(lock_reg);
1940     __ push(obj_reg);
1941     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1942     __ addptr(rsp, 3*wordSize);
1943 
1944 #ifdef ASSERT
1945     { Label L;
1946     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1947     __ jcc(Assembler::equal, L);
1948     __ stop("no pending exception allowed on exit from monitorenter");
1949     __ bind(L);
1950     }
1951 #endif
1952     __ jmp(lock_done);
1953 
1954     // END Slow path lock
1955 
1956     // BEGIN Slow path unlock
1957     __ bind(slow_path_unlock);
1958     __ vzeroupper();
1959     // Slow path unlock
1960 
1961     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1962       save_native_result(masm, ret_type, stack_slots);
1963     }
1964     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1965 
1966     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1967     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1968 
1969 
1970     // should be a peal
1971     // +wordSize because of the push above
1972     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1973     __ push(thread);
1974     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1975     __ push(rax);
1976 
1977     __ push(obj_reg);
1978     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
1979     __ addptr(rsp, 3*wordSize);
1980 #ifdef ASSERT
1981     {
1982       Label L;
1983       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1984       __ jcc(Assembler::equal, L);
1985       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1986       __ bind(L);
1987     }
1988 #endif /* ASSERT */
1989 
1990     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1991 
1992     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1993       restore_native_result(masm, ret_type, stack_slots);
1994     }
1995     __ jmp(unlock_done);
1996     // END Slow path unlock
1997 
1998   }
1999 
2000   // SLOW PATH Reguard the stack if needed
2001 
2002   __ bind(reguard);
2003   __ vzeroupper();
2004   save_native_result(masm, ret_type, stack_slots);
2005   {
2006     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2007   }
2008   restore_native_result(masm, ret_type, stack_slots);
2009   __ jmp(reguard_done);
2010 
2011 
2012   // BEGIN EXCEPTION PROCESSING
2013 
2014   // Forward  the exception
2015   __ bind(exception_pending);
2016 
2017   // remove possible return value from FPU register stack
2018   __ empty_FPU_stack();
2019 
2020   // pop our frame
2021   __ leave();
2022   // and forward the exception
2023   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2024 
2025   __ flush();
2026 
2027   nmethod *nm = nmethod::new_native_nmethod(method,
2028                                             compile_id,
2029                                             masm->code(),
2030                                             vep_offset,
2031                                             frame_complete,
2032                                             stack_slots / VMRegImpl::slots_per_word,
2033                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2034                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2035                                             oop_maps);
2036 
2037   return nm;
2038 
2039 }
2040 
2041 // this function returns the adjust size (in number of words) to a c2i adapter
2042 // activation for use during deoptimization
2043 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2044   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2045 }
2046 
2047 
2048 // Number of stack slots between incoming argument block and the start of
2049 // a new frame.  The PROLOG must add this many slots to the stack.  The
2050 // EPILOG must remove this many slots.  Intel needs one slot for
2051 // return address and one for rbp, (must save rbp)
2052 uint SharedRuntime::in_preserve_stack_slots() {
2053   return 2+VerifyStackAtCalls;
2054 }
2055 
2056 uint SharedRuntime::out_preserve_stack_slots() {
2057   return 0;
2058 }
2059 
2060 //------------------------------generate_deopt_blob----------------------------
2061 void SharedRuntime::generate_deopt_blob() {
2062   // allocate space for the code
2063   ResourceMark rm;
2064   // setup code generation tools
2065   // note: the buffer code size must account for StackShadowPages=50
2066   CodeBuffer   buffer("deopt_blob", 1536, 1024);
2067   MacroAssembler* masm = new MacroAssembler(&buffer);
2068   int frame_size_in_words;
2069   OopMap* map = nullptr;
2070   // Account for the extra args we place on the stack
2071   // by the time we call fetch_unroll_info
2072   const int additional_words = 2; // deopt kind, thread
2073 
2074   OopMapSet *oop_maps = new OopMapSet();
2075 
2076   // -------------
2077   // This code enters when returning to a de-optimized nmethod.  A return
2078   // address has been pushed on the stack, and return values are in
2079   // registers.
2080   // If we are doing a normal deopt then we were called from the patched
2081   // nmethod from the point we returned to the nmethod. So the return
2082   // address on the stack is wrong by NativeCall::instruction_size
2083   // We will adjust the value to it looks like we have the original return
2084   // address on the stack (like when we eagerly deoptimized).
2085   // In the case of an exception pending with deoptimized then we enter
2086   // with a return address on the stack that points after the call we patched
2087   // into the exception handler. We have the following register state:
2088   //    rax,: exception
2089   //    rbx,: exception handler
2090   //    rdx: throwing pc
2091   // So in this case we simply jam rdx into the useless return address and
2092   // the stack looks just like we want.
2093   //
2094   // At this point we need to de-opt.  We save the argument return
2095   // registers.  We call the first C routine, fetch_unroll_info().  This
2096   // routine captures the return values and returns a structure which
2097   // describes the current frame size and the sizes of all replacement frames.
2098   // The current frame is compiled code and may contain many inlined
2099   // functions, each with their own JVM state.  We pop the current frame, then
2100   // push all the new frames.  Then we call the C routine unpack_frames() to
2101   // populate these frames.  Finally unpack_frames() returns us the new target
2102   // address.  Notice that callee-save registers are BLOWN here; they have
2103   // already been captured in the vframeArray at the time the return PC was
2104   // patched.
2105   address start = __ pc();
2106   Label cont;
2107 
2108   // Prolog for non exception case!
2109 
2110   // Save everything in sight.
2111 
2112   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2113   // Normal deoptimization
2114   __ push(Deoptimization::Unpack_deopt);
2115   __ jmp(cont);
2116 
2117   int reexecute_offset = __ pc() - start;
2118 
2119   // Reexecute case
2120   // return address is the pc describes what bci to do re-execute at
2121 
2122   // No need to update map as each call to save_live_registers will produce identical oopmap
2123   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2124 
2125   __ push(Deoptimization::Unpack_reexecute);
2126   __ jmp(cont);
2127 
2128   int exception_offset = __ pc() - start;
2129 
2130   // Prolog for exception case
2131 
2132   // all registers are dead at this entry point, except for rax, and
2133   // rdx which contain the exception oop and exception pc
2134   // respectively.  Set them in TLS and fall thru to the
2135   // unpack_with_exception_in_tls entry point.
2136 
2137   __ get_thread(rdi);
2138   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2139   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2140 
2141   int exception_in_tls_offset = __ pc() - start;
2142 
2143   // new implementation because exception oop is now passed in JavaThread
2144 
2145   // Prolog for exception case
2146   // All registers must be preserved because they might be used by LinearScan
2147   // Exceptiop oop and throwing PC are passed in JavaThread
2148   // tos: stack at point of call to method that threw the exception (i.e. only
2149   // args are on the stack, no return address)
2150 
2151   // make room on stack for the return address
2152   // It will be patched later with the throwing pc. The correct value is not
2153   // available now because loading it from memory would destroy registers.
2154   __ push(0);
2155 
2156   // Save everything in sight.
2157 
2158   // No need to update map as each call to save_live_registers will produce identical oopmap
2159   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2160 
2161   // Now it is safe to overwrite any register
2162 
2163   // store the correct deoptimization type
2164   __ push(Deoptimization::Unpack_exception);
2165 
2166   // load throwing pc from JavaThread and patch it as the return address
2167   // of the current frame. Then clear the field in JavaThread
2168   __ get_thread(rdi);
2169   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2170   __ movptr(Address(rbp, wordSize), rdx);
2171   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2172 
2173 #ifdef ASSERT
2174   // verify that there is really an exception oop in JavaThread
2175   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2176   __ verify_oop(rax);
2177 
2178   // verify that there is no pending exception
2179   Label no_pending_exception;
2180   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2181   __ testptr(rax, rax);
2182   __ jcc(Assembler::zero, no_pending_exception);
2183   __ stop("must not have pending exception here");
2184   __ bind(no_pending_exception);
2185 #endif
2186 
2187   __ bind(cont);
2188 
2189   // Compiled code leaves the floating point stack dirty, empty it.
2190   __ empty_FPU_stack();
2191 
2192 
2193   // Call C code.  Need thread and this frame, but NOT official VM entry
2194   // crud.  We cannot block on this call, no GC can happen.
2195   __ get_thread(rcx);
2196   __ push(rcx);
2197   // fetch_unroll_info needs to call last_java_frame()
2198   __ set_last_Java_frame(rcx, noreg, noreg, nullptr, noreg);
2199 
2200   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2201 
2202   // Need to have an oopmap that tells fetch_unroll_info where to
2203   // find any register it might need.
2204 
2205   oop_maps->add_gc_map( __ pc()-start, map);
2206 
2207   // Discard args to fetch_unroll_info
2208   __ pop(rcx);
2209   __ pop(rcx);
2210 
2211   __ get_thread(rcx);
2212   __ reset_last_Java_frame(rcx, false);
2213 
2214   // Load UnrollBlock into EDI
2215   __ mov(rdi, rax);
2216 
2217   // Move the unpack kind to a safe place in the UnrollBlock because
2218   // we are very short of registers
2219 
2220   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset());
2221   // retrieve the deopt kind from the UnrollBlock.
2222   __ movl(rax, unpack_kind);
2223 
2224    Label noException;
2225   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2226   __ jcc(Assembler::notEqual, noException);
2227   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2228   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2229   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2230   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2231 
2232   __ verify_oop(rax);
2233 
2234   // Overwrite the result registers with the exception results.
2235   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2236   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2237 
2238   __ bind(noException);
2239 
2240   // Stack is back to only having register save data on the stack.
2241   // Now restore the result registers. Everything else is either dead or captured
2242   // in the vframeArray.
2243 
2244   RegisterSaver::restore_result_registers(masm);
2245 
2246   // Non standard control word may be leaked out through a safepoint blob, and we can
2247   // deopt at a poll point with the non standard control word. However, we should make
2248   // sure the control word is correct after restore_result_registers.
2249   __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
2250 
2251   // All of the register save area has been popped of the stack. Only the
2252   // return address remains.
2253 
2254   // Pop all the frames we must move/replace.
2255   //
2256   // Frame picture (youngest to oldest)
2257   // 1: self-frame (no frame link)
2258   // 2: deopting frame  (no frame link)
2259   // 3: caller of deopting frame (could be compiled/interpreted).
2260   //
2261   // Note: by leaving the return address of self-frame on the stack
2262   // and using the size of frame 2 to adjust the stack
2263   // when we are done the return to frame 3 will still be on the stack.
2264 
2265   // Pop deoptimized frame
2266   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2267 
2268   // sp should be pointing at the return address to the caller (3)
2269 
2270   // Pick up the initial fp we should save
2271   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2272   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
2273 
2274 #ifdef ASSERT
2275   // Compilers generate code that bang the stack by as much as the
2276   // interpreter would need. So this stack banging should never
2277   // trigger a fault. Verify that it does not on non product builds.
2278   __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2279   __ bang_stack_size(rbx, rcx);
2280 #endif
2281 
2282   // Load array of frame pcs into ECX
2283   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset()));
2284 
2285   __ pop(rsi); // trash the old pc
2286 
2287   // Load array of frame sizes into ESI
2288   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset()));
2289 
2290   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset());
2291 
2292   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
2293   __ movl(counter, rbx);
2294 
2295   // Now adjust the caller's stack to make up for the extra locals
2296   // but record the original sp so that we can save it in the skeletal interpreter
2297   // frame and the stack walking of interpreter_sender will get the unextended sp
2298   // value and not the "real" sp value.
2299 
2300   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset());
2301   __ movptr(sp_temp, rsp);
2302   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset()));
2303   __ subptr(rsp, rbx);
2304 
2305   // Push interpreter frames in a loop
2306   Label loop;
2307   __ bind(loop);
2308   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2309   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2310   __ pushptr(Address(rcx, 0));          // save return address
2311   __ enter();                           // save old & set new rbp,
2312   __ subptr(rsp, rbx);                  // Prolog!
2313   __ movptr(rbx, sp_temp);              // sender's sp
2314   // This value is corrected by layout_activation_impl
2315   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2316   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2317   __ movptr(sp_temp, rsp);              // pass to next frame
2318   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2319   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2320   __ decrementl(counter);             // decrement counter
2321   __ jcc(Assembler::notZero, loop);
2322   __ pushptr(Address(rcx, 0));          // save final return address
2323 
2324   // Re-push self-frame
2325   __ enter();                           // save old & set new rbp,
2326 
2327   //  Return address and rbp, are in place
2328   // We'll push additional args later. Just allocate a full sized
2329   // register save area
2330   __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2331 
2332   // Restore frame locals after moving the frame
2333   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2334   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2335   __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
2336   if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2337   if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2338 
2339   // Set up the args to unpack_frame
2340 
2341   __ pushl(unpack_kind);                     // get the unpack_kind value
2342   __ get_thread(rcx);
2343   __ push(rcx);
2344 
2345   // set last_Java_sp, last_Java_fp
2346   __ set_last_Java_frame(rcx, noreg, rbp, nullptr, noreg);
2347 
2348   // Call C code.  Need thread but NOT official VM entry
2349   // crud.  We cannot block on this call, no GC can happen.  Call should
2350   // restore return values to their stack-slots with the new SP.
2351   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2352   // Set an oopmap for the call site
2353   oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2354 
2355   // rax, contains the return result type
2356   __ push(rax);
2357 
2358   __ get_thread(rcx);
2359   __ reset_last_Java_frame(rcx, false);
2360 
2361   // Collect return values
2362   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2363   __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2364 
2365   // Clear floating point stack before returning to interpreter
2366   __ empty_FPU_stack();
2367 
2368   // Check if we should push the float or double return value.
2369   Label results_done, yes_double_value;
2370   __ cmpl(Address(rsp, 0), T_DOUBLE);
2371   __ jcc (Assembler::zero, yes_double_value);
2372   __ cmpl(Address(rsp, 0), T_FLOAT);
2373   __ jcc (Assembler::notZero, results_done);
2374 
2375   // return float value as expected by interpreter
2376   if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2377   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2378   __ jmp(results_done);
2379 
2380   // return double value as expected by interpreter
2381   __ bind(yes_double_value);
2382   if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2383   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2384 
2385   __ bind(results_done);
2386 
2387   // Pop self-frame.
2388   __ leave();                              // Epilog!
2389 
2390   // Jump to interpreter
2391   __ ret(0);
2392 
2393   // -------------
2394   // make sure all code is generated
2395   masm->flush();
2396 
2397   _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2398   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2399 }
2400 
2401 
2402 #ifdef COMPILER2
2403 //------------------------------generate_uncommon_trap_blob--------------------
2404 void SharedRuntime::generate_uncommon_trap_blob() {
2405   // allocate space for the code
2406   ResourceMark rm;
2407   // setup code generation tools
2408   CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
2409   MacroAssembler* masm = new MacroAssembler(&buffer);
2410 
2411   enum frame_layout {
2412     arg0_off,      // thread                     sp + 0 // Arg location for
2413     arg1_off,      // unloaded_class_index       sp + 1 // calling C
2414     arg2_off,      // exec_mode                  sp + 2
2415     // The frame sender code expects that rbp will be in the "natural" place and
2416     // will override any oopMap setting for it. We must therefore force the layout
2417     // so that it agrees with the frame sender code.
2418     rbp_off,       // callee saved register      sp + 3
2419     return_off,    // slot for return address    sp + 4
2420     framesize
2421   };
2422 
2423   address start = __ pc();
2424 
2425   if (UseRTMLocking) {
2426     // Abort RTM transaction before possible nmethod deoptimization.
2427     __ xabort(0);
2428   }
2429 
2430   // Push self-frame.
2431   __ subptr(rsp, return_off*wordSize);     // Epilog!
2432 
2433   // rbp, is an implicitly saved callee saved register (i.e. the calling
2434   // convention will save restore it in prolog/epilog) Other than that
2435   // there are no callee save registers no that adapter frames are gone.
2436   __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2437 
2438   // Clear the floating point exception stack
2439   __ empty_FPU_stack();
2440 
2441   // set last_Java_sp
2442   __ get_thread(rdx);
2443   __ set_last_Java_frame(rdx, noreg, noreg, nullptr, noreg);
2444 
2445   // Call C code.  Need thread but NOT official VM entry
2446   // crud.  We cannot block on this call, no GC can happen.  Call should
2447   // capture callee-saved registers as well as return values.
2448   __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2449   // argument already in ECX
2450   __ movl(Address(rsp, arg1_off*wordSize),rcx);
2451   __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2452   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2453 
2454   // Set an oopmap for the call site
2455   OopMapSet *oop_maps = new OopMapSet();
2456   OopMap* map =  new OopMap( framesize, 0 );
2457   // No oopMap for rbp, it is known implicitly
2458 
2459   oop_maps->add_gc_map( __ pc()-start, map);
2460 
2461   __ get_thread(rcx);
2462 
2463   __ reset_last_Java_frame(rcx, false);
2464 
2465   // Load UnrollBlock into EDI
2466   __ movptr(rdi, rax);
2467 
2468 #ifdef ASSERT
2469   { Label L;
2470     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset()),
2471             (int32_t)Deoptimization::Unpack_uncommon_trap);
2472     __ jcc(Assembler::equal, L);
2473     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2474     __ bind(L);
2475   }
2476 #endif
2477 
2478   // Pop all the frames we must move/replace.
2479   //
2480   // Frame picture (youngest to oldest)
2481   // 1: self-frame (no frame link)
2482   // 2: deopting frame  (no frame link)
2483   // 3: caller of deopting frame (could be compiled/interpreted).
2484 
2485   // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
2486   __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!
2487 
2488   // Pop deoptimized frame
2489   __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2490   __ addptr(rsp, rcx);
2491 
2492   // sp should be pointing at the return address to the caller (3)
2493 
2494   // Pick up the initial fp we should save
2495   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2496   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
2497 
2498 #ifdef ASSERT
2499   // Compilers generate code that bang the stack by as much as the
2500   // interpreter would need. So this stack banging should never
2501   // trigger a fault. Verify that it does not on non product builds.
2502   __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2503   __ bang_stack_size(rbx, rcx);
2504 #endif
2505 
2506   // Load array of frame pcs into ECX
2507   __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset()));
2508 
2509   __ pop(rsi); // trash the pc
2510 
2511   // Load array of frame sizes into ESI
2512   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset()));
2513 
2514   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset());
2515 
2516   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
2517   __ movl(counter, rbx);
2518 
2519   // Now adjust the caller's stack to make up for the extra locals
2520   // but record the original sp so that we can save it in the skeletal interpreter
2521   // frame and the stack walking of interpreter_sender will get the unextended sp
2522   // value and not the "real" sp value.
2523 
2524   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset());
2525   __ movptr(sp_temp, rsp);
2526   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset()));
2527   __ subptr(rsp, rbx);
2528 
2529   // Push interpreter frames in a loop
2530   Label loop;
2531   __ bind(loop);
2532   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2533   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2534   __ pushptr(Address(rcx, 0));          // save return address
2535   __ enter();                           // save old & set new rbp,
2536   __ subptr(rsp, rbx);                  // Prolog!
2537   __ movptr(rbx, sp_temp);              // sender's sp
2538   // This value is corrected by layout_activation_impl
2539   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2540   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2541   __ movptr(sp_temp, rsp);              // pass to next frame
2542   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2543   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2544   __ decrementl(counter);             // decrement counter
2545   __ jcc(Assembler::notZero, loop);
2546   __ pushptr(Address(rcx, 0));            // save final return address
2547 
2548   // Re-push self-frame
2549   __ enter();                           // save old & set new rbp,
2550   __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
2551 
2552 
2553   // set last_Java_sp, last_Java_fp
2554   __ get_thread(rdi);
2555   __ set_last_Java_frame(rdi, noreg, rbp, nullptr, noreg);
2556 
2557   // Call C code.  Need thread but NOT official VM entry
2558   // crud.  We cannot block on this call, no GC can happen.  Call should
2559   // restore return values to their stack-slots with the new SP.
2560   __ movptr(Address(rsp,arg0_off*wordSize),rdi);
2561   __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2562   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2563   // Set an oopmap for the call site
2564   oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
2565 
2566   __ get_thread(rdi);
2567   __ reset_last_Java_frame(rdi, true);
2568 
2569   // Pop self-frame.
2570   __ leave();     // Epilog!
2571 
2572   // Jump to interpreter
2573   __ ret(0);
2574 
2575   // -------------
2576   // make sure all code is generated
2577   masm->flush();
2578 
2579    _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
2580 }
2581 #endif // COMPILER2
2582 
2583 //------------------------------generate_handler_blob------
2584 //
2585 // Generate a special Compile2Runtime blob that saves all registers,
2586 // setup oopmap, and calls safepoint code to stop the compiled code for
2587 // a safepoint.
2588 //
2589 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2590 
2591   // Account for thread arg in our frame
2592   const int additional_words = 1;
2593   int frame_size_in_words;
2594 
2595   assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2596 
2597   ResourceMark rm;
2598   OopMapSet *oop_maps = new OopMapSet();
2599   OopMap* map;
2600 
2601   // allocate space for the code
2602   // setup code generation tools
2603   CodeBuffer   buffer("handler_blob", 2048, 1024);
2604   MacroAssembler* masm = new MacroAssembler(&buffer);
2605 
2606   const Register java_thread = rdi; // callee-saved for VC++
2607   address start   = __ pc();
2608   address call_pc = nullptr;
2609   bool cause_return = (poll_type == POLL_AT_RETURN);
2610   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
2611 
2612   if (UseRTMLocking) {
2613     // Abort RTM transaction before calling runtime
2614     // because critical section will be large and will be
2615     // aborted anyway. Also nmethod could be deoptimized.
2616     __ xabort(0);
2617   }
2618 
2619   // If cause_return is true we are at a poll_return and there is
2620   // the return address on the stack to the caller on the nmethod
2621   // that is safepoint. We can leave this return on the stack and
2622   // effectively complete the return and safepoint in the caller.
2623   // Otherwise we push space for a return address that the safepoint
2624   // handler will install later to make the stack walking sensible.
2625   if (!cause_return)
2626     __ push(rbx);  // Make room for return address (or push it again)
2627 
2628   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
2629 
2630   // The following is basically a call_VM. However, we need the precise
2631   // address of the call in order to generate an oopmap. Hence, we do all the
2632   // work ourselves.
2633 
2634   // Push thread argument and setup last_Java_sp
2635   __ get_thread(java_thread);
2636   __ push(java_thread);
2637   __ set_last_Java_frame(java_thread, noreg, noreg, nullptr, noreg);
2638 
2639   // if this was not a poll_return then we need to correct the return address now.
2640   if (!cause_return) {
2641     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
2642     // Additionally, rbx is a callee saved register and we can look at it later to determine
2643     // if someone changed the return address for us!
2644     __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
2645     __ movptr(Address(rbp, wordSize), rbx);
2646   }
2647 
2648   // do the call
2649   __ call(RuntimeAddress(call_ptr));
2650 
2651   // Set an oopmap for the call site.  This oopmap will map all
2652   // oop-registers and debug-info registers as callee-saved.  This
2653   // will allow deoptimization at this safepoint to find all possible
2654   // debug-info recordings, as well as let GC find all oops.
2655 
2656   oop_maps->add_gc_map( __ pc() - start, map);
2657 
2658   // Discard arg
2659   __ pop(rcx);
2660 
2661   Label noException;
2662 
2663   // Clear last_Java_sp again
2664   __ get_thread(java_thread);
2665   __ reset_last_Java_frame(java_thread, false);
2666 
2667   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
2668   __ jcc(Assembler::equal, noException);
2669 
2670   // Exception pending
2671   RegisterSaver::restore_live_registers(masm, save_vectors);
2672 
2673   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2674 
2675   __ bind(noException);
2676 
2677   Label no_adjust, bail, not_special;
2678   if (!cause_return) {
2679     // If our stashed return pc was modified by the runtime we avoid touching it
2680     __ cmpptr(rbx, Address(rbp, wordSize));
2681     __ jccb(Assembler::notEqual, no_adjust);
2682 
2683     // Skip over the poll instruction.
2684     // See NativeInstruction::is_safepoint_poll()
2685     // Possible encodings:
2686     //      85 00       test   %eax,(%rax)
2687     //      85 01       test   %eax,(%rcx)
2688     //      85 02       test   %eax,(%rdx)
2689     //      85 03       test   %eax,(%rbx)
2690     //      85 06       test   %eax,(%rsi)
2691     //      85 07       test   %eax,(%rdi)
2692     //
2693     //      85 04 24    test   %eax,(%rsp)
2694     //      85 45 00    test   %eax,0x0(%rbp)
2695 
2696 #ifdef ASSERT
2697     __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
2698 #endif
2699     // rsp/rbp base encoding takes 3 bytes with the following register values:
2700     // rsp 0x04
2701     // rbp 0x05
2702     __ movzbl(rcx, Address(rbx, 1));
2703     __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
2704     __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
2705     __ cmpptr(rcx, 1);
2706     __ jcc(Assembler::above, not_special);
2707     __ addptr(rbx, 1);
2708     __ bind(not_special);
2709 #ifdef ASSERT
2710     // Verify the correct encoding of the poll we're about to skip.
2711     __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
2712     __ jcc(Assembler::notEqual, bail);
2713     // Mask out the modrm bits
2714     __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
2715     // rax encodes to 0, so if the bits are nonzero it's incorrect
2716     __ jcc(Assembler::notZero, bail);
2717 #endif
2718     // Adjust return pc forward to step over the safepoint poll instruction
2719     __ addptr(rbx, 2);
2720     __ movptr(Address(rbp, wordSize), rbx);
2721   }
2722 
2723   __ bind(no_adjust);
2724   // Normal exit, register restoring and exit
2725   RegisterSaver::restore_live_registers(masm, save_vectors);
2726 
2727   __ ret(0);
2728 
2729 #ifdef ASSERT
2730   __ bind(bail);
2731   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2732 #endif
2733 
2734   // make sure all code is generated
2735   masm->flush();
2736 
2737   // Fill-out other meta info
2738   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2739 }
2740 
2741 //
2742 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
2743 //
2744 // Generate a stub that calls into vm to find out the proper destination
2745 // of a java call. All the argument registers are live at this point
2746 // but since this is generic code we don't know what they are and the caller
2747 // must do any gc of the args.
2748 //
2749 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2750   assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2751 
2752   // allocate space for the code
2753   ResourceMark rm;
2754 
2755   CodeBuffer buffer(name, 1000, 512);
2756   MacroAssembler* masm                = new MacroAssembler(&buffer);
2757 
2758   int frame_size_words;
2759   enum frame_layout {
2760                 thread_off,
2761                 extra_words };
2762 
2763   OopMapSet *oop_maps = new OopMapSet();
2764   OopMap* map = nullptr;
2765 
2766   int start = __ offset();
2767 
2768   map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
2769 
2770   int frame_complete = __ offset();
2771 
2772   const Register thread = rdi;
2773   __ get_thread(rdi);
2774 
2775   __ push(thread);
2776   __ set_last_Java_frame(thread, noreg, rbp, nullptr, noreg);
2777 
2778   __ call(RuntimeAddress(destination));
2779 
2780 
2781   // Set an oopmap for the call site.
2782   // We need this not only for callee-saved registers, but also for volatile
2783   // registers that the compiler might be keeping live across a safepoint.
2784 
2785   oop_maps->add_gc_map( __ offset() - start, map);
2786 
2787   // rax, contains the address we are going to jump to assuming no exception got installed
2788 
2789   __ addptr(rsp, wordSize);
2790 
2791   // clear last_Java_sp
2792   __ reset_last_Java_frame(thread, true);
2793   // check for pending exceptions
2794   Label pending;
2795   __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
2796   __ jcc(Assembler::notEqual, pending);
2797 
2798   // get the returned Method*
2799   __ get_vm_result_2(rbx, thread);
2800   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
2801 
2802   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
2803 
2804   RegisterSaver::restore_live_registers(masm);
2805 
2806   // We are back to the original state on entry and ready to go.
2807 
2808   __ jmp(rax);
2809 
2810   // Pending exception after the safepoint
2811 
2812   __ bind(pending);
2813 
2814   RegisterSaver::restore_live_registers(masm);
2815 
2816   // exception pending => remove activation and forward to exception handler
2817 
2818   __ get_thread(thread);
2819   __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
2820   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
2821   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2822 
2823   // -------------
2824   // make sure all code is generated
2825   masm->flush();
2826 
2827   // return the  blob
2828   // frame_size_words or bytes??
2829   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
2830 }