1 /*
   2  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "code/nativeInst.hpp"
  31 #include "code/vtableStubs.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/shared/gcLocker.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #include "prims/methodHandles.hpp"
  41 #include "runtime/jniHandles.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/signature.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/timerTrace.hpp"
  47 #include "runtime/vframeArray.hpp"
  48 #include "runtime/vm_version.hpp"
  49 #include "utilities/align.hpp"
  50 #include "vmreg_x86.inline.hpp"
  51 #ifdef COMPILER1
  52 #include "c1/c1_Runtime1.hpp"
  53 #endif
  54 #ifdef COMPILER2
  55 #include "opto/runtime.hpp"
  56 #endif
  57 
  58 #define __ masm->
  59 
  60 #ifdef PRODUCT
  61 #define BLOCK_COMMENT(str) /* nothing */
  62 #else
  63 #define BLOCK_COMMENT(str) __ block_comment(str)
  64 #endif // PRODUCT
  65 
  66 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  67 
  68 class RegisterSaver {
  69   // Capture info about frame layout
  70 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  71   enum layout {
  72                 fpu_state_off = 0,
  73                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  74                 st0_off, st0H_off,
  75                 st1_off, st1H_off,
  76                 st2_off, st2H_off,
  77                 st3_off, st3H_off,
  78                 st4_off, st4H_off,
  79                 st5_off, st5H_off,
  80                 st6_off, st6H_off,
  81                 st7_off, st7H_off,
  82                 xmm_off,
  83                 DEF_XMM_OFFS(0),
  84                 DEF_XMM_OFFS(1),
  85                 DEF_XMM_OFFS(2),
  86                 DEF_XMM_OFFS(3),
  87                 DEF_XMM_OFFS(4),
  88                 DEF_XMM_OFFS(5),
  89                 DEF_XMM_OFFS(6),
  90                 DEF_XMM_OFFS(7),
  91                 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
  92                 rdi_off,
  93                 rsi_off,
  94                 ignore_off,  // extra copy of rbp,
  95                 rsp_off,
  96                 rbx_off,
  97                 rdx_off,
  98                 rcx_off,
  99                 rax_off,
 100                 // The frame sender code expects that rbp will be in the "natural" place and
 101                 // will override any oopMap setting for it. We must therefore force the layout
 102                 // so that it agrees with the frame sender code.
 103                 rbp_off,
 104                 return_off,      // slot for return address
 105                 reg_save_size };
 106   enum { FPU_regs_live = flags_off - fpu_state_end };
 107 
 108   public:
 109 
 110   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
 111                                      int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
 112   static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
 113 
 114   static int rax_offset() { return rax_off; }
 115   static int rbx_offset() { return rbx_off; }
 116 
 117   // Offsets into the register save area
 118   // Used by deoptimization when it is managing result register
 119   // values on its own
 120 
 121   static int raxOffset(void) { return rax_off; }
 122   static int rdxOffset(void) { return rdx_off; }
 123   static int rbxOffset(void) { return rbx_off; }
 124   static int xmm0Offset(void) { return xmm0_off; }
 125   // This really returns a slot in the fp save area, which one is not important
 126   static int fpResultOffset(void) { return st0_off; }
 127 
 128   // During deoptimization only the result register need to be restored
 129   // all the other values have already been extracted.
 130 
 131   static void restore_result_registers(MacroAssembler* masm);
 132 
 133 };
 134 
 135 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 136                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 137   int num_xmm_regs = XMMRegister::number_of_registers;
 138   int ymm_bytes = num_xmm_regs * 16;
 139   int zmm_bytes = num_xmm_regs * 32;
 140 #ifdef COMPILER2
 141   int opmask_state_bytes = KRegister::number_of_registers * 8;
 142   if (save_vectors) {
 143     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 144     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 145     // Save upper half of YMM registers
 146     int vect_bytes = ymm_bytes;
 147     if (UseAVX > 2) {
 148       // Save upper half of ZMM registers as well
 149       vect_bytes += zmm_bytes;
 150       additional_frame_words += opmask_state_bytes / wordSize;
 151     }
 152     additional_frame_words += vect_bytes / wordSize;
 153   }
 154 #else
 155   assert(!save_vectors, "vectors are generated only by C2");
 156 #endif
 157   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 158   int frame_words = frame_size_in_bytes / wordSize;
 159   *total_frame_words = frame_words;
 160 
 161   assert(FPUStateSizeInWords == 27, "update stack layout");
 162 
 163   // save registers, fpu state, and flags
 164   // We assume caller has already has return address slot on the stack
 165   // We push epb twice in this sequence because we want the real rbp,
 166   // to be under the return like a normal enter and we want to use pusha
 167   // We push by hand instead of using push.
 168   __ enter();
 169   __ pusha();
 170   __ pushf();
 171   __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
 172   __ push_FPU_state();          // Save FPU state & init
 173 
 174   if (verify_fpu) {
 175     // Some stubs may have non standard FPU control word settings so
 176     // only check and reset the value when it required to be the
 177     // standard value.  The safepoint blob in particular can be used
 178     // in methods which are using the 24 bit control word for
 179     // optimized float math.
 180 
 181 #ifdef ASSERT
 182     // Make sure the control word has the expected value
 183     Label ok;
 184     __ cmpw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
 185     __ jccb(Assembler::equal, ok);
 186     __ stop("corrupted control word detected");
 187     __ bind(ok);
 188 #endif
 189 
 190     // Reset the control word to guard against exceptions being unmasked
 191     // since fstp_d can cause FPU stack underflow exceptions.  Write it
 192     // into the on stack copy and then reload that to make sure that the
 193     // current and future values are correct.
 194     __ movw(Address(rsp, 0), StubRoutines::x86::fpu_cntrl_wrd_std());
 195   }
 196 
 197   __ frstor(Address(rsp, 0));
 198   if (!verify_fpu) {
 199     // Set the control word so that exceptions are masked for the
 200     // following code.
 201     __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
 202   }
 203 
 204   int off = st0_off;
 205   int delta = st1_off - off;
 206 
 207   // Save the FPU registers in de-opt-able form
 208   for (int n = 0; n < FloatRegister::number_of_registers; n++) {
 209     __ fstp_d(Address(rsp, off*wordSize));
 210     off += delta;
 211   }
 212 
 213   off = xmm0_off;
 214   delta = xmm1_off - off;
 215   if(UseSSE == 1) {
 216     // Save the XMM state
 217     for (int n = 0; n < num_xmm_regs; n++) {
 218       __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
 219       off += delta;
 220     }
 221   } else if(UseSSE >= 2) {
 222     // Save whole 128bit (16 bytes) XMM registers
 223     for (int n = 0; n < num_xmm_regs; n++) {
 224       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 225       off += delta;
 226     }
 227   }
 228 
 229 #ifdef COMPILER2
 230   if (save_vectors) {
 231     __ subptr(rsp, ymm_bytes);
 232     // Save upper half of YMM registers
 233     for (int n = 0; n < num_xmm_regs; n++) {
 234       __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
 235     }
 236     if (UseAVX > 2) {
 237       __ subptr(rsp, zmm_bytes);
 238       // Save upper half of ZMM registers
 239       for (int n = 0; n < num_xmm_regs; n++) {
 240         __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
 241       }
 242       __ subptr(rsp, opmask_state_bytes);
 243       // Save opmask registers
 244       for (int n = 0; n < KRegister::number_of_registers; n++) {
 245         __ kmov(Address(rsp, n*8), as_KRegister(n));
 246       }
 247     }
 248   }
 249 #else
 250   assert(!save_vectors, "vectors are generated only by C2");
 251 #endif
 252 
 253   __ vzeroupper();
 254 
 255   // Set an oopmap for the call site.  This oopmap will map all
 256   // oop-registers and debug-info registers as callee-saved.  This
 257   // will allow deoptimization at this safepoint to find all possible
 258   // debug-info recordings, as well as let GC find all oops.
 259 
 260   OopMapSet *oop_maps = new OopMapSet();
 261   OopMap* map =  new OopMap( frame_words, 0 );
 262 
 263 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 264 #define NEXTREG(x) (x)->as_VMReg()->next()
 265 
 266   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 267   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 268   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 269   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 270   // rbp, location is known implicitly, no oopMap
 271   map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
 272   map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
 273 
 274   // %%% This is really a waste but we'll keep things as they were for now for the upper component
 275   off = st0_off;
 276   delta = st1_off - off;
 277   for (int n = 0; n < FloatRegister::number_of_registers; n++) {
 278     FloatRegister freg_name = as_FloatRegister(n);
 279     map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
 280     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
 281     off += delta;
 282   }
 283   off = xmm0_off;
 284   delta = xmm1_off - off;
 285   for (int n = 0; n < num_xmm_regs; n++) {
 286     XMMRegister xmm_name = as_XMMRegister(n);
 287     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 288     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
 289     off += delta;
 290   }
 291 #undef NEXTREG
 292 #undef STACK_OFFSET
 293 
 294   return map;
 295 }
 296 
 297 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 298   int opmask_state_bytes = 0;
 299   int additional_frame_bytes = 0;
 300   int num_xmm_regs = XMMRegister::number_of_registers;
 301   int ymm_bytes = num_xmm_regs * 16;
 302   int zmm_bytes = num_xmm_regs * 32;
 303   // Recover XMM & FPU state
 304 #ifdef COMPILER2
 305   if (restore_vectors) {
 306     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 307     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 308     // Save upper half of YMM registers
 309     additional_frame_bytes = ymm_bytes;
 310     if (UseAVX > 2) {
 311       // Save upper half of ZMM registers as well
 312       additional_frame_bytes += zmm_bytes;
 313       opmask_state_bytes = KRegister::number_of_registers * 8;
 314       additional_frame_bytes += opmask_state_bytes;
 315     }
 316   }
 317 #else
 318   assert(!restore_vectors, "vectors are generated only by C2");
 319 #endif
 320 
 321   int off = xmm0_off;
 322   int delta = xmm1_off - off;
 323 
 324   __ vzeroupper();
 325 
 326   if (UseSSE == 1) {
 327     // Restore XMM registers
 328     assert(additional_frame_bytes == 0, "");
 329     for (int n = 0; n < num_xmm_regs; n++) {
 330       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 331       off += delta;
 332     }
 333   } else if (UseSSE >= 2) {
 334     // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
 335     // ZMM because the movdqu instruction zeros the upper part of the XMM register.
 336     for (int n = 0; n < num_xmm_regs; n++) {
 337       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 338       off += delta;
 339     }
 340   }
 341 
 342   if (restore_vectors) {
 343     off = additional_frame_bytes - ymm_bytes;
 344     // Restore upper half of YMM registers.
 345     for (int n = 0; n < num_xmm_regs; n++) {
 346       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16+off));
 347     }
 348     if (UseAVX > 2) {
 349       // Restore upper half of ZMM registers.
 350       off = opmask_state_bytes;
 351       for (int n = 0; n < num_xmm_regs; n++) {
 352         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32+off));
 353       }
 354       for (int n = 0; n < KRegister::number_of_registers; n++) {
 355         __ kmov(as_KRegister(n), Address(rsp, n*8));
 356       }
 357     }
 358     __ addptr(rsp, additional_frame_bytes);
 359   }
 360 
 361   __ pop_FPU_state();
 362   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 363 
 364   __ popf();
 365   __ popa();
 366   // Get the rbp, described implicitly by the frame sender code (no oopMap)
 367   __ pop(rbp);
 368 }
 369 
 370 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 371 
 372   // Just restore result register. Only used by deoptimization. By
 373   // now any callee save register that needs to be restore to a c2
 374   // caller of the deoptee has been extracted into the vframeArray
 375   // and will be stuffed into the c2i adapter we create for later
 376   // restoration so only result registers need to be restored here.
 377   //
 378 
 379   __ frstor(Address(rsp, 0));      // Restore fpu state
 380 
 381   // Recover XMM & FPU state
 382   if( UseSSE == 1 ) {
 383     __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
 384   } else if( UseSSE >= 2 ) {
 385     __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
 386   }
 387   __ movptr(rax, Address(rsp, rax_off*wordSize));
 388   __ movptr(rdx, Address(rsp, rdx_off*wordSize));
 389   // Pop all of the register save are off the stack except the return address
 390   __ addptr(rsp, return_off * wordSize);
 391 }
 392 
 393 // Is vector's size (in bytes) bigger than a size saved by default?
 394 // 16 bytes XMM registers are saved by default using SSE2 movdqu instructions.
 395 // Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
 396 bool SharedRuntime::is_wide_vector(int size) {
 397   return size > 16;
 398 }
 399 
 400 // The java_calling_convention describes stack locations as ideal slots on
 401 // a frame with no abi restrictions. Since we must observe abi restrictions
 402 // (like the placement of the register window) the slots must be biased by
 403 // the following value.
 404 static int reg2offset_in(VMReg r) {
 405   // Account for saved rbp, and return address
 406   // This should really be in_preserve_stack_slots
 407   return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
 408 }
 409 
 410 static int reg2offset_out(VMReg r) {
 411   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 412 }
 413 
 414 // ---------------------------------------------------------------------------
 415 // Read the array of BasicTypes from a signature, and compute where the
 416 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 417 // quantities.  Values less than SharedInfo::stack0 are registers, those above
 418 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 419 // as framesizes are fixed.
 420 // VMRegImpl::stack0 refers to the first slot 0(sp).
 421 // and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.
 422 // Register up to Register::number_of_registers are the 32-bit
 423 // integer registers.
 424 
 425 // Pass first two oop/int args in registers ECX and EDX.
 426 // Pass first two float/double args in registers XMM0 and XMM1.
 427 // Doubles have precedence, so if you pass a mix of floats and doubles
 428 // the doubles will grab the registers before the floats will.
 429 
 430 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 431 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 432 // units regardless of build. Of course for i486 there is no 64 bit build
 433 
 434 
 435 // ---------------------------------------------------------------------------
 436 // The compiled Java calling convention.
 437 // Pass first two oop/int args in registers ECX and EDX.
 438 // Pass first two float/double args in registers XMM0 and XMM1.
 439 // Doubles have precedence, so if you pass a mix of floats and doubles
 440 // the doubles will grab the registers before the floats will.
 441 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 442                                            VMRegPair *regs,
 443                                            int total_args_passed) {
 444   uint    stack = 0;          // Starting stack position for args on stack
 445 
 446 
 447   // Pass first two oop/int args in registers ECX and EDX.
 448   uint reg_arg0 = 9999;
 449   uint reg_arg1 = 9999;
 450 
 451   // Pass first two float/double args in registers XMM0 and XMM1.
 452   // Doubles have precedence, so if you pass a mix of floats and doubles
 453   // the doubles will grab the registers before the floats will.
 454   // CNC - TURNED OFF FOR non-SSE.
 455   //       On Intel we have to round all doubles (and most floats) at
 456   //       call sites by storing to the stack in any case.
 457   // UseSSE=0 ==> Don't Use ==> 9999+0
 458   // UseSSE=1 ==> Floats only ==> 9999+1
 459   // UseSSE>=2 ==> Floats or doubles ==> 9999+2
 460   enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
 461   uint fargs = (UseSSE>=2) ? 2 : UseSSE;
 462   uint freg_arg0 = 9999+fargs;
 463   uint freg_arg1 = 9999+fargs;
 464 
 465   // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
 466   int i;
 467   for( i = 0; i < total_args_passed; i++) {
 468     if( sig_bt[i] == T_DOUBLE ) {
 469       // first 2 doubles go in registers
 470       if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
 471       else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
 472       else // Else double is passed low on the stack to be aligned.
 473         stack += 2;
 474     } else if( sig_bt[i] == T_LONG ) {
 475       stack += 2;
 476     }
 477   }
 478   int dstack = 0;             // Separate counter for placing doubles
 479 
 480   // Now pick where all else goes.
 481   for( i = 0; i < total_args_passed; i++) {
 482     // From the type and the argument number (count) compute the location
 483     switch( sig_bt[i] ) {
 484     case T_SHORT:
 485     case T_CHAR:
 486     case T_BYTE:
 487     case T_BOOLEAN:
 488     case T_INT:
 489     case T_ARRAY:
 490     case T_OBJECT:
 491     case T_ADDRESS:
 492       if( reg_arg0 == 9999 )  {
 493         reg_arg0 = i;
 494         regs[i].set1(rcx->as_VMReg());
 495       } else if( reg_arg1 == 9999 )  {
 496         reg_arg1 = i;
 497         regs[i].set1(rdx->as_VMReg());
 498       } else {
 499         regs[i].set1(VMRegImpl::stack2reg(stack++));
 500       }
 501       break;
 502     case T_FLOAT:
 503       if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
 504         freg_arg0 = i;
 505         regs[i].set1(xmm0->as_VMReg());
 506       } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
 507         freg_arg1 = i;
 508         regs[i].set1(xmm1->as_VMReg());
 509       } else {
 510         regs[i].set1(VMRegImpl::stack2reg(stack++));
 511       }
 512       break;
 513     case T_LONG:
 514       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 515       regs[i].set2(VMRegImpl::stack2reg(dstack));
 516       dstack += 2;
 517       break;
 518     case T_DOUBLE:
 519       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
 520       if( freg_arg0 == (uint)i ) {
 521         regs[i].set2(xmm0->as_VMReg());
 522       } else if( freg_arg1 == (uint)i ) {
 523         regs[i].set2(xmm1->as_VMReg());
 524       } else {
 525         regs[i].set2(VMRegImpl::stack2reg(dstack));
 526         dstack += 2;
 527       }
 528       break;
 529     case T_VOID: regs[i].set_bad(); break;
 530       break;
 531     default:
 532       ShouldNotReachHere();
 533       break;
 534     }
 535   }
 536 
 537   return stack;
 538 }
 539 
 540 const uint SharedRuntime::java_return_convention_max_int = 1;
 541 const uint SharedRuntime::java_return_convention_max_float = 1;
 542 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
 543                                           VMRegPair *regs,
 544                                           int total_args_passed) {
 545   Unimplemented();
 546   return 0;
 547 }
 548 
 549 // Patch the callers callsite with entry to compiled code if it exists.
 550 static void patch_callers_callsite(MacroAssembler *masm) {
 551   Label L;
 552   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 553   __ jcc(Assembler::equal, L);
 554   // Schedule the branch target address early.
 555   // Call into the VM to patch the caller, then jump to compiled callee
 556   // rax, isn't live so capture return address while we easily can
 557   __ movptr(rax, Address(rsp, 0));
 558   __ pusha();
 559   __ pushf();
 560 
 561   if (UseSSE == 1) {
 562     __ subptr(rsp, 2*wordSize);
 563     __ movflt(Address(rsp, 0), xmm0);
 564     __ movflt(Address(rsp, wordSize), xmm1);
 565   }
 566   if (UseSSE >= 2) {
 567     __ subptr(rsp, 4*wordSize);
 568     __ movdbl(Address(rsp, 0), xmm0);
 569     __ movdbl(Address(rsp, 2*wordSize), xmm1);
 570   }
 571 #ifdef COMPILER2
 572   // C2 may leave the stack dirty if not in SSE2+ mode
 573   if (UseSSE >= 2) {
 574     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 575   } else {
 576     __ empty_FPU_stack();
 577   }
 578 #endif /* COMPILER2 */
 579 
 580   // VM needs caller's callsite
 581   __ push(rax);
 582   // VM needs target method
 583   __ push(rbx);
 584   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 585   __ addptr(rsp, 2*wordSize);
 586 
 587   if (UseSSE == 1) {
 588     __ movflt(xmm0, Address(rsp, 0));
 589     __ movflt(xmm1, Address(rsp, wordSize));
 590     __ addptr(rsp, 2*wordSize);
 591   }
 592   if (UseSSE >= 2) {
 593     __ movdbl(xmm0, Address(rsp, 0));
 594     __ movdbl(xmm1, Address(rsp, 2*wordSize));
 595     __ addptr(rsp, 4*wordSize);
 596   }
 597 
 598   __ popf();
 599   __ popa();
 600   __ bind(L);
 601 }
 602 
 603 
 604 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
 605   int next_off = st_off - Interpreter::stackElementSize;
 606   __ movdbl(Address(rsp, next_off), r);
 607 }
 608 
 609 static void gen_c2i_adapter(MacroAssembler *masm,
 610                             const GrowableArray<SigEntry>& sig_extended,
 611                             const VMRegPair *regs,
 612                             Label& skip_fixup,
 613                             address start,
 614                             OopMapSet*& oop_maps,
 615                             int& frame_complete,
 616                             int& frame_size_in_words) {
 617   // Before we get into the guts of the C2I adapter, see if we should be here
 618   // at all.  We've come from compiled code and are attempting to jump to the
 619   // interpreter, which means the caller made a static call to get here
 620   // (vcalls always get a compiled target if there is one).  Check for a
 621   // compiled target.  If there is one, we need to patch the caller's call.
 622   patch_callers_callsite(masm);
 623 
 624   __ bind(skip_fixup);
 625 
 626 #ifdef COMPILER2
 627   // C2 may leave the stack dirty if not in SSE2+ mode
 628   if (UseSSE >= 2) {
 629     __ verify_FPU(0, "c2i transition should have clean FPU stack");
 630   } else {
 631     __ empty_FPU_stack();
 632   }
 633 #endif /* COMPILER2 */
 634 
 635   // Since all args are passed on the stack, total_args_passed * interpreter_
 636   // stack_element_size  is the
 637   // space we need.
 638   int extraspace = sig_extended.length() * Interpreter::stackElementSize;
 639 
 640   // Get return address
 641   __ pop(rax);
 642 
 643   // set senderSP value
 644   __ movptr(rsi, rsp);
 645 
 646   __ subptr(rsp, extraspace);
 647 
 648   // Now write the args into the outgoing interpreter space
 649   for (int i = 0; i < sig_extended.length(); i++) {
 650     if (sig_extended.at(i)._bt == T_VOID) {
 651       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
 652       continue;
 653     }
 654 
 655     // st_off points to lowest address on stack.
 656     int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
 657     int next_off = st_off - Interpreter::stackElementSize;
 658 
 659     // Say 4 args:
 660     // i   st_off
 661     // 0   12 T_LONG
 662     // 1    8 T_VOID
 663     // 2    4 T_OBJECT
 664     // 3    0 T_BOOL
 665     VMReg r_1 = regs[i].first();
 666     VMReg r_2 = regs[i].second();
 667     if (!r_1->is_valid()) {
 668       assert(!r_2->is_valid(), "");
 669       continue;
 670     }
 671 
 672     if (r_1->is_stack()) {
 673       // memory to memory use fpu stack top
 674       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 675 
 676       if (!r_2->is_valid()) {
 677         __ movl(rdi, Address(rsp, ld_off));
 678         __ movptr(Address(rsp, st_off), rdi);
 679       } else {
 680 
 681         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
 682         // st_off == MSW, st_off-wordSize == LSW
 683 
 684         __ movptr(rdi, Address(rsp, ld_off));
 685         __ movptr(Address(rsp, next_off), rdi);
 686         __ movptr(rdi, Address(rsp, ld_off + wordSize));
 687         __ movptr(Address(rsp, st_off), rdi);
 688       }
 689     } else if (r_1->is_Register()) {
 690       Register r = r_1->as_Register();
 691       if (!r_2->is_valid()) {
 692         __ movl(Address(rsp, st_off), r);
 693       } else {
 694         // long/double in gpr
 695         ShouldNotReachHere();
 696       }
 697     } else {
 698       assert(r_1->is_XMMRegister(), "");
 699       if (!r_2->is_valid()) {
 700         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 701       } else {
 702         assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type");
 703         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
 704       }
 705     }
 706   }
 707 
 708   // Schedule the branch target address early.
 709   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 710   // And repush original return address
 711   __ push(rax);
 712   __ jmp(rcx);
 713 }
 714 
 715 
 716 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
 717   int next_val_off = ld_off - Interpreter::stackElementSize;
 718   __ movdbl(r, Address(saved_sp, next_val_off));
 719 }
 720 
 721 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 722                         address code_start, address code_end,
 723                         Label& L_ok) {
 724   Label L_fail;
 725   __ lea(temp_reg, AddressLiteral(code_start, relocInfo::none));
 726   __ cmpptr(pc_reg, temp_reg);
 727   __ jcc(Assembler::belowEqual, L_fail);
 728   __ lea(temp_reg, AddressLiteral(code_end, relocInfo::none));
 729   __ cmpptr(pc_reg, temp_reg);
 730   __ jcc(Assembler::below, L_ok);
 731   __ bind(L_fail);
 732 }
 733 
 734 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 735                                     int comp_args_on_stack,
 736                                     const GrowableArray<SigEntry>& sig_extended,
 737                                     const VMRegPair *regs) {
 738 
 739   // Note: rsi contains the senderSP on entry. We must preserve it since
 740   // we may do a i2c -> c2i transition if we lose a race where compiled
 741   // code goes non-entrant while we get args ready.
 742 
 743   // Adapters can be frameless because they do not require the caller
 744   // to perform additional cleanup work, such as correcting the stack pointer.
 745   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 746   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 747   // even if a callee has modified the stack pointer.
 748   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 749   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 750   // up via the senderSP register).
 751   // In other words, if *either* the caller or callee is interpreted, we can
 752   // get the stack pointer repaired after a call.
 753   // This is why c2i and i2c adapters cannot be indefinitely composed.
 754   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 755   // both caller and callee would be compiled methods, and neither would
 756   // clean up the stack pointer changes performed by the two adapters.
 757   // If this happens, control eventually transfers back to the compiled
 758   // caller, but with an uncorrected stack, causing delayed havoc.
 759 
 760   // Pick up the return address
 761   __ movptr(rax, Address(rsp, 0));
 762 
 763   if (VerifyAdapterCalls &&
 764       (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
 765     // So, let's test for cascading c2i/i2c adapters right now.
 766     //  assert(Interpreter::contains($return_addr) ||
 767     //         StubRoutines::contains($return_addr),
 768     //         "i2c adapter must return to an interpreter frame");
 769     __ block_comment("verify_i2c { ");
 770     Label L_ok;
 771     if (Interpreter::code() != nullptr) {
 772       range_check(masm, rax, rdi,
 773                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 774                   L_ok);
 775     }
 776     if (StubRoutines::initial_stubs_code() != nullptr) {
 777       range_check(masm, rax, rdi,
 778                   StubRoutines::initial_stubs_code()->code_begin(),
 779                   StubRoutines::initial_stubs_code()->code_end(),
 780                   L_ok);
 781     }
 782     if (StubRoutines::final_stubs_code() != nullptr) {
 783       range_check(masm, rax, rdi,
 784                   StubRoutines::final_stubs_code()->code_begin(),
 785                   StubRoutines::final_stubs_code()->code_end(),
 786                   L_ok);
 787     }
 788     const char* msg = "i2c adapter must return to an interpreter frame";
 789     __ block_comment(msg);
 790     __ stop(msg);
 791     __ bind(L_ok);
 792     __ block_comment("} verify_i2ce ");
 793   }
 794 
 795   // Must preserve original SP for loading incoming arguments because
 796   // we need to align the outgoing SP for compiled code.
 797   __ movptr(rdi, rsp);
 798 
 799   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
 800   // in registers, we will occasionally have no stack args.
 801   int comp_words_on_stack = 0;
 802   if (comp_args_on_stack) {
 803     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
 804     // registers are below.  By subtracting stack0, we either get a negative
 805     // number (all values in registers) or the maximum stack slot accessed.
 806     // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
 807     // Convert 4-byte stack slots to words.
 808     comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
 809     // Round up to miminum stack alignment, in wordSize
 810     comp_words_on_stack = align_up(comp_words_on_stack, 2);
 811     __ subptr(rsp, comp_words_on_stack * wordSize);
 812   }
 813 
 814   // Align the outgoing SP
 815   __ andptr(rsp, -(StackAlignmentInBytes));
 816 
 817   // push the return address on the stack (note that pushing, rather
 818   // than storing it, yields the correct frame alignment for the callee)
 819   __ push(rax);
 820 
 821   // Put saved SP in another register
 822   const Register saved_sp = rax;
 823   __ movptr(saved_sp, rdi);
 824 
 825 
 826   // Will jump to the compiled code just as if compiled code was doing it.
 827   // Pre-load the register-jump target early, to schedule it better.
 828   __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 829 
 830   // Now generate the shuffle code.  Pick up all register args and move the
 831   // rest through the floating point stack top.
 832   for (int i = 0; i < sig_extended.length(); i++) {
 833     if (sig_extended.at(i)._bt == T_VOID) {
 834       // Longs and doubles are passed in native word order, but misaligned
 835       // in the 32-bit build.
 836       assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
 837       continue;
 838     }
 839 
 840     // Pick up 0, 1 or 2 words from SP+offset.
 841 
 842     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 843             "scrambled load targets?");
 844     // Load in argument order going down.
 845     int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize;
 846     // Point to interpreter value (vs. tag)
 847     int next_off = ld_off - Interpreter::stackElementSize;
 848     //
 849     //
 850     //
 851     VMReg r_1 = regs[i].first();
 852     VMReg r_2 = regs[i].second();
 853     if (!r_1->is_valid()) {
 854       assert(!r_2->is_valid(), "");
 855       continue;
 856     }
 857     if (r_1->is_stack()) {
 858       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 859       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 860 
 861       // We can use rsi as a temp here because compiled code doesn't need rsi as an input
 862       // and if we end up going thru a c2i because of a miss a reasonable value of rsi
 863       // we be generated.
 864       if (!r_2->is_valid()) {
 865         // __ fld_s(Address(saved_sp, ld_off));
 866         // __ fstp_s(Address(rsp, st_off));
 867         __ movl(rsi, Address(saved_sp, ld_off));
 868         __ movptr(Address(rsp, st_off), rsi);
 869       } else {
 870         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 871         // are accessed as negative so LSW is at LOW address
 872 
 873         // ld_off is MSW so get LSW
 874         // st_off is LSW (i.e. reg.first())
 875         // __ fld_d(Address(saved_sp, next_off));
 876         // __ fstp_d(Address(rsp, st_off));
 877         //
 878         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 879         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 880         // So we must adjust where to pick up the data to match the interpreter.
 881         //
 882         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 883         // are accessed as negative so LSW is at LOW address
 884 
 885         // ld_off is MSW so get LSW
 886         __ movptr(rsi, Address(saved_sp, next_off));
 887         __ movptr(Address(rsp, st_off), rsi);
 888         __ movptr(rsi, Address(saved_sp, ld_off));
 889         __ movptr(Address(rsp, st_off + wordSize), rsi);
 890       }
 891     } else if (r_1->is_Register()) {  // Register argument
 892       Register r = r_1->as_Register();
 893       assert(r != rax, "must be different");
 894       if (r_2->is_valid()) {
 895         //
 896         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
 897         // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
 898         // So we must adjust where to pick up the data to match the interpreter.
 899 
 900         // this can be a misaligned move
 901         __ movptr(r, Address(saved_sp, next_off));
 902         assert(r_2->as_Register() != rax, "need another temporary register");
 903         // Remember r_1 is low address (and LSB on x86)
 904         // So r_2 gets loaded from high address regardless of the platform
 905         __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
 906       } else {
 907         __ movl(r, Address(saved_sp, ld_off));
 908       }
 909     } else {
 910       assert(r_1->is_XMMRegister(), "");
 911       if (!r_2->is_valid()) {
 912         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 913       } else {
 914         move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
 915       }
 916     }
 917   }
 918 
 919   // 6243940 We might end up in handle_wrong_method if
 920   // the callee is deoptimized as we race thru here. If that
 921   // happens we don't want to take a safepoint because the
 922   // caller frame will look interpreted and arguments are now
 923   // "compiled" so it is much better to make this transition
 924   // invisible to the stack walking code. Unfortunately if
 925   // we try and find the callee by normal means a safepoint
 926   // is possible. So we stash the desired callee in the thread
 927   // and the vm will find there should this case occur.
 928 
 929   __ get_thread(rax);
 930   __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
 931 
 932   // move Method* to rax, in case we end up in an c2i adapter.
 933   // the c2i adapters expect Method* in rax, (c2) because c2's
 934   // resolve stubs return the result (the method) in rax,.
 935   // I'd love to fix this.
 936   __ mov(rax, rbx);
 937 
 938   __ jmp(rdi);
 939 }
 940 
 941 // ---------------------------------------------------------------
 942 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 943                                                             int comp_args_on_stack,
 944                                                             const GrowableArray<SigEntry>& sig_extended,
 945                                                             const VMRegPair *regs,
 946                                                             AdapterFingerPrint* fingerprint,
 947                                                             AdapterBlob*& new_adapter) {
 948   address i2c_entry = __ pc();
 949 
 950   gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
 951 
 952   // -------------------------------------------------------------------------
 953   // Generate a C2I adapter.  On entry we know rbx, holds the Method* during calls
 954   // to the interpreter.  The args start out packed in the compiled layout.  They
 955   // need to be unpacked into the interpreter layout.  This will almost always
 956   // require some stack space.  We grow the current (compiled) stack, then repack
 957   // the args.  We  finally end in a jump to the generic interpreter entry point.
 958   // On exit from the interpreter, the interpreter will restore our SP (lest the
 959   // compiled code, which relies solely on SP and not EBP, get sick).
 960 
 961   address c2i_unverified_entry = __ pc();
 962   Label skip_fixup;
 963 
 964   Register data = rax;
 965   Register receiver = rcx;
 966   Register temp = rbx;
 967 
 968   {
 969     __ ic_check(1 /* end_alignment */);
 970     __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
 971     // Method might have been compiled since the call site was patched to
 972     // interpreted if that is the case treat it as a miss so we can get
 973     // the call site corrected.
 974     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 975     __ jcc(Assembler::equal, skip_fixup);
 976   }
 977 
 978   address c2i_entry = __ pc();
 979 
 980   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 981   bs->c2i_entry_barrier(masm);
 982 
 983   OopMapSet* oop_maps = nullptr;
 984   int frame_complete = CodeOffsets::frame_never_safe;
 985   int frame_size_in_words = 0;
 986   gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
 987 
 988   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
 989   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 990 }
 991 
 992 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 993                                          VMRegPair *regs,
 994                                          int total_args_passed) {
 995 
 996 // We return the amount of VMRegImpl stack slots we need to reserve for all
 997 // the arguments NOT counting out_preserve_stack_slots.
 998 
 999   uint    stack = 0;        // All arguments on stack
1000 
1001   for( int i = 0; i < total_args_passed; i++) {
1002     // From the type and the argument number (count) compute the location
1003     switch( sig_bt[i] ) {
1004     case T_BOOLEAN:
1005     case T_CHAR:
1006     case T_FLOAT:
1007     case T_BYTE:
1008     case T_SHORT:
1009     case T_INT:
1010     case T_OBJECT:
1011     case T_ARRAY:
1012     case T_ADDRESS:
1013     case T_METADATA:
1014       regs[i].set1(VMRegImpl::stack2reg(stack++));
1015       break;
1016     case T_LONG:
1017     case T_DOUBLE: // The stack numbering is reversed from Java
1018       // Since C arguments do not get reversed, the ordering for
1019       // doubles on the stack must be opposite the Java convention
1020       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
1021       regs[i].set2(VMRegImpl::stack2reg(stack));
1022       stack += 2;
1023       break;
1024     case T_VOID: regs[i].set_bad(); break;
1025     default:
1026       ShouldNotReachHere();
1027       break;
1028     }
1029   }
1030   return stack;
1031 }
1032 
1033 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1034                                              uint num_bits,
1035                                              uint total_args_passed) {
1036   Unimplemented();
1037   return 0;
1038 }
1039 
1040 // A simple move of integer like type
1041 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1042   if (src.first()->is_stack()) {
1043     if (dst.first()->is_stack()) {
1044       // stack to stack
1045       // __ ld(FP, reg2offset(src.first()), L5);
1046       // __ st(L5, SP, reg2offset(dst.first()));
1047       __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1048       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1049     } else {
1050       // stack to reg
1051       __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
1052     }
1053   } else if (dst.first()->is_stack()) {
1054     // reg to stack
1055     // no need to sign extend on 64bit
1056     __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1057   } else {
1058     if (dst.first() != src.first()) {
1059       __ mov(dst.first()->as_Register(), src.first()->as_Register());
1060     }
1061   }
1062 }
1063 
1064 // An oop arg. Must pass a handle not the oop itself
1065 static void object_move(MacroAssembler* masm,
1066                         OopMap* map,
1067                         int oop_handle_offset,
1068                         int framesize_in_slots,
1069                         VMRegPair src,
1070                         VMRegPair dst,
1071                         bool is_receiver,
1072                         int* receiver_offset) {
1073 
1074   // Because of the calling conventions we know that src can be a
1075   // register or a stack location. dst can only be a stack location.
1076 
1077   assert(dst.first()->is_stack(), "must be stack");
1078   // must pass a handle. First figure out the location we use as a handle
1079 
1080   if (src.first()->is_stack()) {
1081     // Oop is already on the stack as an argument
1082     Register rHandle = rax;
1083     Label nil;
1084     __ xorptr(rHandle, rHandle);
1085     __ cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
1086     __ jcc(Assembler::equal, nil);
1087     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1088     __ bind(nil);
1089     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1090 
1091     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1092     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1093     if (is_receiver) {
1094       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1095     }
1096   } else {
1097     // Oop is in a register we must store it to the space we reserve
1098     // on the stack for oop_handles
1099     const Register rOop = src.first()->as_Register();
1100     const Register rHandle = rax;
1101     int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1102     int offset = oop_slot*VMRegImpl::stack_slot_size;
1103     Label skip;
1104     __ movptr(Address(rsp, offset), rOop);
1105     map->set_oop(VMRegImpl::stack2reg(oop_slot));
1106     __ xorptr(rHandle, rHandle);
1107     __ cmpptr(rOop, NULL_WORD);
1108     __ jcc(Assembler::equal, skip);
1109     __ lea(rHandle, Address(rsp, offset));
1110     __ bind(skip);
1111     // Store the handle parameter
1112     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1113     if (is_receiver) {
1114       *receiver_offset = offset;
1115     }
1116   }
1117 }
1118 
1119 // A float arg may have to do float reg int reg conversion
1120 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1121   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1122 
1123   // Because of the calling convention we know that src is either a stack location
1124   // or an xmm register. dst can only be a stack location.
1125 
1126   assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1127 
1128   if (src.first()->is_stack()) {
1129     __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1130     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1131   } else {
1132     // reg to stack
1133     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1134   }
1135 }
1136 
1137 // A long move
1138 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1139 
1140   // The only legal possibility for a long_move VMRegPair is:
1141   // 1: two stack slots (possibly unaligned)
1142   // as neither the java  or C calling convention will use registers
1143   // for longs.
1144 
1145   if (src.first()->is_stack() && dst.first()->is_stack()) {
1146     assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1147     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1148     __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1149     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1150     __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1151   } else {
1152     ShouldNotReachHere();
1153   }
1154 }
1155 
1156 // A double move
1157 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1158 
1159   // The only legal possibilities for a double_move VMRegPair are:
1160   // The painful thing here is that like long_move a VMRegPair might be
1161 
1162   // Because of the calling convention we know that src is either
1163   //   1: a single physical register (xmm registers only)
1164   //   2: two stack slots (possibly unaligned)
1165   // dst can only be a pair of stack slots.
1166 
1167   assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1168 
1169   if (src.first()->is_stack()) {
1170     // source is all stack
1171     __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1172     __ movptr(rbx, Address(rbp, reg2offset_in(src.second())));
1173     __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1174     __ movptr(Address(rsp, reg2offset_out(dst.second())), rbx);
1175   } else {
1176     // reg to stack
1177     // No worries about stack alignment
1178     __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1179   }
1180 }
1181 
1182 
1183 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1184   // We always ignore the frame_slots arg and just use the space just below frame pointer
1185   // which by this time is free to use
1186   switch (ret_type) {
1187   case T_FLOAT:
1188     __ fstp_s(Address(rbp, -wordSize));
1189     break;
1190   case T_DOUBLE:
1191     __ fstp_d(Address(rbp, -2*wordSize));
1192     break;
1193   case T_VOID:  break;
1194   case T_LONG:
1195     __ movptr(Address(rbp, -wordSize), rax);
1196     __ movptr(Address(rbp, -2*wordSize), rdx);
1197     break;
1198   default: {
1199     __ movptr(Address(rbp, -wordSize), rax);
1200     }
1201   }
1202 }
1203 
1204 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1205   // We always ignore the frame_slots arg and just use the space just below frame pointer
1206   // which by this time is free to use
1207   switch (ret_type) {
1208   case T_FLOAT:
1209     __ fld_s(Address(rbp, -wordSize));
1210     break;
1211   case T_DOUBLE:
1212     __ fld_d(Address(rbp, -2*wordSize));
1213     break;
1214   case T_LONG:
1215     __ movptr(rax, Address(rbp, -wordSize));
1216     __ movptr(rdx, Address(rbp, -2*wordSize));
1217     break;
1218   case T_VOID:  break;
1219   default: {
1220     __ movptr(rax, Address(rbp, -wordSize));
1221     }
1222   }
1223 }
1224 
1225 static void verify_oop_args(MacroAssembler* masm,
1226                             const methodHandle& method,
1227                             const BasicType* sig_bt,
1228                             const VMRegPair* regs) {
1229   Register temp_reg = rbx;  // not part of any compiled calling seq
1230   if (VerifyOops) {
1231     for (int i = 0; i < method->size_of_parameters(); i++) {
1232       if (is_reference_type(sig_bt[i])) {
1233         VMReg r = regs[i].first();
1234         assert(r->is_valid(), "bad oop arg");
1235         if (r->is_stack()) {
1236           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1237           __ verify_oop(temp_reg);
1238         } else {
1239           __ verify_oop(r->as_Register());
1240         }
1241       }
1242     }
1243   }
1244 }
1245 
1246 static void gen_special_dispatch(MacroAssembler* masm,
1247                                  const methodHandle& method,
1248                                  const BasicType* sig_bt,
1249                                  const VMRegPair* regs) {
1250   verify_oop_args(masm, method, sig_bt, regs);
1251   vmIntrinsics::ID iid = method->intrinsic_id();
1252 
1253   // Now write the args into the outgoing interpreter space
1254   bool     has_receiver   = false;
1255   Register receiver_reg   = noreg;
1256   int      member_arg_pos = -1;
1257   Register member_reg     = noreg;
1258   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1259   if (ref_kind != 0) {
1260     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1261     member_reg = rbx;  // known to be free at this point
1262     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1263   } else if (iid == vmIntrinsics::_invokeBasic) {
1264     has_receiver = true;
1265   } else {
1266     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1267   }
1268 
1269   if (member_reg != noreg) {
1270     // Load the member_arg into register, if necessary.
1271     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1272     VMReg r = regs[member_arg_pos].first();
1273     if (r->is_stack()) {
1274       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1275     } else {
1276       // no data motion is needed
1277       member_reg = r->as_Register();
1278     }
1279   }
1280 
1281   if (has_receiver) {
1282     // Make sure the receiver is loaded into a register.
1283     assert(method->size_of_parameters() > 0, "oob");
1284     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1285     VMReg r = regs[0].first();
1286     assert(r->is_valid(), "bad receiver arg");
1287     if (r->is_stack()) {
1288       // Porting note:  This assumes that compiled calling conventions always
1289       // pass the receiver oop in a register.  If this is not true on some
1290       // platform, pick a temp and load the receiver from stack.
1291       fatal("receiver always in a register");
1292       receiver_reg = rcx;  // known to be free at this point
1293       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1294     } else {
1295       // no data motion is needed
1296       receiver_reg = r->as_Register();
1297     }
1298   }
1299 
1300   // Figure out which address we are really jumping to:
1301   MethodHandles::generate_method_handle_dispatch(masm, iid,
1302                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1303 }
1304 
1305 // ---------------------------------------------------------------------------
1306 // Generate a native wrapper for a given method.  The method takes arguments
1307 // in the Java compiled code convention, marshals them to the native
1308 // convention (handlizes oops, etc), transitions to native, makes the call,
1309 // returns to java state (possibly blocking), unhandlizes any result and
1310 // returns.
1311 //
1312 // Critical native functions are a shorthand for the use of
1313 // GetPrimtiveArrayCritical and disallow the use of any other JNI
1314 // functions.  The wrapper is expected to unpack the arguments before
1315 // passing them to the callee. Critical native functions leave the state _in_Java,
1316 // since they cannot stop for GC.
1317 // Some other parts of JNI setup are skipped like the tear down of the JNI handle
1318 // block and the check for pending exceptions it's impossible for them
1319 // to be thrown.
1320 //
1321 //
1322 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1323                                                 const methodHandle& method,
1324                                                 int compile_id,
1325                                                 BasicType* in_sig_bt,
1326                                                 VMRegPair* in_regs,
1327                                                 BasicType ret_type) {
1328   if (method->is_method_handle_intrinsic()) {
1329     vmIntrinsics::ID iid = method->intrinsic_id();
1330     intptr_t start = (intptr_t)__ pc();
1331     int vep_offset = ((intptr_t)__ pc()) - start;
1332     gen_special_dispatch(masm,
1333                          method,
1334                          in_sig_bt,
1335                          in_regs);
1336     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1337     __ flush();
1338     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1339     return nmethod::new_native_nmethod(method,
1340                                        compile_id,
1341                                        masm->code(),
1342                                        vep_offset,
1343                                        frame_complete,
1344                                        stack_slots / VMRegImpl::slots_per_word,
1345                                        in_ByteSize(-1),
1346                                        in_ByteSize(-1),
1347                                        (OopMapSet*)nullptr);
1348   }
1349   address native_func = method->native_function();
1350   assert(native_func != nullptr, "must have function");
1351 
1352   // An OopMap for lock (and class if static)
1353   OopMapSet *oop_maps = new OopMapSet();
1354 
1355   // We have received a description of where all the java arg are located
1356   // on entry to the wrapper. We need to convert these args to where
1357   // the jni function will expect them. To figure out where they go
1358   // we convert the java signature to a C signature by inserting
1359   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1360 
1361   const int total_in_args = method->size_of_parameters();
1362   int  total_c_args       = total_in_args + (method->is_static() ? 2 : 1);
1363 
1364   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1365   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1366 
1367   int argc = 0;
1368   out_sig_bt[argc++] = T_ADDRESS;
1369   if (method->is_static()) {
1370     out_sig_bt[argc++] = T_OBJECT;
1371   }
1372 
1373   for (int i = 0; i < total_in_args ; i++ ) {
1374     out_sig_bt[argc++] = in_sig_bt[i];
1375   }
1376 
1377   // Now figure out where the args must be stored and how much stack space
1378   // they require.
1379   int out_arg_slots;
1380   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1381 
1382   // Compute framesize for the wrapper.  We need to handlize all oops in
1383   // registers a max of 2 on x86.
1384 
1385   // Calculate the total number of stack slots we will need.
1386 
1387   // First count the abi requirement plus all of the outgoing args
1388   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1389 
1390   // Now the space for the inbound oop handle area
1391   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1392 
1393   int oop_handle_offset = stack_slots;
1394   stack_slots += total_save_slots;
1395 
1396   // Now any space we need for handlizing a klass if static method
1397 
1398   int klass_slot_offset = 0;
1399   int klass_offset = -1;
1400   int lock_slot_offset = 0;
1401   bool is_static = false;
1402 
1403   if (method->is_static()) {
1404     klass_slot_offset = stack_slots;
1405     stack_slots += VMRegImpl::slots_per_word;
1406     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1407     is_static = true;
1408   }
1409 
1410   // Plus a lock if needed
1411 
1412   if (method->is_synchronized()) {
1413     lock_slot_offset = stack_slots;
1414     stack_slots += VMRegImpl::slots_per_word;
1415   }
1416 
1417   // Now a place (+2) to save return values or temp during shuffling
1418   // + 2 for return address (which we own) and saved rbp,
1419   stack_slots += 4;
1420 
1421   // Ok The space we have allocated will look like:
1422   //
1423   //
1424   // FP-> |                     |
1425   //      |---------------------|
1426   //      | 2 slots for moves   |
1427   //      |---------------------|
1428   //      | lock box (if sync)  |
1429   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1430   //      | klass (if static)   |
1431   //      |---------------------| <- klass_slot_offset
1432   //      | oopHandle area      |
1433   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1434   //      | outbound memory     |
1435   //      | based arguments     |
1436   //      |                     |
1437   //      |---------------------|
1438   //      |                     |
1439   // SP-> | out_preserved_slots |
1440   //
1441   //
1442   // ****************************************************************************
1443   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1444   // arguments off of the stack after the jni call. Before the call we can use
1445   // instructions that are SP relative. After the jni call we switch to FP
1446   // relative instructions instead of re-adjusting the stack on windows.
1447   // ****************************************************************************
1448 
1449 
1450   // Now compute actual number of stack words we need rounding to make
1451   // stack properly aligned.
1452   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1453 
1454   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1455 
1456   intptr_t start = (intptr_t)__ pc();
1457 
1458   // First thing make an ic check to see if we should even be here
1459 
1460   // We are free to use all registers as temps without saving them and
1461   // restoring them except rbp. rbp is the only callee save register
1462   // as far as the interpreter and the compiler(s) are concerned.
1463 
1464 
1465   const Register receiver = rcx;
1466   Label exception_pending;
1467 
1468   __ verify_oop(receiver);
1469   // verified entry must be aligned for code patching.
1470   __ ic_check(8 /* end_alignment */);
1471 
1472   int vep_offset = ((intptr_t)__ pc()) - start;
1473 
1474 #ifdef COMPILER1
1475   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1476   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1477     inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1478    }
1479 #endif // COMPILER1
1480 
1481   // The instruction at the verified entry point must be 5 bytes or longer
1482   // because it can be patched on the fly by make_non_entrant. The stack bang
1483   // instruction fits that requirement.
1484 
1485   // Generate stack overflow check
1486   __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
1487 
1488   // Generate a new frame for the wrapper.
1489   __ enter();
1490   // -2 because return address is already present and so is saved rbp
1491   __ subptr(rsp, stack_size - 2*wordSize);
1492 
1493 
1494   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1495   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
1496 
1497   // Frame is now completed as far as size and linkage.
1498   int frame_complete = ((intptr_t)__ pc()) - start;
1499 
1500   // Calculate the difference between rsp and rbp,. We need to know it
1501   // after the native call because on windows Java Natives will pop
1502   // the arguments and it is painful to do rsp relative addressing
1503   // in a platform independent way. So after the call we switch to
1504   // rbp, relative addressing.
1505 
1506   int fp_adjustment = stack_size - 2*wordSize;
1507 
1508 #ifdef COMPILER2
1509   // C2 may leave the stack dirty if not in SSE2+ mode
1510   if (UseSSE >= 2) {
1511     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1512   } else {
1513     __ empty_FPU_stack();
1514   }
1515 #endif /* COMPILER2 */
1516 
1517   // Compute the rbp, offset for any slots used after the jni call
1518 
1519   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1520 
1521   // We use rdi as a thread pointer because it is callee save and
1522   // if we load it once it is usable thru the entire wrapper
1523   const Register thread = rdi;
1524 
1525    // We use rsi as the oop handle for the receiver/klass
1526    // It is callee save so it survives the call to native
1527 
1528    const Register oop_handle_reg = rsi;
1529 
1530    __ get_thread(thread);
1531 
1532   //
1533   // We immediately shuffle the arguments so that any vm call we have to
1534   // make from here on out (sync slow path, jvmti, etc.) we will have
1535   // captured the oops from our caller and have a valid oopMap for
1536   // them.
1537 
1538   // -----------------
1539   // The Grand Shuffle
1540   //
1541   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1542   // and, if static, the class mirror instead of a receiver.  This pretty much
1543   // guarantees that register layout will not match (and x86 doesn't use reg
1544   // parms though amd does).  Since the native abi doesn't use register args
1545   // and the java conventions does we don't have to worry about collisions.
1546   // All of our moved are reg->stack or stack->stack.
1547   // We ignore the extra arguments during the shuffle and handle them at the
1548   // last moment. The shuffle is described by the two calling convention
1549   // vectors we have in our possession. We simply walk the java vector to
1550   // get the source locations and the c vector to get the destinations.
1551 
1552   int c_arg = method->is_static() ? 2 : 1;
1553 
1554   // Record rsp-based slot for receiver on stack for non-static methods
1555   int receiver_offset = -1;
1556 
1557   // This is a trick. We double the stack slots so we can claim
1558   // the oops in the caller's frame. Since we are sure to have
1559   // more args than the caller doubling is enough to make
1560   // sure we can capture all the incoming oop args from the
1561   // caller.
1562   //
1563   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1564 
1565   // Mark location of rbp,
1566   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1567 
1568   // We know that we only have args in at most two integer registers (rcx, rdx). So rax, rbx
1569   // Are free to temporaries if we have to do  stack to steck moves.
1570   // All inbound args are referenced based on rbp, and all outbound args via rsp.
1571 
1572   for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1573     switch (in_sig_bt[i]) {
1574       case T_ARRAY:
1575       case T_OBJECT:
1576         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1577                     ((i == 0) && (!is_static)),
1578                     &receiver_offset);
1579         break;
1580       case T_VOID:
1581         break;
1582 
1583       case T_FLOAT:
1584         float_move(masm, in_regs[i], out_regs[c_arg]);
1585           break;
1586 
1587       case T_DOUBLE:
1588         assert( i + 1 < total_in_args &&
1589                 in_sig_bt[i + 1] == T_VOID &&
1590                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1591         double_move(masm, in_regs[i], out_regs[c_arg]);
1592         break;
1593 
1594       case T_LONG :
1595         long_move(masm, in_regs[i], out_regs[c_arg]);
1596         break;
1597 
1598       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1599 
1600       default:
1601         simple_move32(masm, in_regs[i], out_regs[c_arg]);
1602     }
1603   }
1604 
1605   // Pre-load a static method's oop into rsi.  Used both by locking code and
1606   // the normal JNI call code.
1607   if (method->is_static()) {
1608 
1609     //  load opp into a register
1610     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1611 
1612     // Now handlize the static class mirror it's known not-null.
1613     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1614     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1615 
1616     // Now get the handle
1617     __ lea(oop_handle_reg, Address(rsp, klass_offset));
1618     // store the klass handle as second argument
1619     __ movptr(Address(rsp, wordSize), oop_handle_reg);
1620   }
1621 
1622   // Change state to native (we save the return address in the thread, since it might not
1623   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1624   // points into the right code segment. It does not have to be the correct return pc.
1625   // We use the same pc/oopMap repeatedly when we call out
1626 
1627   intptr_t the_pc = (intptr_t) __ pc();
1628   oop_maps->add_gc_map(the_pc - start, map);
1629 
1630   __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc, noreg);
1631 
1632 
1633   // We have all of the arguments setup at this point. We must not touch any register
1634   // argument registers at this point (what if we save/restore them there are no oop?
1635 
1636   if (DTraceMethodProbes) {
1637     __ mov_metadata(rax, method());
1638     __ call_VM_leaf(
1639          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1640          thread, rax);
1641   }
1642 
1643   // RedefineClasses() tracing support for obsolete method entry
1644   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1645     __ mov_metadata(rax, method());
1646     __ call_VM_leaf(
1647          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1648          thread, rax);
1649   }
1650 
1651   // These are register definitions we need for locking/unlocking
1652   const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
1653   const Register obj_reg  = rcx;  // Will contain the oop
1654   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1655 
1656   Label slow_path_lock;
1657   Label lock_done;
1658 
1659   // Lock a synchronized method
1660   if (method->is_synchronized()) {
1661     Label count_mon;
1662 
1663     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1664 
1665     // Get the handle (the 2nd argument)
1666     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1667 
1668     // Get address of the box
1669 
1670     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1671 
1672     // Load the oop from the handle
1673     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1674 
1675     if (LockingMode == LM_MONITOR) {
1676       __ jmp(slow_path_lock);
1677     } else if (LockingMode == LM_LEGACY) {
1678       // Load immediate 1 into swap_reg %rax,
1679       __ movptr(swap_reg, 1);
1680 
1681       // Load (object->mark() | 1) into swap_reg %rax,
1682       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1683 
1684       // Save (object->mark() | 1) into BasicLock's displaced header
1685       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1686 
1687       // src -> dest iff dest == rax, else rax, <- dest
1688       // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1689       __ lock();
1690       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1691       __ jcc(Assembler::equal, count_mon);
1692 
1693       // Test if the oopMark is an obvious stack pointer, i.e.,
1694       //  1) (mark & 3) == 0, and
1695       //  2) rsp <= mark < mark + os::pagesize()
1696       // These 3 tests can be done by evaluating the following
1697       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1698       // assuming both stack pointer and pagesize have their
1699       // least significant 2 bits clear.
1700       // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1701 
1702       __ subptr(swap_reg, rsp);
1703       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
1704 
1705       // Save the test result, for recursive case, the result is zero
1706       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1707       __ jcc(Assembler::notEqual, slow_path_lock);
1708     } else {
1709       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1710       // Lacking registers and thread on x86_32. Always take slow path.
1711       __ jmp(slow_path_lock);
1712     }
1713     __ bind(count_mon);
1714     __ inc_held_monitor_count();
1715 
1716     // Slow path will re-enter here
1717     __ bind(lock_done);
1718   }
1719 
1720 
1721   // Finally just about ready to make the JNI call
1722 
1723   // get JNIEnv* which is first argument to native
1724   __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1725   __ movptr(Address(rsp, 0), rdx);
1726 
1727   // Now set thread in native
1728   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1729 
1730   __ call(RuntimeAddress(native_func));
1731 
1732   // Verify or restore cpu control state after JNI call
1733   __ restore_cpu_control_state_after_jni(noreg);
1734 
1735   // WARNING - on Windows Java Natives use pascal calling convention and pop the
1736   // arguments off of the stack. We could just re-adjust the stack pointer here
1737   // and continue to do SP relative addressing but we instead switch to FP
1738   // relative addressing.
1739 
1740   // Unpack native results.
1741   switch (ret_type) {
1742   case T_BOOLEAN: __ c2bool(rax);            break;
1743   case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
1744   case T_BYTE   : __ sign_extend_byte (rax); break;
1745   case T_SHORT  : __ sign_extend_short(rax); break;
1746   case T_INT    : /* nothing to do */        break;
1747   case T_DOUBLE :
1748   case T_FLOAT  :
1749     // Result is in st0 we'll save as needed
1750     break;
1751   case T_ARRAY:                 // Really a handle
1752   case T_OBJECT:                // Really a handle
1753       break; // can't de-handlize until after safepoint check
1754   case T_VOID: break;
1755   case T_LONG: break;
1756   default       : ShouldNotReachHere();
1757   }
1758 
1759   // Switch thread to "native transition" state before reading the synchronization state.
1760   // This additional state is necessary because reading and testing the synchronization
1761   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1762   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1763   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1764   //     Thread A is resumed to finish this native method, but doesn't block here since it
1765   //     didn't see any synchronization is progress, and escapes.
1766   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1767 
1768   // Force this write out before the read below
1769   if (!UseSystemMemoryBarrier) {
1770     __ membar(Assembler::Membar_mask_bits(
1771               Assembler::LoadLoad | Assembler::LoadStore |
1772               Assembler::StoreLoad | Assembler::StoreStore));
1773   }
1774 
1775   if (AlwaysRestoreFPU) {
1776     // Make sure the control word is correct.
1777     __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
1778   }
1779 
1780   // check for safepoint operation in progress and/or pending suspend requests
1781   { Label Continue, slow_path;
1782 
1783     __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
1784 
1785     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1786     __ jcc(Assembler::equal, Continue);
1787     __ bind(slow_path);
1788 
1789     // Don't use call_VM as it will see a possible pending exception and forward it
1790     // and never return here preventing us from clearing _last_native_pc down below.
1791     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1792     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1793     // by hand.
1794     //
1795     __ vzeroupper();
1796 
1797     save_native_result(masm, ret_type, stack_slots);
1798     __ push(thread);
1799     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1800                                               JavaThread::check_special_condition_for_native_trans)));
1801     __ increment(rsp, wordSize);
1802     // Restore any method result value
1803     restore_native_result(masm, ret_type, stack_slots);
1804     __ bind(Continue);
1805   }
1806 
1807   // change thread state
1808   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1809 
1810   Label reguard;
1811   Label reguard_done;
1812   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
1813   __ jcc(Assembler::equal, reguard);
1814 
1815   // slow path reguard  re-enters here
1816   __ bind(reguard_done);
1817 
1818   // Handle possible exception (will unlock if necessary)
1819 
1820   // native result if any is live
1821 
1822   // Unlock
1823   Label slow_path_unlock;
1824   Label unlock_done;
1825   if (method->is_synchronized()) {
1826 
1827     Label fast_done;
1828 
1829     // Get locked oop from the handle we passed to jni
1830     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1831 
1832     if (LockingMode == LM_LEGACY) {
1833       Label not_recur;
1834       // Simple recursive lock?
1835       __ cmpptr(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
1836       __ jcc(Assembler::notEqual, not_recur);
1837       __ dec_held_monitor_count();
1838       __ jmpb(fast_done);
1839       __ bind(not_recur);
1840     }
1841 
1842     // Must save rax, if it is live now because cmpxchg must use it
1843     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1844       save_native_result(masm, ret_type, stack_slots);
1845     }
1846 
1847     if (LockingMode == LM_MONITOR) {
1848       __ jmp(slow_path_unlock);
1849     } else if (LockingMode == LM_LEGACY) {
1850       //  get old displaced header
1851       __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
1852 
1853       // get address of the stack lock
1854       __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1855 
1856       // Atomic swap old header if oop still contains the stack lock
1857       // src -> dest iff dest == rax, else rax, <- dest
1858       // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
1859       __ lock();
1860       __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1861       __ jcc(Assembler::notEqual, slow_path_unlock);
1862       __ dec_held_monitor_count();
1863     } else {
1864       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1865       __ lightweight_unlock(obj_reg, swap_reg, thread, lock_reg, slow_path_unlock);
1866       __ dec_held_monitor_count();
1867     }
1868 
1869     // slow path re-enters here
1870     __ bind(unlock_done);
1871     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1872       restore_native_result(masm, ret_type, stack_slots);
1873     }
1874 
1875     __ bind(fast_done);
1876   }
1877 
1878   if (DTraceMethodProbes) {
1879     // Tell dtrace about this method exit
1880     save_native_result(masm, ret_type, stack_slots);
1881     __ mov_metadata(rax, method());
1882     __ call_VM_leaf(
1883          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1884          thread, rax);
1885     restore_native_result(masm, ret_type, stack_slots);
1886   }
1887 
1888   // We can finally stop using that last_Java_frame we setup ages ago
1889 
1890   __ reset_last_Java_frame(thread, false);
1891 
1892   // Unbox oop result, e.g. JNIHandles::resolve value.
1893   if (is_reference_type(ret_type)) {
1894     __ resolve_jobject(rax /* value */,
1895                        thread /* thread */,
1896                        rcx /* tmp */);
1897   }
1898 
1899   if (CheckJNICalls) {
1900     // clear_pending_jni_exception_check
1901     __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
1902   }
1903 
1904   // reset handle block
1905   __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
1906   __ movl(Address(rcx, JNIHandleBlock::top_offset()), NULL_WORD);
1907 
1908   // Any exception pending?
1909   __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1910   __ jcc(Assembler::notEqual, exception_pending);
1911 
1912   // no exception, we're almost done
1913 
1914   // check that only result value is on FPU stack
1915   __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
1916 
1917   // Fixup floating pointer results so that result looks like a return from a compiled method
1918   if (ret_type == T_FLOAT) {
1919     if (UseSSE >= 1) {
1920       // Pop st0 and store as float and reload into xmm register
1921       __ fstp_s(Address(rbp, -4));
1922       __ movflt(xmm0, Address(rbp, -4));
1923     }
1924   } else if (ret_type == T_DOUBLE) {
1925     if (UseSSE >= 2) {
1926       // Pop st0 and store as double and reload into xmm register
1927       __ fstp_d(Address(rbp, -8));
1928       __ movdbl(xmm0, Address(rbp, -8));
1929     }
1930   }
1931 
1932   // Return
1933 
1934   __ leave();
1935   __ ret(0);
1936 
1937   // Unexpected paths are out of line and go here
1938 
1939   // Slow path locking & unlocking
1940   if (method->is_synchronized()) {
1941 
1942     // BEGIN Slow path lock
1943 
1944     __ bind(slow_path_lock);
1945 
1946     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
1947     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1948     __ push(thread);
1949     __ push(lock_reg);
1950     __ push(obj_reg);
1951     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1952     __ addptr(rsp, 3*wordSize);
1953 
1954 #ifdef ASSERT
1955     { Label L;
1956     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1957     __ jcc(Assembler::equal, L);
1958     __ stop("no pending exception allowed on exit from monitorenter");
1959     __ bind(L);
1960     }
1961 #endif
1962     __ jmp(lock_done);
1963 
1964     // END Slow path lock
1965 
1966     // BEGIN Slow path unlock
1967     __ bind(slow_path_unlock);
1968     __ vzeroupper();
1969     // Slow path unlock
1970 
1971     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1972       save_native_result(masm, ret_type, stack_slots);
1973     }
1974     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1975 
1976     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1977     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1978 
1979 
1980     // should be a peal
1981     // +wordSize because of the push above
1982     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1983     __ push(thread);
1984     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1985     __ push(rax);
1986 
1987     __ push(obj_reg);
1988     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
1989     __ addptr(rsp, 3*wordSize);
1990 #ifdef ASSERT
1991     {
1992       Label L;
1993       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1994       __ jcc(Assembler::equal, L);
1995       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1996       __ bind(L);
1997     }
1998 #endif /* ASSERT */
1999 
2000     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2001 
2002     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2003       restore_native_result(masm, ret_type, stack_slots);
2004     }
2005     __ jmp(unlock_done);
2006     // END Slow path unlock
2007 
2008   }
2009 
2010   // SLOW PATH Reguard the stack if needed
2011 
2012   __ bind(reguard);
2013   __ vzeroupper();
2014   save_native_result(masm, ret_type, stack_slots);
2015   {
2016     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2017   }
2018   restore_native_result(masm, ret_type, stack_slots);
2019   __ jmp(reguard_done);
2020 
2021 
2022   // BEGIN EXCEPTION PROCESSING
2023 
2024   // Forward  the exception
2025   __ bind(exception_pending);
2026 
2027   // remove possible return value from FPU register stack
2028   __ empty_FPU_stack();
2029 
2030   // pop our frame
2031   __ leave();
2032   // and forward the exception
2033   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2034 
2035   __ flush();
2036 
2037   nmethod *nm = nmethod::new_native_nmethod(method,
2038                                             compile_id,
2039                                             masm->code(),
2040                                             vep_offset,
2041                                             frame_complete,
2042                                             stack_slots / VMRegImpl::slots_per_word,
2043                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2044                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2045                                             oop_maps);
2046 
2047   return nm;
2048 
2049 }
2050 
2051 // this function returns the adjust size (in number of words) to a c2i adapter
2052 // activation for use during deoptimization
2053 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2054   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2055 }
2056 
2057 
2058 // Number of stack slots between incoming argument block and the start of
2059 // a new frame.  The PROLOG must add this many slots to the stack.  The
2060 // EPILOG must remove this many slots.  Intel needs one slot for
2061 // return address and one for rbp, (must save rbp)
2062 uint SharedRuntime::in_preserve_stack_slots() {
2063   return 2+VerifyStackAtCalls;
2064 }
2065 
2066 uint SharedRuntime::out_preserve_stack_slots() {
2067   return 0;
2068 }
2069 
2070 VMReg SharedRuntime::thread_register() {
2071   Unimplemented();
2072   return nullptr;
2073 }
2074 
2075 //------------------------------generate_deopt_blob----------------------------
2076 void SharedRuntime::generate_deopt_blob() {
2077   // allocate space for the code
2078   ResourceMark rm;
2079   // setup code generation tools
2080   // note: the buffer code size must account for StackShadowPages=50
2081   const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
2082   CodeBuffer   buffer(name, 1536, 1024);
2083   MacroAssembler* masm = new MacroAssembler(&buffer);
2084   int frame_size_in_words;
2085   OopMap* map = nullptr;
2086   // Account for the extra args we place on the stack
2087   // by the time we call fetch_unroll_info
2088   const int additional_words = 2; // deopt kind, thread
2089 
2090   OopMapSet *oop_maps = new OopMapSet();
2091 
2092   // -------------
2093   // This code enters when returning to a de-optimized nmethod.  A return
2094   // address has been pushed on the stack, and return values are in
2095   // registers.
2096   // If we are doing a normal deopt then we were called from the patched
2097   // nmethod from the point we returned to the nmethod. So the return
2098   // address on the stack is wrong by NativeCall::instruction_size
2099   // We will adjust the value to it looks like we have the original return
2100   // address on the stack (like when we eagerly deoptimized).
2101   // In the case of an exception pending with deoptimized then we enter
2102   // with a return address on the stack that points after the call we patched
2103   // into the exception handler. We have the following register state:
2104   //    rax,: exception
2105   //    rbx,: exception handler
2106   //    rdx: throwing pc
2107   // So in this case we simply jam rdx into the useless return address and
2108   // the stack looks just like we want.
2109   //
2110   // At this point we need to de-opt.  We save the argument return
2111   // registers.  We call the first C routine, fetch_unroll_info().  This
2112   // routine captures the return values and returns a structure which
2113   // describes the current frame size and the sizes of all replacement frames.
2114   // The current frame is compiled code and may contain many inlined
2115   // functions, each with their own JVM state.  We pop the current frame, then
2116   // push all the new frames.  Then we call the C routine unpack_frames() to
2117   // populate these frames.  Finally unpack_frames() returns us the new target
2118   // address.  Notice that callee-save registers are BLOWN here; they have
2119   // already been captured in the vframeArray at the time the return PC was
2120   // patched.
2121   address start = __ pc();
2122   Label cont;
2123 
2124   // Prolog for non exception case!
2125 
2126   // Save everything in sight.
2127 
2128   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2129   // Normal deoptimization
2130   __ push(Deoptimization::Unpack_deopt);
2131   __ jmp(cont);
2132 
2133   int reexecute_offset = __ pc() - start;
2134 
2135   // Reexecute case
2136   // return address is the pc describes what bci to do re-execute at
2137 
2138   // No need to update map as each call to save_live_registers will produce identical oopmap
2139   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2140 
2141   __ push(Deoptimization::Unpack_reexecute);
2142   __ jmp(cont);
2143 
2144   int exception_offset = __ pc() - start;
2145 
2146   // Prolog for exception case
2147 
2148   // all registers are dead at this entry point, except for rax, and
2149   // rdx which contain the exception oop and exception pc
2150   // respectively.  Set them in TLS and fall thru to the
2151   // unpack_with_exception_in_tls entry point.
2152 
2153   __ get_thread(rdi);
2154   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2155   __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2156 
2157   int exception_in_tls_offset = __ pc() - start;
2158 
2159   // new implementation because exception oop is now passed in JavaThread
2160 
2161   // Prolog for exception case
2162   // All registers must be preserved because they might be used by LinearScan
2163   // Exceptiop oop and throwing PC are passed in JavaThread
2164   // tos: stack at point of call to method that threw the exception (i.e. only
2165   // args are on the stack, no return address)
2166 
2167   // make room on stack for the return address
2168   // It will be patched later with the throwing pc. The correct value is not
2169   // available now because loading it from memory would destroy registers.
2170   __ push(0);
2171 
2172   // Save everything in sight.
2173 
2174   // No need to update map as each call to save_live_registers will produce identical oopmap
2175   (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2176 
2177   // Now it is safe to overwrite any register
2178 
2179   // store the correct deoptimization type
2180   __ push(Deoptimization::Unpack_exception);
2181 
2182   // load throwing pc from JavaThread and patch it as the return address
2183   // of the current frame. Then clear the field in JavaThread
2184   __ get_thread(rdi);
2185   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2186   __ movptr(Address(rbp, wordSize), rdx);
2187   __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2188 
2189 #ifdef ASSERT
2190   // verify that there is really an exception oop in JavaThread
2191   __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2192   __ verify_oop(rax);
2193 
2194   // verify that there is no pending exception
2195   Label no_pending_exception;
2196   __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2197   __ testptr(rax, rax);
2198   __ jcc(Assembler::zero, no_pending_exception);
2199   __ stop("must not have pending exception here");
2200   __ bind(no_pending_exception);
2201 #endif
2202 
2203   __ bind(cont);
2204 
2205   // Compiled code leaves the floating point stack dirty, empty it.
2206   __ empty_FPU_stack();
2207 
2208 
2209   // Call C code.  Need thread and this frame, but NOT official VM entry
2210   // crud.  We cannot block on this call, no GC can happen.
2211   __ get_thread(rcx);
2212   __ push(rcx);
2213   // fetch_unroll_info needs to call last_java_frame()
2214   __ set_last_Java_frame(rcx, noreg, noreg, nullptr, noreg);
2215 
2216   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2217 
2218   // Need to have an oopmap that tells fetch_unroll_info where to
2219   // find any register it might need.
2220 
2221   oop_maps->add_gc_map( __ pc()-start, map);
2222 
2223   // Discard args to fetch_unroll_info
2224   __ pop(rcx);
2225   __ pop(rcx);
2226 
2227   __ get_thread(rcx);
2228   __ reset_last_Java_frame(rcx, false);
2229 
2230   // Load UnrollBlock into EDI
2231   __ mov(rdi, rax);
2232 
2233   // Move the unpack kind to a safe place in the UnrollBlock because
2234   // we are very short of registers
2235 
2236   Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset());
2237   // retrieve the deopt kind from the UnrollBlock.
2238   __ movl(rax, unpack_kind);
2239 
2240    Label noException;
2241   __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2242   __ jcc(Assembler::notEqual, noException);
2243   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2244   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2245   __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2246   __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2247 
2248   __ verify_oop(rax);
2249 
2250   // Overwrite the result registers with the exception results.
2251   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2252   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2253 
2254   __ bind(noException);
2255 
2256   // Stack is back to only having register save data on the stack.
2257   // Now restore the result registers. Everything else is either dead or captured
2258   // in the vframeArray.
2259 
2260   RegisterSaver::restore_result_registers(masm);
2261 
2262   // Non standard control word may be leaked out through a safepoint blob, and we can
2263   // deopt at a poll point with the non standard control word. However, we should make
2264   // sure the control word is correct after restore_result_registers.
2265   __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
2266 
2267   // All of the register save area has been popped of the stack. Only the
2268   // return address remains.
2269 
2270   // Pop all the frames we must move/replace.
2271   //
2272   // Frame picture (youngest to oldest)
2273   // 1: self-frame (no frame link)
2274   // 2: deopting frame  (no frame link)
2275   // 3: caller of deopting frame (could be compiled/interpreted).
2276   //
2277   // Note: by leaving the return address of self-frame on the stack
2278   // and using the size of frame 2 to adjust the stack
2279   // when we are done the return to frame 3 will still be on the stack.
2280 
2281   // Pop deoptimized frame
2282   __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2283 
2284   // sp should be pointing at the return address to the caller (3)
2285 
2286   // Pick up the initial fp we should save
2287   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2288   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
2289 
2290 #ifdef ASSERT
2291   // Compilers generate code that bang the stack by as much as the
2292   // interpreter would need. So this stack banging should never
2293   // trigger a fault. Verify that it does not on non product builds.
2294   __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2295   __ bang_stack_size(rbx, rcx);
2296 #endif
2297 
2298   // Load array of frame pcs into ECX
2299   __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset()));
2300 
2301   __ pop(rsi); // trash the old pc
2302 
2303   // Load array of frame sizes into ESI
2304   __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset()));
2305 
2306   Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset());
2307 
2308   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
2309   __ movl(counter, rbx);
2310 
2311   // Now adjust the caller's stack to make up for the extra locals
2312   // but record the original sp so that we can save it in the skeletal interpreter
2313   // frame and the stack walking of interpreter_sender will get the unextended sp
2314   // value and not the "real" sp value.
2315 
2316   Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset());
2317   __ movptr(sp_temp, rsp);
2318   __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset()));
2319   __ subptr(rsp, rbx);
2320 
2321   // Push interpreter frames in a loop
2322   Label loop;
2323   __ bind(loop);
2324   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2325   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2326   __ pushptr(Address(rcx, 0));          // save return address
2327   __ enter();                           // save old & set new rbp,
2328   __ subptr(rsp, rbx);                  // Prolog!
2329   __ movptr(rbx, sp_temp);              // sender's sp
2330   // This value is corrected by layout_activation_impl
2331   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2332   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2333   __ movptr(sp_temp, rsp);              // pass to next frame
2334   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2335   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2336   __ decrementl(counter);             // decrement counter
2337   __ jcc(Assembler::notZero, loop);
2338   __ pushptr(Address(rcx, 0));          // save final return address
2339 
2340   // Re-push self-frame
2341   __ enter();                           // save old & set new rbp,
2342 
2343   //  Return address and rbp, are in place
2344   // We'll push additional args later. Just allocate a full sized
2345   // register save area
2346   __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2347 
2348   // Restore frame locals after moving the frame
2349   __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2350   __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2351   __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
2352   if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2353   if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2354 
2355   // Set up the args to unpack_frame
2356 
2357   __ pushl(unpack_kind);                     // get the unpack_kind value
2358   __ get_thread(rcx);
2359   __ push(rcx);
2360 
2361   // set last_Java_sp, last_Java_fp
2362   __ set_last_Java_frame(rcx, noreg, rbp, nullptr, noreg);
2363 
2364   // Call C code.  Need thread but NOT official VM entry
2365   // crud.  We cannot block on this call, no GC can happen.  Call should
2366   // restore return values to their stack-slots with the new SP.
2367   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2368   // Set an oopmap for the call site
2369   oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2370 
2371   // rax, contains the return result type
2372   __ push(rax);
2373 
2374   __ get_thread(rcx);
2375   __ reset_last_Java_frame(rcx, false);
2376 
2377   // Collect return values
2378   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2379   __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2380 
2381   // Clear floating point stack before returning to interpreter
2382   __ empty_FPU_stack();
2383 
2384   // Check if we should push the float or double return value.
2385   Label results_done, yes_double_value;
2386   __ cmpl(Address(rsp, 0), T_DOUBLE);
2387   __ jcc (Assembler::zero, yes_double_value);
2388   __ cmpl(Address(rsp, 0), T_FLOAT);
2389   __ jcc (Assembler::notZero, results_done);
2390 
2391   // return float value as expected by interpreter
2392   if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2393   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2394   __ jmp(results_done);
2395 
2396   // return double value as expected by interpreter
2397   __ bind(yes_double_value);
2398   if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2399   else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2400 
2401   __ bind(results_done);
2402 
2403   // Pop self-frame.
2404   __ leave();                              // Epilog!
2405 
2406   // Jump to interpreter
2407   __ ret(0);
2408 
2409   // -------------
2410   // make sure all code is generated
2411   masm->flush();
2412 
2413   _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2414   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2415 }
2416 
2417 //------------------------------generate_handler_blob------
2418 //
2419 // Generate a special Compile2Runtime blob that saves all registers,
2420 // setup oopmap, and calls safepoint code to stop the compiled code for
2421 // a safepoint.
2422 //
2423 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2424 
2425   // Account for thread arg in our frame
2426   const int additional_words = 1;
2427   int frame_size_in_words;
2428 
2429   assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2430   assert(is_polling_page_id(id), "expected a polling page stub id");
2431 
2432   ResourceMark rm;
2433   OopMapSet *oop_maps = new OopMapSet();
2434   OopMap* map;
2435 
2436   // allocate space for the code
2437   // setup code generation tools
2438   const char* name = SharedRuntime::stub_name(id);
2439   CodeBuffer   buffer(name, 2048, 1024);
2440   MacroAssembler* masm = new MacroAssembler(&buffer);
2441 
2442   const Register java_thread = rdi; // callee-saved for VC++
2443   address start   = __ pc();
2444   address call_pc = nullptr;
2445   bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
2446   bool save_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id);
2447 
2448   // If cause_return is true we are at a poll_return and there is
2449   // the return address on the stack to the caller on the nmethod
2450   // that is safepoint. We can leave this return on the stack and
2451   // effectively complete the return and safepoint in the caller.
2452   // Otherwise we push space for a return address that the safepoint
2453   // handler will install later to make the stack walking sensible.
2454   if (!cause_return)
2455     __ push(rbx);  // Make room for return address (or push it again)
2456 
2457   map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
2458 
2459   // The following is basically a call_VM. However, we need the precise
2460   // address of the call in order to generate an oopmap. Hence, we do all the
2461   // work ourselves.
2462 
2463   // Push thread argument and setup last_Java_sp
2464   __ get_thread(java_thread);
2465   __ push(java_thread);
2466   __ set_last_Java_frame(java_thread, noreg, noreg, nullptr, noreg);
2467 
2468   // if this was not a poll_return then we need to correct the return address now.
2469   if (!cause_return) {
2470     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
2471     // Additionally, rbx is a callee saved register and we can look at it later to determine
2472     // if someone changed the return address for us!
2473     __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
2474     __ movptr(Address(rbp, wordSize), rbx);
2475   }
2476 
2477   // do the call
2478   __ call(RuntimeAddress(call_ptr));
2479 
2480   // Set an oopmap for the call site.  This oopmap will map all
2481   // oop-registers and debug-info registers as callee-saved.  This
2482   // will allow deoptimization at this safepoint to find all possible
2483   // debug-info recordings, as well as let GC find all oops.
2484 
2485   oop_maps->add_gc_map( __ pc() - start, map);
2486 
2487   // Discard arg
2488   __ pop(rcx);
2489 
2490   Label noException;
2491 
2492   // Clear last_Java_sp again
2493   __ get_thread(java_thread);
2494   __ reset_last_Java_frame(java_thread, false);
2495 
2496   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
2497   __ jcc(Assembler::equal, noException);
2498 
2499   // Exception pending
2500   RegisterSaver::restore_live_registers(masm, save_vectors);
2501 
2502   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2503 
2504   __ bind(noException);
2505 
2506   Label no_adjust, bail, not_special;
2507   if (!cause_return) {
2508     // If our stashed return pc was modified by the runtime we avoid touching it
2509     __ cmpptr(rbx, Address(rbp, wordSize));
2510     __ jccb(Assembler::notEqual, no_adjust);
2511 
2512     // Skip over the poll instruction.
2513     // See NativeInstruction::is_safepoint_poll()
2514     // Possible encodings:
2515     //      85 00       test   %eax,(%rax)
2516     //      85 01       test   %eax,(%rcx)
2517     //      85 02       test   %eax,(%rdx)
2518     //      85 03       test   %eax,(%rbx)
2519     //      85 06       test   %eax,(%rsi)
2520     //      85 07       test   %eax,(%rdi)
2521     //
2522     //      85 04 24    test   %eax,(%rsp)
2523     //      85 45 00    test   %eax,0x0(%rbp)
2524 
2525 #ifdef ASSERT
2526     __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
2527 #endif
2528     // rsp/rbp base encoding takes 3 bytes with the following register values:
2529     // rsp 0x04
2530     // rbp 0x05
2531     __ movzbl(rcx, Address(rbx, 1));
2532     __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
2533     __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
2534     __ cmpptr(rcx, 1);
2535     __ jcc(Assembler::above, not_special);
2536     __ addptr(rbx, 1);
2537     __ bind(not_special);
2538 #ifdef ASSERT
2539     // Verify the correct encoding of the poll we're about to skip.
2540     __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
2541     __ jcc(Assembler::notEqual, bail);
2542     // Mask out the modrm bits
2543     __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
2544     // rax encodes to 0, so if the bits are nonzero it's incorrect
2545     __ jcc(Assembler::notZero, bail);
2546 #endif
2547     // Adjust return pc forward to step over the safepoint poll instruction
2548     __ addptr(rbx, 2);
2549     __ movptr(Address(rbp, wordSize), rbx);
2550   }
2551 
2552   __ bind(no_adjust);
2553   // Normal exit, register restoring and exit
2554   RegisterSaver::restore_live_registers(masm, save_vectors);
2555 
2556   __ ret(0);
2557 
2558 #ifdef ASSERT
2559   __ bind(bail);
2560   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2561 #endif
2562 
2563   // make sure all code is generated
2564   masm->flush();
2565 
2566   // Fill-out other meta info
2567   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2568 }
2569 
2570 //
2571 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
2572 //
2573 // Generate a stub that calls into vm to find out the proper destination
2574 // of a java call. All the argument registers are live at this point
2575 // but since this is generic code we don't know what they are and the caller
2576 // must do any gc of the args.
2577 //
2578 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
2579   assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2580   assert(is_resolve_id(id), "expected a resolve stub id");
2581 
2582   // allocate space for the code
2583   ResourceMark rm;
2584 
2585   const char* name = SharedRuntime::stub_name(id);
2586   CodeBuffer buffer(name, 1000, 512);
2587   MacroAssembler* masm                = new MacroAssembler(&buffer);
2588 
2589   int frame_size_words;
2590   enum frame_layout {
2591                 thread_off,
2592                 extra_words };
2593 
2594   OopMapSet *oop_maps = new OopMapSet();
2595   OopMap* map = nullptr;
2596 
2597   int start = __ offset();
2598 
2599   map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
2600 
2601   int frame_complete = __ offset();
2602 
2603   const Register thread = rdi;
2604   __ get_thread(rdi);
2605 
2606   __ push(thread);
2607   __ set_last_Java_frame(thread, noreg, rbp, nullptr, noreg);
2608 
2609   __ call(RuntimeAddress(destination));
2610 
2611 
2612   // Set an oopmap for the call site.
2613   // We need this not only for callee-saved registers, but also for volatile
2614   // registers that the compiler might be keeping live across a safepoint.
2615 
2616   oop_maps->add_gc_map( __ offset() - start, map);
2617 
2618   // rax, contains the address we are going to jump to assuming no exception got installed
2619 
2620   __ addptr(rsp, wordSize);
2621 
2622   // clear last_Java_sp
2623   __ reset_last_Java_frame(thread, true);
2624   // check for pending exceptions
2625   Label pending;
2626   __ cmpptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
2627   __ jcc(Assembler::notEqual, pending);
2628 
2629   // get the returned Method*
2630   __ get_vm_result_2(rbx, thread);
2631   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
2632 
2633   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
2634 
2635   RegisterSaver::restore_live_registers(masm);
2636 
2637   // We are back to the original state on entry and ready to go.
2638 
2639   __ jmp(rax);
2640 
2641   // Pending exception after the safepoint
2642 
2643   __ bind(pending);
2644 
2645   RegisterSaver::restore_live_registers(masm);
2646 
2647   // exception pending => remove activation and forward to exception handler
2648 
2649   __ get_thread(thread);
2650   __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
2651   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
2652   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2653 
2654   // -------------
2655   // make sure all code is generated
2656   masm->flush();
2657 
2658   // return the  blob
2659   // frame_size_words or bytes??
2660   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
2661 }
2662 
2663 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
2664   Unimplemented();
2665   return nullptr;
2666 }
2667 
2668   //------------------------------------------------------------------------------------------------------------------------
2669   // Continuation point for throwing of implicit exceptions that are not handled in
2670   // the current activation. Fabricates an exception oop and initiates normal
2671   // exception dispatching in this frame.
2672   //
2673   // Previously the compiler (c2) allowed for callee save registers on Java calls.
2674   // This is no longer true after adapter frames were removed but could possibly
2675   // be brought back in the future if the interpreter code was reworked and it
2676   // was deemed worthwhile. The comment below was left to describe what must
2677   // happen here if callee saves were resurrected. As it stands now this stub
2678   // could actually be a vanilla BufferBlob and have now oopMap at all.
2679   // Since it doesn't make much difference we've chosen to leave it the
2680   // way it was in the callee save days and keep the comment.
2681 
2682   // If we need to preserve callee-saved values we need a callee-saved oop map and
2683   // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
2684   // If the compiler needs all registers to be preserved between the fault
2685   // point and the exception handler then it must assume responsibility for that in
2686   // AbstractCompiler::continuation_for_implicit_null_exception or
2687   // continuation_for_implicit_division_by_zero_exception. All other implicit
2688   // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
2689   // either at call sites or otherwise assume that stack unwinding will be initiated,
2690   // so caller saved registers were assumed volatile in the compiler.
2691 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
2692   assert(is_throw_id(id), "expected a throw stub id");
2693 
2694   const char* name = SharedRuntime::stub_name(id);
2695 
2696   // Information about frame layout at time of blocking runtime call.
2697   // Note that we only have to preserve callee-saved registers since
2698   // the compilers are responsible for supplying a continuation point
2699   // if they expect all registers to be preserved.
2700   enum layout {
2701     thread_off,    // last_java_sp
2702     arg1_off,
2703     arg2_off,
2704     rbp_off,       // callee saved register
2705     ret_pc,
2706     framesize
2707   };
2708 
2709   int insts_size = 256;
2710   int locs_size  = 32;
2711 
2712   ResourceMark rm;
2713   const char* timer_msg = "SharedRuntime generate_throw_exception";
2714   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
2715 
2716   CodeBuffer code(name, insts_size, locs_size);
2717   OopMapSet* oop_maps  = new OopMapSet();
2718   MacroAssembler* masm = new MacroAssembler(&code);
2719 
2720   address start = __ pc();
2721 
2722   // This is an inlined and slightly modified version of call_VM
2723   // which has the ability to fetch the return PC out of
2724   // thread-local storage and also sets up last_Java_sp slightly
2725   // differently than the real call_VM
2726   Register java_thread = rbx;
2727   __ get_thread(java_thread);
2728 
2729   __ enter(); // required for proper stackwalking of RuntimeStub frame
2730 
2731   // pc and rbp, already pushed
2732   __ subptr(rsp, (framesize-2) * wordSize); // prolog
2733 
2734   // Frame is now completed as far as size and linkage.
2735 
2736   int frame_complete = __ pc() - start;
2737 
2738   // push java thread (becomes first argument of C function)
2739   __ movptr(Address(rsp, thread_off * wordSize), java_thread);
2740   // Set up last_Java_sp and last_Java_fp
2741   __ set_last_Java_frame(java_thread, rsp, rbp, nullptr, noreg);
2742 
2743   // Call runtime
2744   BLOCK_COMMENT("call runtime_entry");
2745   __ call(RuntimeAddress(runtime_entry));
2746   // Generate oop map
2747   OopMap* map =  new OopMap(framesize, 0);
2748   oop_maps->add_gc_map(__ pc() - start, map);
2749 
2750   // restore the thread (cannot use the pushed argument since arguments
2751   // may be overwritten by C code generated by an optimizing compiler);
2752   // however can use the register value directly if it is callee saved.
2753   __ get_thread(java_thread);
2754 
2755   __ reset_last_Java_frame(java_thread, true);
2756 
2757   __ leave(); // required for proper stackwalking of RuntimeStub frame
2758 
2759   // check for pending exceptions
2760 #ifdef ASSERT
2761   Label L;
2762   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
2763   __ jcc(Assembler::notEqual, L);
2764   __ should_not_reach_here();
2765   __ bind(L);
2766 #endif /* ASSERT */
2767   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2768 
2769 
2770   RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
2771   return stub;
2772 }
2773 
2774 #if INCLUDE_JFR
2775 
2776 static void jfr_prologue(address the_pc, MacroAssembler* masm) {
2777   Register java_thread = rdi;
2778   __ get_thread(java_thread);
2779   __ set_last_Java_frame(java_thread, rsp, rbp, the_pc, noreg);
2780   __ movptr(Address(rsp, 0), java_thread);
2781 }
2782 
2783 // The handle is dereferenced through a load barrier.
2784 static void jfr_epilogue(MacroAssembler* masm) {
2785   Register java_thread = rdi;
2786   __ get_thread(java_thread);
2787   __ reset_last_Java_frame(java_thread, true);
2788 }
2789 
2790 // For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
2791 // It returns a jobject handle to the event writer.
2792 // The handle is dereferenced and the return value is the event writer oop.
2793 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
2794   enum layout {
2795     FPUState_off         = 0,
2796     rbp_off              = FPUStateSizeInWords,
2797     rdi_off,
2798     rsi_off,
2799     rcx_off,
2800     rbx_off,
2801     saved_argument_off,
2802     saved_argument_off2, // 2nd half of double
2803     framesize
2804   };
2805 
2806   int insts_size = 1024;
2807   int locs_size = 64;
2808   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
2809   CodeBuffer code(name, insts_size, locs_size);
2810   OopMapSet* oop_maps = new OopMapSet();
2811   MacroAssembler* masm = new MacroAssembler(&code);
2812 
2813   address start = __ pc();
2814   __ enter();
2815   int frame_complete = __ pc() - start;
2816   address the_pc = __ pc();
2817   jfr_prologue(the_pc, masm);
2818   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
2819   jfr_epilogue(masm);
2820   __ resolve_global_jobject(rax, rdi, rdx);
2821   __ leave();
2822   __ ret(0);
2823 
2824   OopMap* map = new OopMap(framesize, 1); // rbp
2825   oop_maps->add_gc_map(the_pc - start, map);
2826 
2827   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
2828     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2829                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2830                                   oop_maps, false);
2831   return stub;
2832 }
2833 
2834 // For c2: call to return a leased buffer.
2835 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
2836   enum layout {
2837     FPUState_off = 0,
2838     rbp_off = FPUStateSizeInWords,
2839     rdi_off,
2840     rsi_off,
2841     rcx_off,
2842     rbx_off,
2843     saved_argument_off,
2844     saved_argument_off2, // 2nd half of double
2845     framesize
2846   };
2847 
2848   int insts_size = 1024;
2849   int locs_size = 64;
2850   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
2851   CodeBuffer code(name, insts_size, locs_size);
2852   OopMapSet* oop_maps = new OopMapSet();
2853   MacroAssembler* masm = new MacroAssembler(&code);
2854 
2855   address start = __ pc();
2856   __ enter();
2857   int frame_complete = __ pc() - start;
2858   address the_pc = __ pc();
2859   jfr_prologue(the_pc, masm);
2860   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
2861   jfr_epilogue(masm);
2862   __ leave();
2863   __ ret(0);
2864 
2865   OopMap* map = new OopMap(framesize, 1); // rbp
2866   oop_maps->add_gc_map(the_pc - start, map);
2867 
2868   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
2869     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
2870                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2871                                   oop_maps, false);
2872   return stub;
2873 }
2874 
2875 #endif // INCLUDE_JFR