1 /*
   2  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #ifndef _WINDOWS
  27 #include "alloca.h"
  28 #endif
  29 #include "asm/macroAssembler.hpp"
  30 #include "asm/macroAssembler.inline.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nativeInst.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/gcLocker.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/barrierSetAssembler.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "logging/log.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "oops/klass.inline.hpp"
  45 #include "oops/method.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/continuation.hpp"
  48 #include "runtime/continuationEntry.inline.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/jniHandles.hpp"
  51 #include "runtime/safepointMechanism.hpp"
  52 #include "runtime/sharedRuntime.hpp"
  53 #include "runtime/signature.hpp"
  54 #include "runtime/stubRoutines.hpp"
  55 #include "runtime/vframeArray.hpp"
  56 #include "runtime/vm_version.hpp"
  57 #include "utilities/align.hpp"
  58 #include "utilities/checkedCast.hpp"
  59 #include "utilities/formatBuffer.hpp"
  60 #include "vmreg_x86.inline.hpp"
  61 #ifdef COMPILER1
  62 #include "c1/c1_Runtime1.hpp"
  63 #endif
  64 #ifdef COMPILER2
  65 #include "opto/runtime.hpp"
  66 #endif
  67 #if INCLUDE_JVMCI
  68 #include "jvmci/jvmciJavaClasses.hpp"
  69 #endif
  70 
  71 #define __ masm->
  72 
  73 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  74 
  75 class SimpleRuntimeFrame {
  76 
  77   public:
  78 
  79   // Most of the runtime stubs have this simple frame layout.
  80   // This class exists to make the layout shared in one place.
  81   // Offsets are for compiler stack slots, which are jints.
  82   enum layout {
  83     // The frame sender code expects that rbp will be in the "natural" place and
  84     // will override any oopMap setting for it. We must therefore force the layout
  85     // so that it agrees with the frame sender code.
  86     rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
  87     rbp_off2,
  88     return_off, return_off2,
  89     framesize
  90   };
  91 };
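
// Illustrative sketch, not generated code: on platforms where
// frame::arg_reg_save_area_bytes is 0, the SimpleRuntimeFrame slots work out to
//   rbp_off    = 0, rbp_off2    = 1   (saved rbp, two jint slots = 8 bytes)
//   return_off = 2, return_off2 = 3   (return address)
//   framesize  = 4 slots              (16 bytes, preserving 16-byte alignment)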
  92 
  93 class RegisterSaver {
  94   // Capture info about frame layout.  Layout offsets are in jint
  95   // units because compiler frame slots are jints.
  96 #define XSAVE_AREA_BEGIN 160
  97 #define XSAVE_AREA_YMM_BEGIN 576
  98 #define XSAVE_AREA_OPMASK_BEGIN 1088
  99 #define XSAVE_AREA_ZMM_BEGIN 1152
 100 #define XSAVE_AREA_UPPERBANK 1664
 101 #define DEF_XMM_OFFS(regnum)       xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
 102 #define DEF_YMM_OFFS(regnum)       ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
 103 #define DEF_ZMM_OFFS(regnum)       zmm ## regnum ## _off = zmm_off + (regnum)*32/BytesPerInt, zmm ## regnum ## H_off
 104 #define DEF_OPMASK_OFFS(regnum)    opmask ## regnum ## _off = opmask_off + (regnum)*8/BytesPerInt,     opmask ## regnum ## H_off
 105 #define DEF_ZMM_UPPER_OFFS(regnum) zmm ## regnum ## _off = zmm_upper_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
 106   enum layout {
 107     fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
 108     xmm_off       = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt,            // offset in fxsave save area
 109     DEF_XMM_OFFS(0),
 110     DEF_XMM_OFFS(1),
 111     // 2..15 are implied in range usage
 112     ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
 113     DEF_YMM_OFFS(0),
 114     DEF_YMM_OFFS(1),
 115     // 2..15 are implied in range usage
 116     opmask_off         = xmm_off + (XSAVE_AREA_OPMASK_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
 117     DEF_OPMASK_OFFS(0),
 118     DEF_OPMASK_OFFS(1),
 119     // 2..7 are implied in range usage
 120     zmm_off = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
 121     DEF_ZMM_OFFS(0),
 122     DEF_ZMM_OFFS(1),
 123     zmm_upper_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
 124     DEF_ZMM_UPPER_OFFS(16),
 125     DEF_ZMM_UPPER_OFFS(17),
 126     // 18..31 are implied in range usage
 127     fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
 128     fpu_stateH_end,
 129     r15_off, r15H_off,
 130     r14_off, r14H_off,
 131     r13_off, r13H_off,
 132     r12_off, r12H_off,
 133     r11_off, r11H_off,
 134     r10_off, r10H_off,
 135     r9_off,  r9H_off,
 136     r8_off,  r8H_off,
 137     rdi_off, rdiH_off,
 138     rsi_off, rsiH_off,
 139     ignore_off, ignoreH_off,  // extra copy of rbp
 140     rsp_off, rspH_off,
 141     rbx_off, rbxH_off,
 142     rdx_off, rdxH_off,
 143     rcx_off, rcxH_off,
 144     rax_off, raxH_off,
 145     // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
 146     align_off, alignH_off,
 147     flags_off, flagsH_off,
 148     // The frame sender code expects that rbp will be in the "natural" place and
 149     // will override any oopMap setting for it. We must therefore force the layout
 150     // so that it agrees with the frame sender code.
 151     rbp_off, rbpH_off,        // copy of rbp we will restore
 152     return_off, returnH_off,  // slot for return address
 153     reg_save_size             // size in compiler stack slots
 154   };
 155 
 156  public:
 157   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors);
 158   static void restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors = false);
 159 
 160   // Offsets into the register save area
 161   // Used by deoptimization when it is managing result register
 162   // values on its own
 163 
 164   static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
 165   static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
 166   static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
 167   static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
 168   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
 169 
 170   // During deoptimization only the result registers need to be restored,
 171   // all the other values have already been extracted.
 172   static void restore_result_registers(MacroAssembler* masm);
 173 };
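
// A minimal worked example, assuming frame::arg_reg_save_area_bytes == 0:
//   fpu_state_off = 0
//   xmm_off       = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt = 160/4 = 40
//   xmm0_off      = xmm_off = 40
//   xmm0_offset_in_bytes() = BytesPerInt * xmm0_off = 160
// i.e. xmm0 lives 160 bytes above the final rsp of the save frame, which is
// the start of the legacy XMM region inside the FXSAVE/XSAVE image.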
 174 
 175 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
 176   int off = 0;
 177   int num_xmm_regs = XMMRegister::available_xmm_registers();
 178 #if COMPILER2_OR_JVMCI
 179   if (save_wide_vectors && UseAVX == 0) {
    save_wide_vectors = false; // vectors longer than 16 bytes are supported only with AVX
 181   }
 182   assert(!save_wide_vectors || MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 183 #else
 184   save_wide_vectors = false; // vectors are generated only by C2 and JVMCI
 185 #endif
 186 
  // Always make the frame size 16-byte aligned; both the vector and non-vector save areas are always allocated.
 188   int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
 189   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 190   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 191   // CodeBlob frame size is in words.
 192   int frame_size_in_words = frame_size_in_bytes / wordSize;
 193   *total_frame_words = frame_size_in_words;
 194 
 195   // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address, just as a normal enter would place it.
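  //
  // Rough picture of the frame built by this sequence (illustration only,
  // highest address first):
  //   [ return address ]                         <- pushed by the caller
  //   [ saved rbp      ]                         <- pushed by enter()
  //   [ flags + general purpose registers ]      <- push_CPU_state()
  //   [ FXSAVE/XSAVE area (xmm/ymm/zmm/opmask) ]
  //   [ optional argument register save area ]   <- rsp after this sequence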
 200 
 201   __ enter();          // rsp becomes 16-byte aligned here
 202   __ push_CPU_state(); // Push a multiple of 16 bytes
 203 
  // push_CPU_state handles this on EVEX-enabled targets
 205   if (save_wide_vectors) {
 206     // Save upper half of YMM registers(0..15)
 207     int base_addr = XSAVE_AREA_YMM_BEGIN;
 208     for (int n = 0; n < 16; n++) {
 209       __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
 210     }
 211     if (VM_Version::supports_evex()) {
 212       // Save upper half of ZMM registers(0..15)
 213       base_addr = XSAVE_AREA_ZMM_BEGIN;
 214       for (int n = 0; n < 16; n++) {
 215         __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
 216       }
 217       // Save full ZMM registers(16..num_xmm_regs)
 218       base_addr = XSAVE_AREA_UPPERBANK;
 219       off = 0;
 220       int vector_len = Assembler::AVX_512bit;
 221       for (int n = 16; n < num_xmm_regs; n++) {
 222         __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
 223       }
 224 #if COMPILER2_OR_JVMCI
 225       base_addr = XSAVE_AREA_OPMASK_BEGIN;
 226       off = 0;
 227       for(int n = 0; n < KRegister::number_of_registers; n++) {
 228         __ kmov(Address(rsp, base_addr+(off++*8)), as_KRegister(n));
 229       }
 230 #endif
 231     }
 232   } else {
 233     if (VM_Version::supports_evex()) {
 234       // Save upper bank of XMM registers(16..31) for scalar or 16-byte vector usage
 235       int base_addr = XSAVE_AREA_UPPERBANK;
 236       off = 0;
 237       int vector_len = VM_Version::supports_avx512vl() ?  Assembler::AVX_128bit : Assembler::AVX_512bit;
 238       for (int n = 16; n < num_xmm_regs; n++) {
 239         __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
 240       }
 241 #if COMPILER2_OR_JVMCI
 242       base_addr = XSAVE_AREA_OPMASK_BEGIN;
 243       off = 0;
 244       for(int n = 0; n < KRegister::number_of_registers; n++) {
 245         __ kmov(Address(rsp, base_addr+(off++*8)), as_KRegister(n));
 246       }
 247 #endif
 248     }
 249   }
 250   __ vzeroupper();
 251   if (frame::arg_reg_save_area_bytes != 0) {
 252     // Allocate argument register save area
 253     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 254   }
 255 
 256   // Set an oopmap for the call site.  This oopmap will map all
 257   // oop-registers and debug-info registers as callee-saved.  This
 258   // will allow deoptimization at this safepoint to find all possible
 259   // debug-info recordings, as well as let GC find all oops.
 260 
 261   OopMapSet *oop_maps = new OopMapSet();
 262   OopMap* map = new OopMap(frame_size_in_slots, 0);
 263 
 264 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x))
 265 
 266   map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
 267   map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
 268   map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
 269   map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap
  // and the location where rbp was saved is ignored
 272   map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
 273   map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
 274   map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
 275   map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
 276   map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
 277   map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
 278   map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
 279   map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
 280   map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
 281   map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
  // on EVEX-enabled targets it is included in the XSAVE area
 284   off = xmm0_off;
 285   int delta = xmm1_off - off;
 286   for (int n = 0; n < 16; n++) {
 287     XMMRegister xmm_name = as_XMMRegister(n);
 288     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 289     off += delta;
 290   }
 291   if (UseAVX > 2) {
 292     // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
 293     off = zmm16_off;
 294     delta = zmm17_off - off;
 295     for (int n = 16; n < num_xmm_regs; n++) {
 296       XMMRegister zmm_name = as_XMMRegister(n);
 297       map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
 298       off += delta;
 299     }
 300   }
 301 
 302 #if COMPILER2_OR_JVMCI
 303   if (save_wide_vectors) {
 304     // Save upper half of YMM registers(0..15)
 305     off = ymm0_off;
 306     delta = ymm1_off - ymm0_off;
 307     for (int n = 0; n < 16; n++) {
 308       XMMRegister ymm_name = as_XMMRegister(n);
 309       map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
 310       off += delta;
 311     }
 312     if (VM_Version::supports_evex()) {
 313       // Save upper half of ZMM registers(0..15)
 314       off = zmm0_off;
 315       delta = zmm1_off - zmm0_off;
 316       for (int n = 0; n < 16; n++) {
 317         XMMRegister zmm_name = as_XMMRegister(n);
 318         map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next(8));
 319         off += delta;
 320       }
 321     }
 322   }
 323 #endif // COMPILER2_OR_JVMCI
 324 
 325   // %%% These should all be a waste but we'll keep things as they were for now
 326   if (true) {
 327     map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
 328     map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
 329     map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
 330     map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
 331     // rbp location is known implicitly by the frame sender code, needs no oopmap
 332     map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
 333     map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
 334     map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
 335     map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
 336     map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
 337     map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
 338     map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
 339     map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
 340     map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
 341     map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
    // on EVEX-enabled targets it is included in the XSAVE area
 344     off = xmm0H_off;
 345     delta = xmm1H_off - off;
 346     for (int n = 0; n < 16; n++) {
 347       XMMRegister xmm_name = as_XMMRegister(n);
 348       map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
 349       off += delta;
 350     }
 351     if (UseAVX > 2) {
 352       // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
 353       off = zmm16H_off;
 354       delta = zmm17H_off - off;
 355       for (int n = 16; n < num_xmm_regs; n++) {
 356         XMMRegister zmm_name = as_XMMRegister(n);
 357         map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
 358         off += delta;
 359       }
 360     }
 361   }
 362 
 363   return map;
 364 }
 365 
 366 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors) {
 367   int num_xmm_regs = XMMRegister::available_xmm_registers();
 368   if (frame::arg_reg_save_area_bytes != 0) {
 369     // Pop arg register save area
 370     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 371   }
 372 
 373 #if COMPILER2_OR_JVMCI
 374   if (restore_wide_vectors) {
 375     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 376     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 377   }
 378 #else
 379   assert(!restore_wide_vectors, "vectors are generated only by C2");
 380 #endif
 381 
 382   __ vzeroupper();
 383 
  // On EVEX-enabled targets everything is handled by pop_CPU_state
 385   if (restore_wide_vectors) {
 386     // Restore upper half of YMM registers (0..15)
 387     int base_addr = XSAVE_AREA_YMM_BEGIN;
 388     for (int n = 0; n < 16; n++) {
 389       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
 390     }
 391     if (VM_Version::supports_evex()) {
 392       // Restore upper half of ZMM registers (0..15)
 393       base_addr = XSAVE_AREA_ZMM_BEGIN;
 394       for (int n = 0; n < 16; n++) {
 395         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
 396       }
 397       // Restore full ZMM registers(16..num_xmm_regs)
 398       base_addr = XSAVE_AREA_UPPERBANK;
 399       int vector_len = Assembler::AVX_512bit;
 400       int off = 0;
 401       for (int n = 16; n < num_xmm_regs; n++) {
 402         __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
 403       }
 404 #if COMPILER2_OR_JVMCI
 405       base_addr = XSAVE_AREA_OPMASK_BEGIN;
 406       off = 0;
 407       for (int n = 0; n < KRegister::number_of_registers; n++) {
 408         __ kmov(as_KRegister(n), Address(rsp, base_addr+(off++*8)));
 409       }
 410 #endif
 411     }
 412   } else {
 413     if (VM_Version::supports_evex()) {
 414       // Restore upper bank of XMM registers(16..31) for scalar or 16-byte vector usage
 415       int base_addr = XSAVE_AREA_UPPERBANK;
 416       int off = 0;
 417       int vector_len = VM_Version::supports_avx512vl() ?  Assembler::AVX_128bit : Assembler::AVX_512bit;
 418       for (int n = 16; n < num_xmm_regs; n++) {
 419         __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
 420       }
 421 #if COMPILER2_OR_JVMCI
 422       base_addr = XSAVE_AREA_OPMASK_BEGIN;
 423       off = 0;
 424       for (int n = 0; n < KRegister::number_of_registers; n++) {
 425         __ kmov(as_KRegister(n), Address(rsp, base_addr+(off++*8)));
 426       }
 427 #endif
 428     }
 429   }
 430 
 431   // Recover CPU state
 432   __ pop_CPU_state();
 433   // Get the rbp described implicitly by the calling convention (no oopMap)
 434   __ pop(rbp);
 435 }
 436 
 437 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 438 
 439   // Just restore result register. Only used by deoptimization. By
 440   // now any callee save register that needs to be restored to a c2
 441   // caller of the deoptee has been extracted into the vframeArray
 442   // and will be stuffed into the c2i adapter we create for later
 443   // restoration so only result registers need to be restored here.
 444 
 445   // Restore fp result register
 446   __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
 447   // Restore integer result register
 448   __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
 449   __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
 450 
  // Pop all of the register save area off the stack except the return address
 452   __ addptr(rsp, return_offset_in_bytes());
 453 }
 454 
// Is the vector's size (in bytes) bigger than the size saved by default?
// 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
 457 bool SharedRuntime::is_wide_vector(int size) {
 458   return size > 16;
 459 }
 460 
 461 // ---------------------------------------------------------------------------
 462 // Read the array of BasicTypes from a signature, and compute where the
 463 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
 464 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
 465 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
 466 // as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.
 471 
 472 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
 473 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
 474 // units regardless of build. Of course for i486 there is no 64 bit build
 475 
 476 // The Java calling convention is a "shifted" version of the C ABI.
 477 // By skipping the first C ABI register we can call non-static jni methods
 478 // with small numbers of arguments without having to shuffle the arguments
 479 // at all. Since we control the java ABI we ought to at least get some
 480 // advantage out of it.
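//
// For illustration only (the mapping follows from the code below): a call with
// Java signature (Object, int, long, double, float) is laid out as
//   arg0 T_OBJECT -> j_rarg0
//   arg1 T_INT    -> j_rarg1
//   arg2 T_LONG   -> j_rarg2   (its trailing T_VOID half gets no register)
//   arg3 T_DOUBLE -> j_farg0   (likewise followed by a T_VOID half)
//   arg4 T_FLOAT  -> j_farg1
// and java_calling_convention() returns 0 because no stack slots are needed.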
 481 
 482 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 483                                            VMRegPair *regs,
 484                                            int total_args_passed) {
 485 
 486   // Create the mapping between argument positions and
 487   // registers.
 488   static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
 489     j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
 490   };
 491   static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
 492     j_farg0, j_farg1, j_farg2, j_farg3,
 493     j_farg4, j_farg5, j_farg6, j_farg7
 494   };
 495 
 496 
 497   uint int_args = 0;
 498   uint fp_args = 0;
 499   uint stk_args = 0;
 500 
 501   for (int i = 0; i < total_args_passed; i++) {
 502     switch (sig_bt[i]) {
 503     case T_BOOLEAN:
 504     case T_CHAR:
 505     case T_BYTE:
 506     case T_SHORT:
 507     case T_INT:
 508       if (int_args < Argument::n_int_register_parameters_j) {
 509         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 510       } else {
 511         stk_args = align_up(stk_args, 2);
 512         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 513         stk_args += 1;
 514       }
 515       break;
 516     case T_VOID:
 517       // halves of T_LONG or T_DOUBLE
 518       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 519       regs[i].set_bad();
 520       break;
 521     case T_LONG:
 522       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 523       // fall through
 524     case T_OBJECT:
 525     case T_ARRAY:
 526     case T_ADDRESS:
 527       if (int_args < Argument::n_int_register_parameters_j) {
 528         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 529       } else {
 530         stk_args = align_up(stk_args, 2);
 531         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 532         stk_args += 2;
 533       }
 534       break;
 535     case T_FLOAT:
 536       if (fp_args < Argument::n_float_register_parameters_j) {
 537         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 538       } else {
 539         stk_args = align_up(stk_args, 2);
 540         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 541         stk_args += 1;
 542       }
 543       break;
 544     case T_DOUBLE:
 545       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 546       if (fp_args < Argument::n_float_register_parameters_j) {
 547         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 548       } else {
 549         stk_args = align_up(stk_args, 2);
 550         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 551         stk_args += 2;
 552       }
 553       break;
 554     default:
 555       ShouldNotReachHere();
 556       break;
 557     }
 558   }
 559 
 560   return stk_args;
 561 }
 562 
 563 // Patch the callers callsite with entry to compiled code if it exists.
 564 static void patch_callers_callsite(MacroAssembler *masm) {
 565   Label L;
 566   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
 567   __ jcc(Assembler::equal, L);
 568 
 569   // Save the current stack pointer
 570   __ mov(r13, rsp);
 571   // Schedule the branch target address early.
 572   // Call into the VM to patch the caller, then jump to compiled callee
 573   // rax isn't live so capture return address while we easily can
 574   __ movptr(rax, Address(rsp, 0));
 575 
 576   // align stack so push_CPU_state doesn't fault
 577   __ andptr(rsp, -(StackAlignmentInBytes));
 578   __ push_CPU_state();
 579   __ vzeroupper();
 580   // VM needs caller's callsite
 581   // VM needs target method
 582   // This needs to be a long call since we will relocate this adapter to
 583   // the codeBuffer and it may not reach
 584 
 585   // Allocate argument register save area
 586   if (frame::arg_reg_save_area_bytes != 0) {
 587     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 588   }
 589   __ mov(c_rarg0, rbx);
 590   __ mov(c_rarg1, rax);
 591   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 592 
 593   // De-allocate argument register save area
 594   if (frame::arg_reg_save_area_bytes != 0) {
 595     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 596   }
 597 
 598   __ vzeroupper();
 599   __ pop_CPU_state();
 600   // restore sp
 601   __ mov(rsp, r13);
 602   __ bind(L);
 603 }
 604 
 605 
 606 static void gen_c2i_adapter(MacroAssembler *masm,
 607                             int total_args_passed,
 608                             int comp_args_on_stack,
 609                             const BasicType *sig_bt,
 610                             const VMRegPair *regs,
 611                             Label& skip_fixup) {
 612   // Before we get into the guts of the C2I adapter, see if we should be here
 613   // at all.  We've come from compiled code and are attempting to jump to the
 614   // interpreter, which means the caller made a static call to get here
 615   // (vcalls always get a compiled target if there is one).  Check for a
 616   // compiled target.  If there is one, we need to patch the caller's call.
 617   patch_callers_callsite(masm);
 618 
 619   __ bind(skip_fixup);
 620 
 621   // Since all args are passed on the stack, total_args_passed *
 622   // Interpreter::stackElementSize is the space we need.
 623 
 624   assert(total_args_passed >= 0, "total_args_passed is %d", total_args_passed);
 625 
 626   int extraspace = (total_args_passed * Interpreter::stackElementSize);
 627 
 628   // stack is aligned, keep it that way
 629   // This is not currently needed or enforced by the interpreter, but
 630   // we might as well conform to the ABI.
 631   extraspace = align_up(extraspace, 2*wordSize);
 632 
 633   // set senderSP value
 634   __ lea(r13, Address(rsp, wordSize));
 635 
 636 #ifdef ASSERT
 637   __ check_stack_alignment(r13, "sender stack not aligned");
 638 #endif
 639   if (extraspace > 0) {
 640     // Pop the return address
 641     __ pop(rax);
 642 
 643     __ subptr(rsp, extraspace);
 644 
 645     // Push the return address
 646     __ push(rax);
 647 
 648     // Account for the return address location since we store it first rather
 649     // than hold it in a register across all the shuffling
 650     extraspace += wordSize;
 651   }
 652 
 653 #ifdef ASSERT
 654   __ check_stack_alignment(rsp, "callee stack not aligned", wordSize, rax);
 655 #endif
 656 
 657   // Now write the args into the outgoing interpreter space
 658   for (int i = 0; i < total_args_passed; i++) {
 659     if (sig_bt[i] == T_VOID) {
 660       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 661       continue;
 662     }
 663 
 664     // offset to start parameters
 665     int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
 666     int next_off = st_off - Interpreter::stackElementSize;
 667 
 668     // Say 4 args:
 669     // i   st_off
 670     // 0   32 T_LONG
 671     // 1   24 T_VOID
 672     // 2   16 T_OBJECT
 673     // 3    8 T_BOOL
 674     // -    0 return address
 675     //
    // However, to make things extra confusing: because we can fit a long/double in
    // a single slot on a 64-bit VM, and it would be silly to break them up, the
    // interpreter leaves one slot empty and only stores to a single slot. In this
    // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.
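    //
    // Concretely, for the T_LONG at i == 0 above: the 64-bit value is stored
    // at next_off (24), which coincides with the T_VOID slot, while in debug
    // builds st_off (32) is overwritten with junk to catch stray reads.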
 680 
 681     VMReg r_1 = regs[i].first();
 682     VMReg r_2 = regs[i].second();
 683     if (!r_1->is_valid()) {
 684       assert(!r_2->is_valid(), "");
 685       continue;
 686     }
 687     if (r_1->is_stack()) {
      // memory to memory: use rax as a temp
 689       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 690       if (!r_2->is_valid()) {
 691         // sign extend??
 692         __ movl(rax, Address(rsp, ld_off));
 693         __ movptr(Address(rsp, st_off), rax);
 694 
 695       } else {
 696 
 697         __ movq(rax, Address(rsp, ld_off));
 698 
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 700         // T_DOUBLE and T_LONG use two slots in the interpreter
 701         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 702           // ld_off == LSW, ld_off+wordSize == MSW
 703           // st_off == MSW, next_off == LSW
 704           __ movq(Address(rsp, next_off), rax);
 705 #ifdef ASSERT
 706           // Overwrite the unused slot with known junk
 707           __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 708           __ movptr(Address(rsp, st_off), rax);
 709 #endif /* ASSERT */
 710         } else {
 711           __ movq(Address(rsp, st_off), rax);
 712         }
 713       }
 714     } else if (r_1->is_Register()) {
 715       Register r = r_1->as_Register();
 716       if (!r_2->is_valid()) {
        // must be only an int (or smaller) so move only 32 bits to the slot
 718         // why not sign extend??
 719         __ movl(Address(rsp, st_off), r);
 720       } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 722         // T_DOUBLE and T_LONG use two slots in the interpreter
 723         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 724           // long/double in gpr
 725 #ifdef ASSERT
 726           // Overwrite the unused slot with known junk
 727           __ mov64(rax, CONST64(0xdeadffffdeadaaab));
 728           __ movptr(Address(rsp, st_off), rax);
 729 #endif /* ASSERT */
 730           __ movq(Address(rsp, next_off), r);
 731         } else {
 732           __ movptr(Address(rsp, st_off), r);
 733         }
 734       }
 735     } else {
 736       assert(r_1->is_XMMRegister(), "");
 737       if (!r_2->is_valid()) {
        // only a float: use just part of the slot
 739         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
 740       } else {
 741 #ifdef ASSERT
 742         // Overwrite the unused slot with known junk
 743         __ mov64(rax, CONST64(0xdeadffffdeadaaac));
 744         __ movptr(Address(rsp, st_off), rax);
 745 #endif /* ASSERT */
 746         __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
 747       }
 748     }
 749   }
 750 
 751   // Schedule the branch target address early.
 752   __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
 753   __ jmp(rcx);
 754 }
 755 
 756 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
 757                         address code_start, address code_end,
 758                         Label& L_ok) {
 759   Label L_fail;
 760   __ lea(temp_reg, ExternalAddress(code_start));
 761   __ cmpptr(pc_reg, temp_reg);
 762   __ jcc(Assembler::belowEqual, L_fail);
 763   __ lea(temp_reg, ExternalAddress(code_end));
 764   __ cmpptr(pc_reg, temp_reg);
 765   __ jcc(Assembler::below, L_ok);
 766   __ bind(L_fail);
 767 }
 768 
 769 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 770                                     int total_args_passed,
 771                                     int comp_args_on_stack,
 772                                     const BasicType *sig_bt,
 773                                     const VMRegPair *regs) {
 774 
  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args, because
  // we must align the stack to 16 bytes on an i2c entry or we lose the
  // alignment we expect in all compiled code, and the register save code
  // can segv when fxsave instructions find an improperly aligned stack
  // pointer.
 783 
 784   // Adapters can be frameless because they do not require the caller
 785   // to perform additional cleanup work, such as correcting the stack pointer.
 786   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
 787   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
 788   // even if a callee has modified the stack pointer.
 789   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
 790   // routinely repairs its caller's stack pointer (from sender_sp, which is set
 791   // up via the senderSP register).
 792   // In other words, if *either* the caller or callee is interpreted, we can
 793   // get the stack pointer repaired after a call.
 794   // This is why c2i and i2c adapters cannot be indefinitely composed.
 795   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 796   // both caller and callee would be compiled methods, and neither would
 797   // clean up the stack pointer changes performed by the two adapters.
 798   // If this happens, control eventually transfers back to the compiled
 799   // caller, but with an uncorrected stack, causing delayed havoc.
 800 
 801   if (VerifyAdapterCalls &&
 802       (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
 803     // So, let's test for cascading c2i/i2c adapters right now.
 804     //  assert(Interpreter::contains($return_addr) ||
 805     //         StubRoutines::contains($return_addr),
 806     //         "i2c adapter must return to an interpreter frame");
 807     __ block_comment("verify_i2c { ");
 808     // Pick up the return address
 809     __ movptr(rax, Address(rsp, 0));
 810     Label L_ok;
 811     if (Interpreter::code() != nullptr) {
 812       range_check(masm, rax, r11,
 813                   Interpreter::code()->code_start(),
 814                   Interpreter::code()->code_end(),
 815                   L_ok);
 816     }
 817     if (StubRoutines::initial_stubs_code() != nullptr) {
 818       range_check(masm, rax, r11,
 819                   StubRoutines::initial_stubs_code()->code_begin(),
 820                   StubRoutines::initial_stubs_code()->code_end(),
 821                   L_ok);
 822     }
 823     if (StubRoutines::final_stubs_code() != nullptr) {
 824       range_check(masm, rax, r11,
 825                   StubRoutines::final_stubs_code()->code_begin(),
 826                   StubRoutines::final_stubs_code()->code_end(),
 827                   L_ok);
 828     }
 829     const char* msg = "i2c adapter must return to an interpreter frame";
 830     __ block_comment(msg);
 831     __ stop(msg);
 832     __ bind(L_ok);
    __ block_comment("} verify_i2c ");
 834   }
 835 
 836   // Must preserve original SP for loading incoming arguments because
 837   // we need to align the outgoing SP for compiled code.
 838   __ movptr(r11, rsp);
 839 
 840   // Pick up the return address
 841   __ pop(rax);
 842 
 843   // Convert 4-byte c2 stack slots to words.
 844   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
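  // e.g. comp_args_on_stack == 5 slots -> 20 bytes -> rounded up to 24 bytes
  // -> comp_words_on_stack == 3 (illustration only).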
 845 
 846   if (comp_args_on_stack) {
 847     __ subptr(rsp, comp_words_on_stack * wordSize);
 848   }
 849 
 850   // Ensure compiled code always sees stack at proper alignment
 851   __ andptr(rsp, -16);
 852 
  // Push the return address; this misaligns the stack so that the youngest
  // frame sees rsp exactly as it would just after a call instruction
 855   __ push(rax);
 856 
 857   // Put saved SP in another register
 858   const Register saved_sp = rax;
 859   __ movptr(saved_sp, r11);
 860 
 861   // Will jump to the compiled code just as if compiled code was doing it.
 862   // Pre-load the register-jump target early, to schedule it better.
 863   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
 864 
 865 #if INCLUDE_JVMCI
 866   if (EnableJVMCI) {
 867     // check if this call should be routed towards a specific entry point
 868     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
 869     Label no_alternative_target;
 870     __ jcc(Assembler::equal, no_alternative_target);
 871     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 872     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
 873     __ bind(no_alternative_target);
 874   }
 875 #endif // INCLUDE_JVMCI
 876 
 877   // Now generate the shuffle code.  Pick up all register args and move the
 878   // rest through the floating point stack top.
 879   for (int i = 0; i < total_args_passed; i++) {
 880     if (sig_bt[i] == T_VOID) {
 881       // Longs and doubles are passed in native word order, but misaligned
 882       // in the 32-bit build.
 883       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 884       continue;
 885     }
 886 
 887     // Pick up 0, 1 or 2 words from SP+offset.
 888 
 889     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 890             "scrambled load targets?");
 891     // Load in argument order going down.
 892     int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
 893     // Point to interpreter value (vs. tag)
 894     int next_off = ld_off - Interpreter::stackElementSize;
 895     //
 896     //
 897     //
 898     VMReg r_1 = regs[i].first();
 899     VMReg r_2 = regs[i].second();
 900     if (!r_1->is_valid()) {
 901       assert(!r_2->is_valid(), "");
 902       continue;
 903     }
 904     if (r_1->is_stack()) {
 905       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 906       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
 907 
 908       // We can use r13 as a temp here because compiled code doesn't need r13 as an input
 909       // and if we end up going thru a c2i because of a miss a reasonable value of r13
 910       // will be generated.
 911       if (!r_2->is_valid()) {
 912         // sign extend???
 913         __ movl(r13, Address(saved_sp, ld_off));
 914         __ movptr(Address(rsp, st_off), r13);
 915       } else {
 916         //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
 920         //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed at negative offsets, so the LSW is at the lower address
 923 
 924         // ld_off is MSW so get LSW
 925         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 926                            next_off : ld_off;
 927         __ movq(r13, Address(saved_sp, offset));
 928         // st_off is LSW (i.e. reg.first())
 929         __ movq(Address(rsp, st_off), r13);
 930       }
 931     } else if (r_1->is_Register()) {  // Register argument
 932       Register r = r_1->as_Register();
 933       assert(r != rax, "must be different");
 934       if (r_2->is_valid()) {
 935         //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
 939 
 940         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 941                            next_off : ld_off;
 942 
 943         // this can be a misaligned move
 944         __ movq(r, Address(saved_sp, offset));
 945       } else {
 946         // sign extend and use a full word?
 947         __ movl(r, Address(saved_sp, ld_off));
 948       }
 949     } else {
 950       if (!r_2->is_valid()) {
 951         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
 952       } else {
 953         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
 954       }
 955     }
 956   }
 957 
 958   __ push_cont_fastpath(); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
 959 
 960   // 6243940 We might end up in handle_wrong_method if
 961   // the callee is deoptimized as we race thru here. If that
 962   // happens we don't want to take a safepoint because the
 963   // caller frame will look interpreted and arguments are now
 964   // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately, if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
 969 
 970   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
 971 
  // Put the Method* where a c2i adapter would expect it, should we end up there.
  // Only needed because c2 resolve stubs return the Method* as a result in
  // rax.
 975   __ mov(rax, rbx);
 976   __ jmp(r11);
 977 }
 978 
 979 // ---------------------------------------------------------------
 980 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 981                                                             int total_args_passed,
 982                                                             int comp_args_on_stack,
 983                                                             const BasicType *sig_bt,
 984                                                             const VMRegPair *regs,
 985                                                             AdapterFingerPrint* fingerprint) {
 986   address i2c_entry = __ pc();
 987 
 988   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 989 
 990   // -------------------------------------------------------------------------
 991   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
 992   // to the interpreter.  The args start out packed in the compiled layout.  They
 993   // need to be unpacked into the interpreter layout.  This will almost always
 994   // require some stack space.  We grow the current (compiled) stack, then repack
 995   // the args.  We  finally end in a jump to the generic interpreter entry point.
 996   // On exit from the interpreter, the interpreter will restore our SP (lest the
 997   // compiled code, which relies solely on SP and not RBP, get sick).
 998 
 999   address c2i_unverified_entry = __ pc();
1000   Label skip_fixup;
1001 
1002   Register data = rax;
1003   Register receiver = j_rarg0;
1004   Register temp = rbx;
1005 
1006   {
1007     __ ic_check(1 /* end_alignment */);
1008     __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
    // The method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
1012     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
1013     __ jcc(Assembler::equal, skip_fixup);
1014     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1015   }
1016 
1017   address c2i_entry = __ pc();
1018 
1019   // Class initialization barrier for static methods
1020   address c2i_no_clinit_check_entry = nullptr;
1021   if (VM_Version::supports_fast_class_init_checks()) {
1022     Label L_skip_barrier;
1023     Register method = rbx;
1024 
1025     { // Bypass the barrier for non-static methods
1026       Register flags = rscratch1;
1027       __ movl(flags, Address(method, Method::access_flags_offset()));
1028       __ testl(flags, JVM_ACC_STATIC);
1029       __ jcc(Assembler::zero, L_skip_barrier); // non-static
1030     }
1031 
1032     Register klass = rscratch1;
1033     __ load_method_holder(klass, method);
1034     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
1035 
1036     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
1037 
1038     __ bind(L_skip_barrier);
1039     c2i_no_clinit_check_entry = __ pc();
1040   }
1041 
1042   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1043   bs->c2i_entry_barrier(masm);
1044 
1045   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1046 
1047   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1048 }
1049 
1050 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1051                                          VMRegPair *regs,
1052                                          int total_args_passed) {
1053 
// We return the number of VMRegImpl stack slots we need to reserve for all
1055 // the arguments NOT counting out_preserve_stack_slots.
1056 
1057 // NOTE: These arrays will have to change when c1 is ported
1058 #ifdef _WIN64
1059     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1060       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1061     };
1062     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1063       c_farg0, c_farg1, c_farg2, c_farg3
1064     };
1065 #else
1066     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1067       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
1068     };
1069     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1070       c_farg0, c_farg1, c_farg2, c_farg3,
1071       c_farg4, c_farg5, c_farg6, c_farg7
1072     };
1073 #endif // _WIN64
1074 
1075 
1076     uint int_args = 0;
1077     uint fp_args = 0;
1078     uint stk_args = 0; // inc by 2 each time
1079 
1080     for (int i = 0; i < total_args_passed; i++) {
1081       switch (sig_bt[i]) {
1082       case T_BOOLEAN:
1083       case T_CHAR:
1084       case T_BYTE:
1085       case T_SHORT:
1086       case T_INT:
1087         if (int_args < Argument::n_int_register_parameters_c) {
1088           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1089 #ifdef _WIN64
1090           fp_args++;
          // Allocate slots for the callee to stuff register args on the stack.
1092           stk_args += 2;
1093 #endif
1094         } else {
1095           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1096           stk_args += 2;
1097         }
1098         break;
1099       case T_LONG:
1100         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1101         // fall through
1102       case T_OBJECT:
1103       case T_ARRAY:
1104       case T_ADDRESS:
1105       case T_METADATA:
1106         if (int_args < Argument::n_int_register_parameters_c) {
1107           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1108 #ifdef _WIN64
1109           fp_args++;
1110           stk_args += 2;
1111 #endif
1112         } else {
1113           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1114           stk_args += 2;
1115         }
1116         break;
1117       case T_FLOAT:
1118         if (fp_args < Argument::n_float_register_parameters_c) {
1119           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1120 #ifdef _WIN64
1121           int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
1123           stk_args += 2;
1124 #endif
1125         } else {
1126           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1127           stk_args += 2;
1128         }
1129         break;
1130       case T_DOUBLE:
1131         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1132         if (fp_args < Argument::n_float_register_parameters_c) {
1133           regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1134 #ifdef _WIN64
1135           int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
1137           stk_args += 2;
1138 #endif
1139         } else {
1140           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1141           stk_args += 2;
1142         }
1143         break;
1144       case T_VOID: // Halves of longs and doubles
1145         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1146         regs[i].set_bad();
1147         break;
1148       default:
1149         ShouldNotReachHere();
1150         break;
1151       }
1152     }
1153 #ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
1156   if (stk_args < 8) {
1157     stk_args = 8;
1158   }
1159 #endif // _WIN64
1160 
1161   return stk_args;
1162 }
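
// Worked example (illustration only): for a native signature
// (JNIEnv*, jobject, jlong, jdouble) the loop above yields
//   SysV (Linux):  c_rarg0, c_rarg1, c_rarg2, c_farg0; return value 0
//   Win64:         c_rarg0, c_rarg1, c_rarg2, c_farg3; return value 8
// On Win64 the argument position selects the register, and the returned
// 8 slots (32 bytes) cover the mandatory shadow space.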
1163 
1164 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
1165                                              uint num_bits,
1166                                              uint total_args_passed) {
1167   assert(num_bits == 64 || num_bits == 128 || num_bits == 256 || num_bits == 512,
1168          "only certain vector sizes are supported for now");
1169 
1170   static const XMMRegister VEC_ArgReg[32] = {
1171      xmm0,  xmm1,  xmm2,  xmm3,  xmm4,  xmm5,  xmm6,  xmm7,
1172      xmm8,  xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
1173     xmm16, xmm17, xmm18, xmm19, xmm20, xmm21, xmm22, xmm23,
1174     xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31
1175   };
1176 
1177   uint stk_args = 0;
1178   uint fp_args = 0;
1179 
1180   for (uint i = 0; i < total_args_passed; i++) {
1181     VMReg vmreg = VEC_ArgReg[fp_args++]->as_VMReg();
1182     int next_val = num_bits == 64 ? 1 : (num_bits == 128 ? 3 : (num_bits  == 256 ? 7 : 15));
1183     regs[i].set_pair(vmreg->next(next_val), vmreg);
1184   }
1185 
1186   return stk_args;
1187 }
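
// Sketch of the effect (illustration only): with num_bits == 256, argument i is
// assigned xmm(i) as a pair spanning slots vmreg .. vmreg->next(7), i.e. the
// full 256-bit register; no stack slots are ever used here.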
1188 
1189 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
1192   switch (ret_type) {
1193   case T_FLOAT:
1194     __ movflt(Address(rbp, -wordSize), xmm0);
1195     break;
1196   case T_DOUBLE:
1197     __ movdbl(Address(rbp, -wordSize), xmm0);
1198     break;
1199   case T_VOID:  break;
1200   default: {
1201     __ movptr(Address(rbp, -wordSize), rax);
1202     }
1203   }
1204 }
1205 
1206 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
1209   switch (ret_type) {
1210   case T_FLOAT:
1211     __ movflt(xmm0, Address(rbp, -wordSize));
1212     break;
1213   case T_DOUBLE:
1214     __ movdbl(xmm0, Address(rbp, -wordSize));
1215     break;
1216   case T_VOID:  break;
1217   default: {
1218     __ movptr(rax, Address(rbp, -wordSize));
1219     }
1220   }
1221 }
1222 
1223 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1224     for ( int i = first_arg ; i < arg_count ; i++ ) {
1225       if (args[i].first()->is_Register()) {
1226         __ push(args[i].first()->as_Register());
1227       } else if (args[i].first()->is_XMMRegister()) {
1228         __ subptr(rsp, 2*wordSize);
1229         __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
1230       }
1231     }
1232 }
1233 
1234 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1235     for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1236       if (args[i].first()->is_Register()) {
1237         __ pop(args[i].first()->as_Register());
1238       } else if (args[i].first()->is_XMMRegister()) {
1239         __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1240         __ addptr(rsp, 2*wordSize);
1241       }
1242     }
1243 }
1244 
1245 static void verify_oop_args(MacroAssembler* masm,
1246                             const methodHandle& method,
1247                             const BasicType* sig_bt,
1248                             const VMRegPair* regs) {
1249   Register temp_reg = rbx;  // not part of any compiled calling seq
1250   if (VerifyOops) {
1251     for (int i = 0; i < method->size_of_parameters(); i++) {
1252       if (is_reference_type(sig_bt[i])) {
1253         VMReg r = regs[i].first();
1254         assert(r->is_valid(), "bad oop arg");
1255         if (r->is_stack()) {
1256           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1257           __ verify_oop(temp_reg);
1258         } else {
1259           __ verify_oop(r->as_Register());
1260         }
1261       }
1262     }
1263   }
1264 }
1265 
1266 static void check_continuation_enter_argument(VMReg actual_vmreg,
1267                                               Register expected_reg,
1268                                               const char* name) {
1269   assert(!actual_vmreg->is_stack(), "%s cannot be on stack", name);
1270   assert(actual_vmreg->as_Register() == expected_reg,
1271          "%s is in unexpected register: %s instead of %s",
1272          name, actual_vmreg->as_Register()->name(), expected_reg->name());
1273 }
1274 
1275 
1276 //---------------------------- continuation_enter_setup ---------------------------
1277 //
1278 // Arguments:
1279 //   None.
1280 //
1281 // Results:
1282 //   rsp: pointer to blank ContinuationEntry
1283 //
1284 // Kills:
1285 //   rax
1286 //
1287 static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
1288   assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
1289   assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
1290   assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");
1291 
1292   stack_slots += checked_cast<int>(ContinuationEntry::size()) / wordSize;
1293   __ subptr(rsp, checked_cast<int32_t>(ContinuationEntry::size()));
1294 
1295   int frame_size = (checked_cast<int>(ContinuationEntry::size()) + wordSize) / VMRegImpl::stack_slot_size;
1296   OopMap* map = new OopMap(frame_size, 0);
1297 
1298   __ movptr(rax, Address(r15_thread, JavaThread::cont_entry_offset()));
1299   __ movptr(Address(rsp, ContinuationEntry::parent_offset()), rax);
1300   __ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rsp);
1301 
1302   return map;
1303 }
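
// After this sequence the thread's cont_entry points at the new (still blank)
// ContinuationEntry on the stack, whose parent field holds the previous value
// of cont_entry, forming a chain of nested continuation entries.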
1304 
1305 //---------------------------- fill_continuation_entry ---------------------------
1306 //
1307 // Arguments:
1308 //   rsp: pointer to blank Continuation entry
1309 //   reg_cont_obj: pointer to the continuation
1310 //   reg_flags: flags
1311 //
1312 // Results:
1313 //   rsp: pointer to filled out ContinuationEntry
1314 //
1315 // Kills:
1316 //   rax
1317 //
1318 static void fill_continuation_entry(MacroAssembler* masm, Register reg_cont_obj, Register reg_flags) {
1319   assert_different_registers(rax, reg_cont_obj, reg_flags);
1320 #ifdef ASSERT
1321   __ movl(Address(rsp, ContinuationEntry::cookie_offset()), ContinuationEntry::cookie_value());
1322 #endif
1323   __ movptr(Address(rsp, ContinuationEntry::cont_offset()), reg_cont_obj);
1324   __ movl  (Address(rsp, ContinuationEntry::flags_offset()), reg_flags);
1325   __ movptr(Address(rsp, ContinuationEntry::chunk_offset()), 0);
1326   __ movl(Address(rsp, ContinuationEntry::argsize_offset()), 0);
1327   __ movl(Address(rsp, ContinuationEntry::pin_count_offset()), 0);
1328 
1329   __ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
1330   __ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
1331   __ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
1332   __ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);
1333 
1334   __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
1335   __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
1336 }
1337 
1338 //---------------------------- continuation_enter_cleanup ---------------------------
1339 //
1340 // Arguments:
1341 //   rsp: pointer to the ContinuationEntry
1342 //
1343 // Results:
1344 //   rsp: pointer to the spilled rbp in the entry frame
1345 //
1346 // Kills:
1347 //   rbx
1348 //
1349 static void continuation_enter_cleanup(MacroAssembler* masm) {
1350 #ifdef ASSERT
1351   Label L_good_sp;
1352   __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
1353   __ jcc(Assembler::equal, L_good_sp);
1354   __ stop("Incorrect rsp at continuation_enter_cleanup");
1355   __ bind(L_good_sp);
1356 #endif
1357   __ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
1358   __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);
1359 
1360   if (CheckJNICalls) {
1361     // Check if this is a virtual thread continuation
1362     Label L_skip_vthread_code;
1363     __ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
1364     __ jcc(Assembler::equal, L_skip_vthread_code);
1365 
1366     // If the held monitor count is > 0 and this vthread is terminating then
1367     // it failed to release a JNI monitor. So we issue the same log message
1368     // that JavaThread::exit does.
1369     __ cmpptr(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
1370     __ jcc(Assembler::equal, L_skip_vthread_code);
1371 
1372     // rax may hold an exception oop, save it before the call
1373     __ push(rax);
1374     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
1375     __ pop(rax);
1376 
1377     // For vthreads we have to explicitly zero the JNI monitor count of the carrier
1378     // on termination. The held count is implicitly zeroed below when we restore from
1379     // the parent held count (which has to be zero).
1380     __ movq(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
1381 
1382     __ bind(L_skip_vthread_code);
1383   }
1384 #ifdef ASSERT
1385   else {
1386     // Check if this is a virtual thread continuation
1387     Label L_skip_vthread_code;
1388     __ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
1389     __ jcc(Assembler::equal, L_skip_vthread_code);
1390 
1391     // See comment just above. If not checking JNI calls the JNI count is only
1392     // needed for assertion checking.
1393     __ movq(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
1394 
1395     __ bind(L_skip_vthread_code);
1396   }
1397 #endif
1398 
1399   __ movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()));
1400   __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx);
1401 
1402   __ movptr(rbx, Address(rsp, ContinuationEntry::parent_offset()));
1403   __ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx);
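  // Pop the ContinuationEntry; rsp then points at the spilled rbp of the entry frame,
  // as promised in the header comment above.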
1404   __ addptr(rsp, checked_cast<int32_t>(ContinuationEntry::size()));
1405 }
1406 
1407 static void gen_continuation_enter(MacroAssembler* masm,
1408                                    const VMRegPair* regs,
1409                                    int& exception_offset,
1410                                    OopMapSet* oop_maps,
1411                                    int& frame_complete,
1412                                    int& stack_slots,
1413                                    int& interpreted_entry_offset,
1414                                    int& compiled_entry_offset) {
1415 
1416   // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1417   int pos_cont_obj   = 0;
1418   int pos_is_cont    = 1;
1419   int pos_is_virtual = 2;
1420 
1421   // The platform-specific calling convention may present the arguments in various registers.
1422   // To simplify the rest of the code, we expect the arguments to reside in these known
1423   // registers, and we additionally check the placement here in case the calling convention
1424   // ever changes.
1425   Register reg_cont_obj   = c_rarg1;
1426   Register reg_is_cont    = c_rarg2;
1427   Register reg_is_virtual = c_rarg3;
1428 
1429   check_continuation_enter_argument(regs[pos_cont_obj].first(),   reg_cont_obj,   "Continuation object");
1430   check_continuation_enter_argument(regs[pos_is_cont].first(),    reg_is_cont,    "isContinue");
1431   check_continuation_enter_argument(regs[pos_is_virtual].first(), reg_is_virtual, "isVirtualThread");
1432 
1433   // Utility methods kill rax, make sure there are no collisions
1434   assert_different_registers(rax, reg_cont_obj, reg_is_cont, reg_is_virtual);
1435 
1436   AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
1437                          relocInfo::static_call_type);
1438 
1439   address start = __ pc();
1440 
1441   Label L_thaw, L_exit;
1442 
1443   // i2i (interpreted) entry, used only in interp_only_mode
1444   interpreted_entry_offset = __ pc() - start;
1445   {
1446 #ifdef ASSERT
1447     Label is_interp_only;
1448     __ cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
1449     __ jcc(Assembler::notEqual, is_interp_only);
1450     __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1451     __ bind(is_interp_only);
1452 #endif
1453 
1454     __ pop(rax); // return address
1455     // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
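    // Expected interpreter stack layout here (return address already popped into rax):
    //   rsp + 2*stackElementSize : Continuation object -> c_rarg1
    //   rsp + 1*stackElementSize : isContinue          -> c_rarg2
    //   rsp + 0*stackElementSize : isVirtualThread     -> c_rarg3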
1456     __ movptr(c_rarg1, Address(rsp, Interpreter::stackElementSize*2));
1457     __ movl(c_rarg2,   Address(rsp, Interpreter::stackElementSize*1));
1458     __ movl(c_rarg3,   Address(rsp, Interpreter::stackElementSize*0));
1459     __ andptr(rsp, -16); // Ensure compiled code always sees stack at proper alignment
1460     __ push(rax); // return address
1461     __ push_cont_fastpath();
1462 
1463     __ enter();
1464 
1465     stack_slots = 2; // will be adjusted in setup
1466     OopMap* map = continuation_enter_setup(masm, stack_slots);
1467     // The frame is complete here, but we only record it for the compiled entry, so the
1468     // frame would appear unsafe. That is okay: at worst we miss an async sample, and we are in interp_only_mode anyway.
1469 
1470     __ verify_oop(reg_cont_obj);
1471 
1472     fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1473 
1474     // If continuation, call to thaw. Otherwise, resolve the call and exit.
1475     __ testptr(reg_is_cont, reg_is_cont);
1476     __ jcc(Assembler::notZero, L_thaw);
1477 
1478     // --- Resolve path
1479 
1480     // Make sure the call is patchable
1481     __ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
1482     // Emit stub for static call
1483     address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
1484     if (stub == nullptr) {
1485       fatal("CodeCache is full at gen_continuation_enter");
1486     }
1487     __ call(resolve);
1488     oop_maps->add_gc_map(__ pc() - start, map);
1489     __ post_call_nop();
1490 
1491     __ jmp(L_exit);
1492   }
1493 
1494   // compiled entry
1495   __ align(CodeEntryAlignment);
1496   compiled_entry_offset = __ pc() - start;
1497   __ enter();
1498 
1499   stack_slots = 2; // will be adjusted in setup
1500   OopMap* map = continuation_enter_setup(masm, stack_slots);
1501 
1502   // Frame is now completed as far as size and linkage.
1503   frame_complete = __ pc() - start;
1504 
1505   __ verify_oop(reg_cont_obj);
1506 
1507   fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
1508 
1509   // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
1510   __ testptr(reg_is_cont, reg_is_cont);
1511   __ jccb(Assembler::notZero, L_thaw);
1512 
1513   // --- call Continuation.enter(Continuation c, boolean isContinue)
1514 
1515   // Make sure the call is patchable
1516   __ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
1517 
1518   // Emit stub for static call
1519   address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
1520   if (stub == nullptr) {
1521     fatal("CodeCache is full at gen_continuation_enter");
1522   }
1523 
1524   // The call needs to be resolved. There's a special case for this in
1525   // SharedRuntime::find_callee_info_helper() which calls
1526   // LinkResolver::resolve_continuation_enter() which resolves the call to
1527   // Continuation.enter(Continuation c, boolean isContinue).
1528   __ call(resolve);
1529 
1530   oop_maps->add_gc_map(__ pc() - start, map);
1531   __ post_call_nop();
1532 
1533   __ jmpb(L_exit);
1534 
1535   // --- Thawing path
1536 
1537   __ bind(L_thaw);
1538 
1539   __ call(RuntimeAddress(StubRoutines::cont_thaw()));
1540 
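  // Record the offset of the pc right after the thaw call; ContinuationEntry::set_enter_code
  // later combines it with the code start to compute the return pc used when frames
  // return into this enter frame.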
1541   ContinuationEntry::_return_pc_offset = __ pc() - start;
1542   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1543   __ post_call_nop();
1544 
1545   // --- Normal exit (resolve/thawing)
1546 
1547   __ bind(L_exit);
1548 
1549   continuation_enter_cleanup(masm);
1550   __ pop(rbp);
1551   __ ret(0);
1552 
1553   // --- Exception handling path
1554 
1555   exception_offset = __ pc() - start;
1556 
1557   continuation_enter_cleanup(masm);
1558   __ pop(rbp);
1559 
1560   __ movptr(c_rarg0, r15_thread);
1561   __ movptr(c_rarg1, Address(rsp, 0)); // return address
1562 
1563   // rax still holds the original exception oop, save it before the call
1564   __ push(rax);
1565 
1566   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
1567   __ movptr(rbx, rax);
1568 
1569   // Continue at exception handler:
1570   //   rax: exception oop
1571   //   rbx: exception handler
1572   //   rdx: exception pc
1573   __ pop(rax);
1574   __ verify_oop(rax);
1575   __ pop(rdx);
1576   __ jmp(rbx);
1577 }
1578 
1579 static void gen_continuation_yield(MacroAssembler* masm,
1580                                    const VMRegPair* regs,
1581                                    OopMapSet* oop_maps,
1582                                    int& frame_complete,
1583                                    int& stack_slots,
1584                                    int& compiled_entry_offset) {
1585   enum layout {
1586     rbp_off,
1587     rbpH_off,
1588     return_off,
1589     return_off2,
1590     framesize // inclusive of return address
1591   };
1592   stack_slots = framesize / VMRegImpl::slots_per_word;
1593   assert(stack_slots == 2, "recheck layout");
1594 
1595   address start = __ pc();
1596   compiled_entry_offset = __ pc() - start;
1597   __ enter();
1598   address the_pc = __ pc();
1599 
1600   frame_complete = the_pc - start;
1601 
1602   // This nop must be exactly at the PC we push into the frame info.
1603   // We use this nop for fast CodeBlob lookup, so we associate the OopMap
1604   // with it right away.
1605   __ post_call_nop();
1606   OopMap* map = new OopMap(framesize, 1);
1607   oop_maps->add_gc_map(frame_complete, map);
1608 
1609   __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);
1610   __ movptr(c_rarg0, r15_thread);
1611   __ movptr(c_rarg1, rsp);
1612   __ call_VM_leaf(Continuation::freeze_entry(), 2);
1613   __ reset_last_Java_frame(true);
1614 
1615   Label L_pinned;
1616 
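  // freeze_entry returns 0 in rax when the continuation was frozen successfully;
  // a non-zero value is a pin (or failure) code, which we return to the caller.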
1617   __ testptr(rax, rax);
1618   __ jcc(Assembler::notZero, L_pinned);
1619 
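  // Freeze succeeded: rewind rsp to the ContinuationEntry, tear the entry down and
  // return (rax is already 0, the success result).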
1620   __ movptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
1621   continuation_enter_cleanup(masm);
1622   __ pop(rbp);
1623   __ ret(0);
1624 
1625   __ bind(L_pinned);
1626 
1627   // Pinned, return to caller
1628 
1629   // handle pending exception thrown by freeze
1630   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
1631   Label ok;
1632   __ jcc(Assembler::equal, ok);
1633   __ leave();
1634   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1635   __ bind(ok);
1636 
1637   __ leave();
1638   __ ret(0);
1639 }
1640 
1641 static void gen_special_dispatch(MacroAssembler* masm,
1642                                  const methodHandle& method,
1643                                  const BasicType* sig_bt,
1644                                  const VMRegPair* regs) {
1645   verify_oop_args(masm, method, sig_bt, regs);
1646   vmIntrinsics::ID iid = method->intrinsic_id();
1647 
1648   // Now write the args into the outgoing interpreter space
1649   bool     has_receiver   = false;
1650   Register receiver_reg   = noreg;
1651   int      member_arg_pos = -1;
1652   Register member_reg     = noreg;
1653   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1654   if (ref_kind != 0) {
1655     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1656     member_reg = rbx;  // known to be free at this point
1657     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1658   } else if (iid == vmIntrinsics::_invokeBasic) {
1659     has_receiver = true;
1660   } else if (iid == vmIntrinsics::_linkToNative) {
1661     member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1662     member_reg = rbx;  // known to be free at this point
1663   } else {
1664     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1665   }
1666 
1667   if (member_reg != noreg) {
1668     // Load the member_arg into register, if necessary.
1669     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1670     VMReg r = regs[member_arg_pos].first();
1671     if (r->is_stack()) {
1672       __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1673     } else {
1674       // no data motion is needed
1675       member_reg = r->as_Register();
1676     }
1677   }
1678 
1679   if (has_receiver) {
1680     // Make sure the receiver is loaded into a register.
1681     assert(method->size_of_parameters() > 0, "oob");
1682     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1683     VMReg r = regs[0].first();
1684     assert(r->is_valid(), "bad receiver arg");
1685     if (r->is_stack()) {
1686       // Porting note:  This assumes that compiled calling conventions always
1687       // pass the receiver oop in a register.  If this is not true on some
1688       // platform, pick a temp and load the receiver from stack.
1689       fatal("receiver always in a register");
1690       receiver_reg = j_rarg0;  // known to be free at this point
1691       __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1692     } else {
1693       // no data motion is needed
1694       receiver_reg = r->as_Register();
1695     }
1696   }
1697 
1698   // Figure out which address we are really jumping to:
1699   MethodHandles::generate_method_handle_dispatch(masm, iid,
1700                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1701 }
1702 
1703 // ---------------------------------------------------------------------------
1704 // Generate a native wrapper for a given method.  The method takes arguments
1705 // in the Java compiled code convention, marshals them to the native
1706 // convention (handlizes oops, etc), transitions to native, makes the call,
1707 // returns to java state (possibly blocking), unhandlizes any result and
1708 // returns.
1709 //
1710 // Critical native functions are a shorthand for the use of
1711 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1712 // functions.  The wrapper is expected to unpack the arguments before
1713 // passing them to the callee. Critical native functions leave the state _in_Java,
1714 // since they cannot stop for GC.
1715 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1716 // block and the check for pending exceptions, since it is impossible for them
1717 // to be thrown.
1718 //
1719 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1720                                                 const methodHandle& method,
1721                                                 int compile_id,
1722                                                 BasicType* in_sig_bt,
1723                                                 VMRegPair* in_regs,
1724                                                 BasicType ret_type) {
1725   if (method->is_continuation_native_intrinsic()) {
1726     int exception_offset = -1;
1727     OopMapSet* oop_maps = new OopMapSet();
1728     int frame_complete = -1;
1729     int stack_slots = -1;
1730     int interpreted_entry_offset = -1;
1731     int vep_offset = -1;
1732     if (method->is_continuation_enter_intrinsic()) {
1733       gen_continuation_enter(masm,
1734                              in_regs,
1735                              exception_offset,
1736                              oop_maps,
1737                              frame_complete,
1738                              stack_slots,
1739                              interpreted_entry_offset,
1740                              vep_offset);
1741     } else if (method->is_continuation_yield_intrinsic()) {
1742       gen_continuation_yield(masm,
1743                              in_regs,
1744                              oop_maps,
1745                              frame_complete,
1746                              stack_slots,
1747                              vep_offset);
1748     } else {
1749       guarantee(false, "Unknown Continuation native intrinsic");
1750     }
1751 
1752 #ifdef ASSERT
1753     if (method->is_continuation_enter_intrinsic()) {
1754       assert(interpreted_entry_offset != -1, "Must be set");
1755       assert(exception_offset != -1,         "Must be set");
1756     } else {
1757       assert(interpreted_entry_offset == -1, "Must be unset");
1758       assert(exception_offset == -1,         "Must be unset");
1759     }
1760     assert(frame_complete != -1,    "Must be set");
1761     assert(stack_slots != -1,       "Must be set");
1762     assert(vep_offset != -1,        "Must be set");
1763 #endif
1764 
1765     __ flush();
1766     nmethod* nm = nmethod::new_native_nmethod(method,
1767                                               compile_id,
1768                                               masm->code(),
1769                                               vep_offset,
1770                                               frame_complete,
1771                                               stack_slots,
1772                                               in_ByteSize(-1),
1773                                               in_ByteSize(-1),
1774                                               oop_maps,
1775                                               exception_offset);
1776     if (nm == nullptr) return nm;
1777     if (method->is_continuation_enter_intrinsic()) {
1778       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1779     } else if (method->is_continuation_yield_intrinsic()) {
1780       _cont_doYield_stub = nm;
1781     }
1782     return nm;
1783   }
1784 
1785   if (method->is_method_handle_intrinsic()) {
1786     vmIntrinsics::ID iid = method->intrinsic_id();
1787     intptr_t start = (intptr_t)__ pc();
1788     int vep_offset = ((intptr_t)__ pc()) - start;
1789     gen_special_dispatch(masm,
1790                          method,
1791                          in_sig_bt,
1792                          in_regs);
1793     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1794     __ flush();
1795     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1796     return nmethod::new_native_nmethod(method,
1797                                        compile_id,
1798                                        masm->code(),
1799                                        vep_offset,
1800                                        frame_complete,
1801                                        stack_slots / VMRegImpl::slots_per_word,
1802                                        in_ByteSize(-1),
1803                                        in_ByteSize(-1),
1804                                        nullptr);
1805   }
1806   address native_func = method->native_function();
1807   assert(native_func != nullptr, "must have function");
1808 
1809   // An OopMap for lock (and class if static)
1810   OopMapSet *oop_maps = new OopMapSet();
1811   intptr_t start = (intptr_t)__ pc();
1812 
1813   // We have received a description of where all the java args are located
1814   // on entry to the wrapper. We need to convert these args to where
1815   // the jni function will expect them. To figure out where they go
1816   // we convert the java signature to a C signature by inserting
1817   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1818 
1819   const int total_in_args = method->size_of_parameters();
1820   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1821 
1822   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1823   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1824   BasicType* in_elem_bt = nullptr;
1825 
1826   int argc = 0;
1827   out_sig_bt[argc++] = T_ADDRESS;
1828   if (method->is_static()) {
1829     out_sig_bt[argc++] = T_OBJECT;
1830   }
1831 
1832   for (int i = 0; i < total_in_args ; i++ ) {
1833     out_sig_bt[argc++] = in_sig_bt[i];
1834   }
1835 
1836   // Now figure out where the args must be stored and how much stack space
1837   // they require.
1838   int out_arg_slots;
1839   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1840 
1841   // Compute framesize for the wrapper.  We need to handlize all oops in
1842   // incoming registers
1843 
1844   // Calculate the total number of stack slots we will need.
1845 
1846   // First count the abi requirement plus all of the outgoing args
1847   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1848 
1849   // Now the space for the inbound oop handle area
1850   int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
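  // (one word-sized spill slot for each of the six integer argument registers of the
  //  Java calling convention; register oop args get handlized into this area)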
1851 
1852   int oop_handle_offset = stack_slots;
1853   stack_slots += total_save_slots;
1854 
1855   // Now any space we need for handlizing a klass if static method
1856 
1857   int klass_slot_offset = 0;
1858   int klass_offset = -1;
1859   int lock_slot_offset = 0;
1860   bool is_static = false;
1861 
1862   if (method->is_static()) {
1863     klass_slot_offset = stack_slots;
1864     stack_slots += VMRegImpl::slots_per_word;
1865     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1866     is_static = true;
1867   }
1868 
1869   // Plus a lock if needed
1870 
1871   if (method->is_synchronized()) {
1872     lock_slot_offset = stack_slots;
1873     stack_slots += VMRegImpl::slots_per_word;
1874   }
1875 
1876   // Now a place (+2 slots) to save return values or temps during shuffling,
1877   // plus 4 slots for the return address (which we own) and the saved rbp
1878   stack_slots += 6;
1879 
1880   // OK, the space we have allocated will look like:
1881   //
1882   //
1883   // FP-> |                     |
1884   //      |---------------------|
1885   //      | 2 slots for moves   |
1886   //      |---------------------|
1887   //      | lock box (if sync)  |
1888   //      |---------------------| <- lock_slot_offset
1889   //      | klass (if static)   |
1890   //      |---------------------| <- klass_slot_offset
1891   //      | oopHandle area      |
1892   //      |---------------------| <- oop_handle_offset (6 java arg registers)
1893   //      | outbound memory     |
1894   //      | based arguments     |
1895   //      |                     |
1896   //      |---------------------|
1897   //      |                     |
1898   // SP-> | out_preserved_slots |
1899   //
1900   //
1901 
1902 
1903   // Now compute the actual number of stack slots we need, rounding to keep the
1904   // stack properly aligned.
1905   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1906 
1907   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1908 
1909   // First, make an inline-cache (IC) check to see if we should even be here
1910 
1911   // We are free to use all registers as temps without saving them and
1912   // restoring them except rbp. rbp is the only callee save register
1913   // as far as the interpreter and the compiler(s) are concerned.
1914 
1915   const Register receiver = j_rarg0;
1916 
1917   Label exception_pending;
1918 
1919   assert_different_registers(receiver, rscratch1, rscratch2);
1920   __ verify_oop(receiver);
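  // The inline-cache check compares the receiver's klass against the expected one and
  // jumps to the IC-miss runtime stub on a mismatch; only on a hit do we fall through
  // to the verified entry point below.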
1921   __ ic_check(8 /* end_alignment */);
1922 
1923   int vep_offset = ((intptr_t)__ pc()) - start;
1924 
1925   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1926     Label L_skip_barrier;
1927     Register klass = r10;
1928     __ mov_metadata(klass, method->method_holder()); // InstanceKlass*
1929     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
1930 
1931     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
1932 
1933     __ bind(L_skip_barrier);
1934   }
1935 
1936 #ifdef COMPILER1
1937   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1938   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1939     inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
1940   }
1941 #endif // COMPILER1
1942 
1943   // The instruction at the verified entry point must be 5 bytes or longer
1944   // because it can be patched on the fly by make_non_entrant. The stack bang
1945   // instruction fits that requirement.
1946 
1947   // Generate stack overflow check
1948   __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
1949 
1950   // Generate a new frame for the wrapper.
1951   __ enter();
1952   // -2 because return address is already present and so is saved rbp
1953   __ subptr(rsp, stack_size - 2*wordSize);
1954 
1955   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1956   // native wrapper is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub
1957   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */);
1958 
1959   // Frame is now completed as far as size and linkage.
1960   int frame_complete = ((intptr_t)__ pc()) - start;
1961 
1962 #ifdef ASSERT
1963   __ check_stack_alignment(rsp, "improperly aligned stack");
1964 #endif /* ASSERT */
1965 
1966 
1967   // We use r14 as the oop handle for the receiver/klass
1968   // It is callee save so it survives the call to native
1969 
1970   const Register oop_handle_reg = r14;
1971 
1972   //
1973   // We immediately shuffle the arguments so that any vm call we have to
1974   // make from here on out (sync slow path, jvmti, etc.) we will have
1975   // captured the oops from our caller and have a valid oopMap for
1976   // them.
1977 
1978   // -----------------
1979   // The Grand Shuffle
1980 
1981   // The Java calling convention is either equal to (linux) or denser than (win64) the
1982   // C calling convention. However, because of the JNIEnv* argument the C calling
1983   // convention always has at least one more argument (and two more for static methods) than Java.
1984   // Therefore, if we move the args from java -> c backwards we will never have
1985   // a register->register conflict, and we don't have to build a dependency graph
1986   // and figure out how to break any cycles.
1987   //
1988 
1989   // Record esp-based slot for receiver on stack for non-static methods
1990   int receiver_offset = -1;
1991 
1992   // This is a trick. We double the stack slots so we can claim
1993   // the oops in the caller's frame. Since we are sure to have
1994   // more args than the caller, doubling is enough to make
1995   // sure we can capture all the incoming oop args from the
1996   // caller.
1997   //
1998   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1999 
2000   // Mark location of rbp (someday)
2001   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2002 
2003   // Use eax, ebx as temporaries during any memory-memory moves we have to do
2004   // All inbound args are referenced based on rbp and all outbound args via rsp.
2005 
2006 
2007 #ifdef ASSERT
2008   bool reg_destroyed[Register::number_of_registers];
2009   bool freg_destroyed[XMMRegister::number_of_registers];
2010   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
2011     reg_destroyed[r] = false;
2012   }
2013   for ( int f = 0 ; f < XMMRegister::number_of_registers ; f++ ) {
2014     freg_destroyed[f] = false;
2015   }
2016 
2017 #endif /* ASSERT */
2018 
2019   // For JNI natives the incoming and outgoing registers are offset upwards.
2020   GrowableArray<int> arg_order(2 * total_in_args);
2021 
2022   VMRegPair tmp_vmreg;
2023   tmp_vmreg.set2(rbx->as_VMReg());
2024 
2025   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2026     arg_order.push(i);
2027     arg_order.push(c_arg);
2028   }
2029 
2030   int temploc = -1;
2031   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2032     int i = arg_order.at(ai);
2033     int c_arg = arg_order.at(ai + 1);
2034     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2035 #ifdef ASSERT
2036     if (in_regs[i].first()->is_Register()) {
2037       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2038     } else if (in_regs[i].first()->is_XMMRegister()) {
2039       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2040     }
2041     if (out_regs[c_arg].first()->is_Register()) {
2042       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2043     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2044       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2045     }
2046 #endif /* ASSERT */
2047     switch (in_sig_bt[i]) {
2048       case T_ARRAY:
2049       case T_OBJECT:
2050         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2051                     ((i == 0) && (!is_static)),
2052                     &receiver_offset);
2053         break;
2054       case T_VOID:
2055         break;
2056 
2057       case T_FLOAT:
2058         __ float_move(in_regs[i], out_regs[c_arg]);
2059         break;
2060 
2061       case T_DOUBLE:
2062         assert( i + 1 < total_in_args &&
2063                 in_sig_bt[i + 1] == T_VOID &&
2064                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2065         __ double_move(in_regs[i], out_regs[c_arg]);
2066         break;
2067 
2068       case T_LONG :
2069         __ long_move(in_regs[i], out_regs[c_arg]);
2070         break;
2071 
2072       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
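        // fall through: T_ADDRESS never appears in Java signatures, so in product builds
        // this degenerates to the default 32/64-bit move below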
2073 
2074       default:
2075         __ move32_64(in_regs[i], out_regs[c_arg]);
2076     }
2077   }
2078 
2079   int c_arg;
2080 
2081   // Pre-load a static method's oop into r14.  Used both by locking code and
2082   // the normal JNI call code.
2083   // point c_arg at the first arg that is already loaded in case we
2084   // need to spill before we call out
2085   c_arg = total_c_args - total_in_args;
2086 
2087   if (method->is_static()) {
2088 
2089     //  load oop into a register
2090     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2091 
2092     // Now handlize the static class mirror; it's known to be non-null.
2093     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2094     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2095 
2096     // Now get the handle
2097     __ lea(oop_handle_reg, Address(rsp, klass_offset));
2098     // store the klass handle as second argument
2099     __ movptr(c_rarg1, oop_handle_reg);
2100     // and protect the arg if we must spill
2101     c_arg--;
2102   }
2103 
2104   // Change state to native (we save the return address in the thread, since it might not
2105   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2106   // points into the right code segment. It does not have to be the correct return pc.
2107   // We use the same pc/oopMap repeatedly when we call out
2108 
2109   intptr_t the_pc = (intptr_t) __ pc();
2110   oop_maps->add_gc_map(the_pc - start, map);
2111 
2112   __ set_last_Java_frame(rsp, noreg, (address)the_pc, rscratch1);
2113 
2114 
2115   // We have all of the arguments set up at this point. From here on we must not touch
2116   // any of the argument registers except where we explicitly save and restore them.
2117 
2118   {
2119     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2120     // protect the args we've loaded
2121     save_args(masm, total_c_args, c_arg, out_regs);
2122     __ mov_metadata(c_rarg1, method());
2123     __ call_VM_leaf(
2124       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2125       r15_thread, c_rarg1);
2126     restore_args(masm, total_c_args, c_arg, out_regs);
2127   }
2128 
2129   // RedefineClasses() tracing support for obsolete method entry
2130   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2131     // protect the args we've loaded
2132     save_args(masm, total_c_args, c_arg, out_regs);
2133     __ mov_metadata(c_rarg1, method());
2134     __ call_VM_leaf(
2135       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2136       r15_thread, c_rarg1);
2137     restore_args(masm, total_c_args, c_arg, out_regs);
2138   }
2139 
2140   // Lock a synchronized method
2141 
2142   // Register definitions used by locking and unlocking
2143 
2144   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
2145   const Register obj_reg  = rbx;  // Will contain the oop
2146   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2147   const Register old_hdr  = r13;  // value of old header at unlock time
2148 
2149   Label slow_path_lock;
2150   Label lock_done;
2151 
2152   if (method->is_synchronized()) {
2153     Label count_mon;
2154 
2155     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2156 
2157     // Get the handle (the 2nd argument)
2158     __ mov(oop_handle_reg, c_rarg1);
2159 
2160     // Get address of the box
2161 
2162     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
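    // lock_reg now points at the on-stack BasicLock (the "box") reserved at
    // lock_slot_offset in this wrapper's frame.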
2163 
2164     // Load the oop from the handle
2165     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2166 
2167     if (LockingMode == LM_MONITOR) {
2168       __ jmp(slow_path_lock);
2169     } else if (LockingMode == LM_LEGACY) {
2170       // Load immediate 1 into swap_reg %rax
2171       __ movl(swap_reg, 1);
2172 
2173       // Load (object->mark() | 1) into swap_reg %rax
2174       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2175 
2176       // Save (object->mark() | 1) into BasicLock's displaced header
2177       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2178 
2179       // src -> dest iff dest == rax else rax <- dest
2180       __ lock();
2181       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2182       __ jcc(Assembler::equal, count_mon);
2183 
2184       // Hmm should this move to the slow path code area???
2185 
2186       // Test if the oopMark is an obvious stack pointer, i.e.,
2187       //  1) (mark & 3) == 0, and
2188       //  2) rsp <= mark < rsp + os::vm_page_size()
2189       // These 3 tests can be done by evaluating the following
2190       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2191       // assuming both the stack pointer and the page size have their
2192       // least significant 2 bits clear.
2193       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2194 
2195       __ subptr(swap_reg, rsp);
2196       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
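      // Worked example with a 4K page: 3 - 4096 == ...FFFFF003, so the AND keeps the low
      // two bits and every bit >= 12; the result is zero only when mark - rsp lies in
      // [0, 4096) with its low two bits clear, i.e. the mark is a stack address within
      // one page above rsp (a recursive lock held by this thread).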
2197 
2198       // Save the test result, for recursive case, the result is zero
2199       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2200       __ jcc(Assembler::notEqual, slow_path_lock);
2201     } else {
2202       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2203       __ lightweight_lock(lock_reg, obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2204     }
2205     __ bind(count_mon);
2206     __ inc_held_monitor_count();
2207 
2208     // Slow path will re-enter here
2209     __ bind(lock_done);
2210   }
2211 
2212   // Finally just about ready to make the JNI call
2213 
2214   // get JNIEnv* which is first argument to native
2215   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2216 
2217   // Now set thread in native
2218   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2219 
2220   __ call(RuntimeAddress(native_func));
2221 
2222   // Verify or restore cpu control state after JNI call
2223   __ restore_cpu_control_state_after_jni(rscratch1);
2224 
2225   // Unpack native results.
2226   switch (ret_type) {
2227   case T_BOOLEAN: __ c2bool(rax);            break;
2228   case T_CHAR   : __ movzwl(rax, rax);      break;
2229   case T_BYTE   : __ sign_extend_byte (rax); break;
2230   case T_SHORT  : __ sign_extend_short(rax); break;
2231   case T_INT    : /* nothing to do */        break;
2232   case T_DOUBLE :
2233   case T_FLOAT  :
2234     // Result is in xmm0; we'll save it as needed
2235     break;
2236   case T_ARRAY:                 // Really a handle
2237   case T_OBJECT:                // Really a handle
2238       break; // can't de-handlize until after safepoint check
2239   case T_VOID: break;
2240   case T_LONG: break;
2241   default       : ShouldNotReachHere();
2242   }
2243 
2244   Label after_transition;
2245 
2246   // Switch thread to "native transition" state before reading the synchronization state.
2247   // This additional state is necessary because reading and testing the synchronization
2248   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2249   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2250   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2251   //     Thread A is resumed to finish this native method, but doesn't block here since it
2252   //     didn't see any synchronization in progress, and escapes.
2253   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2254 
2255   // Force this write out before the read below
2256   if (!UseSystemMemoryBarrier) {
2257     __ membar(Assembler::Membar_mask_bits(
2258               Assembler::LoadLoad | Assembler::LoadStore |
2259               Assembler::StoreLoad | Assembler::StoreStore));
2260   }
2261 
2262   // check for safepoint operation in progress and/or pending suspend requests
2263   {
2264     Label Continue;
2265     Label slow_path;
2266 
2267     __ safepoint_poll(slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
2268 
2269     __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2270     __ jcc(Assembler::equal, Continue);
2271     __ bind(slow_path);
2272 
2273     // Don't use call_VM as it will see a possible pending exception and forward it
2274     // and never return here, preventing us from clearing _last_native_pc down below.
2275     // We also can't use call_VM_leaf, as it will check whether rsi & rdi are
2276     // preserved and correspond to the bcp/locals pointers. So we do the runtime call
2277     // by hand.
2278     //
2279     __ vzeroupper();
2280     save_native_result(masm, ret_type, stack_slots);
2281     __ mov(c_rarg0, r15_thread);
2282     __ mov(r12, rsp); // remember sp
2283     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2284     __ andptr(rsp, -16); // align stack as required by ABI
2285     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2286     __ mov(rsp, r12); // restore sp
2287     __ reinit_heapbase();
2288     // Restore any method result value
2289     restore_native_result(masm, ret_type, stack_slots);
2290     __ bind(Continue);
2291   }
2292 
2293   // change thread state
2294   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2295   __ bind(after_transition);
2296 
2297   Label reguard;
2298   Label reguard_done;
2299   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2300   __ jcc(Assembler::equal, reguard);
2301   __ bind(reguard_done);
2302 
2303   // native result if any is live
2304 
2305   // Unlock
2306   Label slow_path_unlock;
2307   Label unlock_done;
2308   if (method->is_synchronized()) {
2309 
2310     Label fast_done;
2311 
2312     // Get locked oop from the handle we passed to jni
2313     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2314 
2315     if (LockingMode == LM_LEGACY) {
2316       Label not_recur;
2317       // Simple recursive lock?
2318       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
2319       __ jcc(Assembler::notEqual, not_recur);
2320       __ dec_held_monitor_count();
2321       __ jmpb(fast_done);
2322       __ bind(not_recur);
2323     }
2324 
2325     // Must save rax if it is live now because cmpxchg must use it
2326     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2327       save_native_result(masm, ret_type, stack_slots);
2328     }
2329 
2330     if (LockingMode == LM_MONITOR) {
2331       __ jmp(slow_path_unlock);
2332     } else if (LockingMode == LM_LEGACY) {
2333       // get address of the stack lock
2334       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2335       //  get old displaced header
2336       __ movptr(old_hdr, Address(rax, 0));
2337 
2338       // Atomic swap old header if oop still contains the stack lock
2339       __ lock();
2340       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2341       __ jcc(Assembler::notEqual, slow_path_unlock);
2342       __ dec_held_monitor_count();
2343     } else {
2344       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2345       __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
2346       __ dec_held_monitor_count();
2347     }
2348 
2349     // slow path re-enters here
2350     __ bind(unlock_done);
2351     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2352       restore_native_result(masm, ret_type, stack_slots);
2353     }
2354 
2355     __ bind(fast_done);
2356   }
2357   {
2358     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2359     save_native_result(masm, ret_type, stack_slots);
2360     __ mov_metadata(c_rarg1, method());
2361     __ call_VM_leaf(
2362          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2363          r15_thread, c_rarg1);
2364     restore_native_result(masm, ret_type, stack_slots);
2365   }
2366 
2367   __ reset_last_Java_frame(false);
2368 
2369   // Unbox oop result, e.g. JNIHandles::resolve value.
2370   if (is_reference_type(ret_type)) {
2371     __ resolve_jobject(rax /* value */,
2372                        r15_thread /* thread */,
2373                        rcx /* tmp */);
2374   }
2375 
2376   if (CheckJNICalls) {
2377     // clear_pending_jni_exception_check
2378     __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2379   }
2380 
2381   // reset handle block
2382   __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2383   __ movl(Address(rcx, JNIHandleBlock::top_offset()), NULL_WORD);
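  // (storing 0 into the top offset empties the block, dropping every JNI local
  //  reference the native code created during this call)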
2384 
2385   // pop our frame
2386 
2387   __ leave();
2388 
2389   // Any exception pending?
2390   __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2391   __ jcc(Assembler::notEqual, exception_pending);
2392 
2393   // Return
2394 
2395   __ ret(0);
2396 
2397   // Unexpected paths are out of line and go here
2398 
2399   // forward the exception
2400   __ bind(exception_pending);
2401 
2402   // by jumping to the shared forward_exception stub
2403   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2404 
2405   // Slow path locking & unlocking
2406   if (method->is_synchronized()) {
2407 
2408     // BEGIN Slow path lock
2409     __ bind(slow_path_lock);
2410 
2411     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2412     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2413 
2414     // protect the args we've loaded
2415     save_args(masm, total_c_args, c_arg, out_regs);
2416 
2417     __ mov(c_rarg0, obj_reg);
2418     __ mov(c_rarg1, lock_reg);
2419     __ mov(c_rarg2, r15_thread);
2420 
2421     // Not a leaf but we have last_Java_frame setup as we want
2422     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2423     restore_args(masm, total_c_args, c_arg, out_regs);
2424 
2425 #ifdef ASSERT
2426     { Label L;
2427     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2428     __ jcc(Assembler::equal, L);
2429     __ stop("no pending exception allowed on exit from monitorenter");
2430     __ bind(L);
2431     }
2432 #endif
2433     __ jmp(lock_done);
2434 
2435     // END Slow path lock
2436 
2437     // BEGIN Slow path unlock
2438     __ bind(slow_path_unlock);
2439 
2440     // If we haven't already saved the native result we must save it now as xmm registers
2441     // are still exposed.
2442     __ vzeroupper();
2443     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2444       save_native_result(masm, ret_type, stack_slots);
2445     }
2446 
2447     __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2448 
2449     __ mov(c_rarg0, obj_reg);
2450     __ mov(c_rarg2, r15_thread);
2451     __ mov(r12, rsp); // remember sp
2452     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2453     __ andptr(rsp, -16); // align stack as required by ABI
2454 
2455     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2456     // NOTE that obj_reg == rbx currently
2457     __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2458     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2459 
2460     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2461     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2462     __ mov(rsp, r12); // restore sp
2463     __ reinit_heapbase();
2464 #ifdef ASSERT
2465     {
2466       Label L;
2467       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2468       __ jcc(Assembler::equal, L);
2469       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2470       __ bind(L);
2471     }
2472 #endif /* ASSERT */
2473 
2474     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2475 
2476     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2477       restore_native_result(masm, ret_type, stack_slots);
2478     }
2479     __ jmp(unlock_done);
2480 
2481     // END Slow path unlock
2482 
2483   } // synchronized
2484 
2485   // SLOW PATH Reguard the stack if needed
2486 
2487   __ bind(reguard);
2488   __ vzeroupper();
2489   save_native_result(masm, ret_type, stack_slots);
2490   __ mov(r12, rsp); // remember sp
2491   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2492   __ andptr(rsp, -16); // align stack as required by ABI
2493   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2494   __ mov(rsp, r12); // restore sp
2495   __ reinit_heapbase();
2496   restore_native_result(masm, ret_type, stack_slots);
2497   // and continue
2498   __ jmp(reguard_done);
2499 
2500 
2501 
2502   __ flush();
2503 
2504   nmethod *nm = nmethod::new_native_nmethod(method,
2505                                             compile_id,
2506                                             masm->code(),
2507                                             vep_offset,
2508                                             frame_complete,
2509                                             stack_slots / VMRegImpl::slots_per_word,
2510                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2511                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2512                                             oop_maps);
2513 
2514   return nm;
2515 }
2516 
2517 // This function returns the adjustment (in number of words) to a c2i adapter
2518 // activation, for use during deoptimization
2519 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2520   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2521 }
2522 
2523 
2524 uint SharedRuntime::out_preserve_stack_slots() {
2525   return 0;
2526 }
2527 
2528 
2529 // Number of stack slots between incoming argument block and the start of
2530 // a new frame.  The PROLOG must add this many slots to the stack.  The
2531 // EPILOG must remove this many slots.  amd64 needs two slots for the
2532 // return address and two for the saved rbp.
2533 uint SharedRuntime::in_preserve_stack_slots() {
2534   return 4 + 2 * VerifyStackAtCalls;
2535 }
2536 
2537 //------------------------------generate_deopt_blob----------------------------
2538 void SharedRuntime::generate_deopt_blob() {
2539   // Allocate space for the code
2540   ResourceMark rm;
2541   // Setup code generation tools
2542   int pad = 0;
2543   if (UseAVX > 2) {
2544     pad += 1024;
2545   }
2546 #if INCLUDE_JVMCI
2547   if (EnableJVMCI) {
2548     pad += 512; // Increase the buffer size when compiling for JVMCI
2549   }
2550 #endif
2551   CodeBuffer buffer("deopt_blob", 2560+pad, 1024);
2552   MacroAssembler* masm = new MacroAssembler(&buffer);
2553   int frame_size_in_words;
2554   OopMap* map = nullptr;
2555   OopMapSet *oop_maps = new OopMapSet();
2556 
2557   // -------------
2558   // This code enters when returning to a de-optimized nmethod.  A return
2559   // address has been pushed on the stack, and return values are in
2560   // registers.
2561   // If we are doing a normal deopt then we were called from the patched
2562   // nmethod from the point we returned to the nmethod. So the return
2563   // address on the stack is wrong by NativeCall::instruction_size
2564   // We will adjust the value so it looks like we have the original return
2565   // address on the stack (like when we eagerly deoptimized).
2566   // In the case of an exception pending when deoptimizing, we enter
2567   // with a return address on the stack that points after the call we patched
2568   // into the exception handler. We have the following register state from,
2569   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2570   //    rax: exception oop
2571   //    rbx: exception handler
2572   //    rdx: throwing pc
2573   // So in this case we simply jam rdx into the useless return address and
2574   // the stack looks just like we want.
2575   //
2576   // At this point we need to de-opt.  We save the argument return
2577   // registers.  We call the first C routine, fetch_unroll_info().  This
2578   // routine captures the return values and returns a structure which
2579   // describes the current frame size and the sizes of all replacement frames.
2580   // The current frame is compiled code and may contain many inlined
2581   // functions, each with their own JVM state.  We pop the current frame, then
2582   // push all the new frames.  Then we call the C routine unpack_frames() to
2583   // populate these frames.  Finally unpack_frames() returns us the new target
2584   // address.  Notice that callee-save registers are BLOWN here; they have
2585   // already been captured in the vframeArray at the time the return PC was
2586   // patched.
2587   address start = __ pc();
2588   Label cont;
2589 
2590   // Prolog for the non-exception case!
2591 
2592   // Save everything in sight.
2593   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true);
2594 
2595   // Normal deoptimization.  Save exec mode for unpack_frames.
2596   __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
2597   __ jmp(cont);
2598 
2599   int reexecute_offset = __ pc() - start;
2600 #if INCLUDE_JVMCI && !defined(COMPILER1)
2601   if (EnableJVMCI && UseJVMCICompiler) {
2602     // JVMCI does not use this kind of deoptimization
2603     __ should_not_reach_here();
2604   }
2605 #endif
2606 
2607   // Reexecute case
2608   // The return address is the pc that describes which bci to re-execute at
2609 
2610   // No need to update map as each call to save_live_registers will produce an identical oopmap
2611   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true);
2612 
2613   __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
2614   __ jmp(cont);
2615 
2616 #if INCLUDE_JVMCI
2617   Label after_fetch_unroll_info_call;
2618   int implicit_exception_uncommon_trap_offset = 0;
2619   int uncommon_trap_offset = 0;
2620 
2621   if (EnableJVMCI) {
2622     implicit_exception_uncommon_trap_offset = __ pc() - start;
2623 
2624     __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2625     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), NULL_WORD);
2626 
2627     uncommon_trap_offset = __ pc() - start;
2628 
2629     // Save everything in sight.
2630     RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true);
2631     // fetch_unroll_info needs to call last_java_frame()
2632     __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
2633 
2634     __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
2635     __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
2636 
2637     __ movl(r14, Deoptimization::Unpack_reexecute);
2638     __ mov(c_rarg0, r15_thread);
2639     __ movl(c_rarg2, r14); // exec mode
2640     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2641     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2642 
2643     __ reset_last_Java_frame(false);
2644 
2645     __ jmp(after_fetch_unroll_info_call);
2646   } // EnableJVMCI
2647 #endif // INCLUDE_JVMCI
2648 
2649   int exception_offset = __ pc() - start;
2650 
2651   // Prolog for exception case
2652 
2653   // All registers are dead at this entry point, except for rax and
2654   // rdx, which contain the exception oop and exception pc
2655   // respectively.  Set them in TLS and fall thru to the
2656   // unpack_with_exception_in_tls entry point.
2657 
2658   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
2659   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
2660 
2661   int exception_in_tls_offset = __ pc() - start;
2662 
2663   // new implementation because exception oop is now passed in JavaThread
2664 
2665   // Prolog for exception case
2666   // All registers must be preserved because they might be used by LinearScan
2667   // Exception oop and throwing PC are passed in JavaThread
2668   // tos: stack at point of call to method that threw the exception (i.e. only
2669   // args are on the stack, no return address)
2670 
2671   // make room on stack for the return address
2672   // It will be patched later with the throwing pc. The correct value is not
2673   // available now because loading it from memory would destroy registers.
2674   __ push(0);
2675 
2676   // Save everything in sight.
2677   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ true);
2678 
2679   // Now it is safe to overwrite any register
2680 
2681   // Deopt during an exception.  Save exec mode for unpack_frames.
2682   __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
2683 
2684   // load throwing pc from JavaThread and patch it as the return address
2685   // of the current frame. Then clear the field in JavaThread
2686 
2687   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
2688   __ movptr(Address(rbp, wordSize), rdx);
2689   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
2690 
2691 #ifdef ASSERT
2692   // verify that there is really an exception oop in JavaThread
2693   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
2694   __ verify_oop(rax);
2695 
2696   // verify that there is no pending exception
2697   Label no_pending_exception;
2698   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
2699   __ testptr(rax, rax);
2700   __ jcc(Assembler::zero, no_pending_exception);
2701   __ stop("must not have pending exception here");
2702   __ bind(no_pending_exception);
2703 #endif
2704 
2705   __ bind(cont);
2706 
2707   // Call C code.  Need thread and this frame, but NOT official VM entry
2708   // crud.  We cannot block on this call, no GC can happen.
2709   //
2710   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2711 
2712   // fetch_unroll_info needs to call last_java_frame().
2713 
2714   __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
2715 #ifdef ASSERT
2716   { Label L;
2717     __ cmpptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
2718     __ jcc(Assembler::equal, L);
2719     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2720     __ bind(L);
2721   }
2722 #endif // ASSERT
2723   __ mov(c_rarg0, r15_thread);
2724   __ movl(c_rarg1, r14); // exec_mode
2725   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2726 
2727   // Need to have an oopmap that tells fetch_unroll_info where to
2728   // find any register it might need.
2729   oop_maps->add_gc_map(__ pc() - start, map);
2730 
2731   __ reset_last_Java_frame(false);
2732 
2733 #if INCLUDE_JVMCI
2734   if (EnableJVMCI) {
2735     __ bind(after_fetch_unroll_info_call);
2736   }
2737 #endif
2738 
2739   // Load UnrollBlock* into rdi
2740   __ mov(rdi, rax);
2741 
2742   __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset()));
2743   Label noException;
2744   __ cmpl(r14, Deoptimization::Unpack_exception);   // Was exception pending?
2745   __ jcc(Assembler::notEqual, noException);
2746   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
2747   // QQQ this load is useless: exception_pc was already cleared to null above
2748   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
2749   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
2750   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
2751 
2752   __ verify_oop(rax);
2753 
2754   // Overwrite the result registers with the exception results.
2755   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
2756   // I think this is useless
2757   __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
2758 
2759   __ bind(noException);
2760 
2761   // Only register save data is on the stack.
2762   // Now restore the result registers.  Everything else is either dead
2763   // or captured in the vframeArray.
2764   RegisterSaver::restore_result_registers(masm);
2765 
2766   // All of the register save area has been popped off the stack. Only the
2767   // return address remains.
2768 
2769   // Pop all the frames we must move/replace.
2770   //
2771   // Frame picture (youngest to oldest)
2772   // 1: self-frame (no frame link)
2773   // 2: deopting frame  (no frame link)
2774   // 3: caller of deopting frame (could be compiled/interpreted).
2775   //
2776   // Note: by leaving the return address of self-frame on the stack
2777   // and using the size of frame 2 to adjust the stack
2778   // when we are done the return to frame 3 will still be on the stack.
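  //
  // As a sketch of the pointer arithmetic below (all values come from the UnrollBlock):
  //
  //   rsp += size_of_deoptimized_frame;    // now at the return address into (3)
  //   rsp += wordSize;                     // discard it; new pcs come from frame_pcs[]
  //   rsp -= caller_adjustment;            // extend (3) for the callee's locals
  //   for (i = 0; i < number_of_frames; i++) {
  //     push(frame_pcs[i]);                // return address of the new frame
  //     push(rbp); rbp = rsp;              // i.e. enter()
  //     rsp -= frame_sizes[i] - 2*wordSize;
  //   }
  //   push(frame_pcs[number_of_frames]);   // where the final ret below returns to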
2779 
2780   // Pop deoptimized frame
2781   __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2782   __ addptr(rsp, rcx);
2783 
2784   // rsp should be pointing at the return address to the caller (3)
2785 
2786   // Pick up the initial fp we should save
2787   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2788   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
2789 
2790 #ifdef ASSERT
2791   // Compilers generate code that bangs the stack by as much as the
2792   // interpreter would need, so this stack banging should never
2793   // trigger a fault. Verify that it does not on non-product builds.
2794   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2795   __ bang_stack_size(rbx, rcx);
2796 #endif
2797 
2798   // Load address of array of frame pcs into rcx
2799   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset()));
2800 
2801   // Trash the old pc
2802   __ addptr(rsp, wordSize);
2803 
2804   // Load address of array of frame sizes into rsi
2805   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset()));
2806 
2807   // Load counter into rdx
2808   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
2809 
2810   // Now adjust the caller's stack to make up for the extra locals
2811   // but record the original sp so that we can save it in the skeletal interpreter
2812   // frame and the stack walking of interpreter_sender will get the unextended sp
2813   // value and not the "real" sp value.
2814 
2815   const Register sender_sp = r8;
2816 
2817   __ mov(sender_sp, rsp);
2818   __ movl(rbx, Address(rdi,
2819                        Deoptimization::UnrollBlock::
2820                        caller_adjustment_offset()));
2821   __ subptr(rsp, rbx);
2822 
2823   // Push interpreter frames in a loop
2824   Label loop;
2825   __ bind(loop);
2826   __ movptr(rbx, Address(rsi, 0));      // Load frame size
2827   __ subptr(rbx, 2*wordSize);           // We'll push pc and rbp by hand
2828   __ pushptr(Address(rcx, 0));          // Save return address
2829   __ enter();                           // Save old & set new rbp
2830   __ subptr(rsp, rbx);                  // Prolog
2831   // This value is corrected by layout_activation_impl
2832   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2833   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
2834   __ mov(sender_sp, rsp);               // Pass sender_sp to next frame
2835   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2836   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2837   __ decrementl(rdx);                   // Decrement counter
2838   __ jcc(Assembler::notZero, loop);
2839   __ pushptr(Address(rcx, 0));          // Save final return address
2840 
2841   // Re-push self-frame
2842   __ enter();                           // Save old & set new ebp
2843 
2844   // Allocate a full-sized register save area.
2845   // Return address and rbp are in place, so we allocate two fewer words.
2846   __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
2847 
2848   // Restore frame locals after moving the frame
2849   __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
2850   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
2851 
2852   // Call C code.  Need thread but NOT official VM entry
2853   // crud.  We cannot block on this call, no GC can happen.  Call should
2854   // restore return values to their stack-slots with the new SP.
2855   //
2856   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2857 
2858   // Use rbp because the frames look interpreted now
2859   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2860   // Don't need the precise return PC here, just precise enough to point into this code blob.
2861   address the_pc = __ pc();
2862   __ set_last_Java_frame(noreg, rbp, the_pc, rscratch1);
2863 
2864   __ andptr(rsp, -(StackAlignmentInBytes));  // Fix stack alignment as required by ABI
2865   __ mov(c_rarg0, r15_thread);
2866   __ movl(c_rarg1, r14); // second arg: exec_mode
2867   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2868   // Revert SP alignment after call since we're going to do some SP relative addressing below
2869   __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
2870 
2871   // Set an oopmap for the call site
2872   // Use the same PC we used for the last java frame
2873   oop_maps->add_gc_map(the_pc - start,
2874                        new OopMap( frame_size_in_words, 0 ));
2875 
2876   // Clear fp AND pc
2877   __ reset_last_Java_frame(true);
2878 
2879   // Collect return values
2880   __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
2881   __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
2882   // I think this is useless (throwing pc?)
2883   __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
2884 
2885   // Pop self-frame.
2886   __ leave();                           // Epilog
2887 
2888   // Jump to interpreter
2889   __ ret(0);
2890 
2891   // Make sure all code is generated
2892   masm->flush();
2893 
2894   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2895   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2896 #if INCLUDE_JVMCI
2897   if (EnableJVMCI) {
2898     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2899     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2900   }
2901 #endif
2902 }
2903 
2904 #ifdef COMPILER2
2905 //------------------------------generate_uncommon_trap_blob--------------------
2906 void SharedRuntime::generate_uncommon_trap_blob() {
2907   // Allocate space for the code
2908   ResourceMark rm;
2909   // Setup code generation tools
2910   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2911   MacroAssembler* masm = new MacroAssembler(&buffer);
2912 
2913   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
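  // Note: SimpleRuntimeFrame offsets are 4-byte (jint) stack slots, so
  // framesize % 4 == 0  <=>  the frame size in bytes is a multiple of 16.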
2914 
2915   address start = __ pc();
2916 
2917   // Push self-frame.  We get here with a return address on the
2918   // stack, so rsp is only 8-byte (not 16-byte) aligned until we allocate our frame.
2919   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
2920 
2921   // No callee-saved registers. rbp is assumed to be implicitly saved
2922   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
2923 
2924   // The compiler left unloaded_class_index in j_rarg0, so move it to where
2925   // the runtime expects it.
2926   __ movl(c_rarg1, j_rarg0);
2927 
2928   __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
2929 
2930   // Call C code.  Need thread but NOT official VM entry
2931   // crud.  We cannot block on this call, no GC can happen.  Call should
2932   // capture callee-saved registers as well as return values.
2933   // Thread is in rdi already.
2934   //
2935   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2936 
2937   __ mov(c_rarg0, r15_thread);
2938   __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
2939   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2940 
2941   // Set an oopmap for the call site
2942   OopMapSet* oop_maps = new OopMapSet();
2943   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2944 
2945   // location of rbp is known implicitly by the frame sender code
2946 
2947   oop_maps->add_gc_map(__ pc() - start, map);
2948 
2949   __ reset_last_Java_frame(false);
2950 
2951   // Load UnrollBlock* into rdi
2952   __ mov(rdi, rax);
2953 
2954 #ifdef ASSERT
2955   { Label L;
2956     __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset()),
2957               Deoptimization::Unpack_uncommon_trap);
2958     __ jcc(Assembler::equal, L);
2959     __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2960     __ bind(L);
2961   }
2962 #endif
2963 
2964   // Pop all the frames we must move/replace.
2965   //
2966   // Frame picture (youngest to oldest)
2967   // 1: self-frame (no frame link)
2968   // 2: deopting frame  (no frame link)
2969   // 3: caller of deopting frame (could be compiled/interpreted).
2970 
2971   // Pop self-frame.  We have no frame, and must rely only on rax and rsp.
2972   __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
2973 
2974   // Pop deoptimized frame (int)
2975   __ movl(rcx, Address(rdi,
2976                        Deoptimization::UnrollBlock::
2977                        size_of_deoptimized_frame_offset()));
2978   __ addptr(rsp, rcx);
2979 
2980   // rsp should be pointing at the return address to the caller (3)
2981 
2982   // Pick up the initial fp we should save
2983   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2984   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
2985 
2986 #ifdef ASSERT
2987   // Compilers generate code that bangs the stack by as much as the
2988   // interpreter would need, so this stack banging should never
2989   // trigger a fault. Verify that it does not on non-product builds.
2990   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2991   __ bang_stack_size(rbx, rcx);
2992 #endif
2993 
2994   // Load address of array of frame pcs into rcx (address*)
2995   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset()));
2996 
2997   // Trash the return pc
2998   __ addptr(rsp, wordSize);
2999 
3000   // Load address of array of frame sizes into rsi (intptr_t*)
3001   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset()));
3002 
3003   // Counter
3004   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset())); // (int)
3005 
3006   // Now adjust the caller's stack to make up for the extra locals but
3007   // record the original sp so that we can save it in the skeletal
3008   // interpreter frame and the stack walking of interpreter_sender
3009   // will get the unextended sp value and not the "real" sp value.
3010 
3011   const Register sender_sp = r8;
3012 
3013   __ mov(sender_sp, rsp);
3014   __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset())); // (int)
3015   __ subptr(rsp, rbx);
3016 
3017   // Push interpreter frames in a loop
3018   Label loop;
3019   __ bind(loop);
3020   __ movptr(rbx, Address(rsi, 0)); // Load frame size
3021   __ subptr(rbx, 2 * wordSize);    // We'll push pc and rbp by hand
3022   __ pushptr(Address(rcx, 0));     // Save return address
3023   __ enter();                      // Save old & set new rbp
3024   __ subptr(rsp, rbx);             // Prolog
3025   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3026             sender_sp);            // Make it walkable
3027   // This value is corrected by layout_activation_impl
3028   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
3029   __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
3030   __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
3031   __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
3032   __ decrementl(rdx);              // Decrement counter
3033   __ jcc(Assembler::notZero, loop);
3034   __ pushptr(Address(rcx, 0));     // Save final return address
3035 
3036   // Re-push self-frame
3037   __ enter();                 // Save old & set new rbp
3038   __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3039                               // Prolog
3040 
3041   // Use rbp because the frames look interpreted now
3042   // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3043   // Don't need the precise return PC here, just precise enough to point into this code blob.
3044   address the_pc = __ pc();
3045   __ set_last_Java_frame(noreg, rbp, the_pc, rscratch1);
3046 
3047   // Call C code.  Need thread but NOT official VM entry
3048   // crud.  We cannot block on this call, no GC can happen.  Call should
3049   // restore return values to their stack-slots with the new SP.
3050   // Thread is in rdi already.
3051   //
3052   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3053 
3054   __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3055   __ mov(c_rarg0, r15_thread);
3056   __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3057   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3058 
3059   // Set an oopmap for the call site
3060   // Use the same PC we used for the last java frame
3061   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3062 
3063   // Clear fp AND pc
3064   __ reset_last_Java_frame(true);
3065 
3066   // Pop self-frame.
3067   __ leave();                 // Epilog
3068 
3069   // Jump to interpreter
3070   __ ret(0);
3071 
3072   // Make sure all code is generated
3073   masm->flush();
3074 
3075   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3076                                                  SimpleRuntimeFrame::framesize >> 1);
3077 }
3078 #endif // COMPILER2
3079 
3080 //------------------------------generate_handler_blob------
3081 //
3082 // Generate a special Compile2Runtime blob that saves all registers
3083 // and sets up an oopmap.
3084 //
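// In outline (a sketch of the code generated below, not a literal transcription):
//
//   if (!cause_return) push a slot for the return pc;   // filled from saved_exception_pc
//   save_live_registers();
//   set_last_Java_frame();
//   call_ptr(thread);                                    // the safepoint handler
//   if (thread->has_pending_exception()) {
//     restore_live_registers();
//     jmp StubRoutines::forward_exception_entry();
//   }
//   if (!cause_return) possibly step the stashed return pc over the poll instruction;
//   restore_live_registers();
//   ret;
//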
3085 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3086   assert(StubRoutines::forward_exception_entry() != nullptr,
3087          "must be generated before");
3088 
3089   ResourceMark rm;
3090   OopMapSet *oop_maps = new OopMapSet();
3091   OopMap* map;
3092 
3093   // Allocate space for the code.  Setup code generation tools.
3094   CodeBuffer buffer("handler_blob", 2048, 1024);
3095   MacroAssembler* masm = new MacroAssembler(&buffer);
3096 
3097   address start   = __ pc();
3098   address call_pc = nullptr;
3099   int frame_size_in_words;
3100   bool cause_return = (poll_type == POLL_AT_RETURN);
3101   bool save_wide_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3102 
3103   // Make room for return address (or push it again)
3104   if (!cause_return) {
3105     __ push(rbx);
3106   }
3107 
3108   // Save registers, fpu state, and flags
3109   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_wide_vectors);
3110 
3111   // The following is basically a call_VM.  However, we need the precise
3112   // address of the call in order to generate an oopmap. Hence, we do all the
3113   // work ourselves.
3114 
3115   __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);  // JavaFrameAnchor::capture_last_Java_pc() will get the pc from the return address, which we store next:
3116 
3117   // The return address must always be correct so that the frame constructor
3118   // never sees an invalid pc.
3119 
3120   if (!cause_return) {
3121     // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3122     // Additionally, rbx is a callee-saved register, so we can look at it later to determine
3123     // whether someone changed the return address for us!
3124     __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3125     __ movptr(Address(rbp, wordSize), rbx);
3126   }
3127 
3128   // Do the call
3129   __ mov(c_rarg0, r15_thread);
3130   __ call(RuntimeAddress(call_ptr));
3131 
3132   // Set an oopmap for the call site.  This oopmap will map all
3133   // oop-registers and debug-info registers as callee-saved.  This
3134   // will allow deoptimization at this safepoint to find all possible
3135   // debug-info recordings, as well as let GC find all oops.
3136 
3137   oop_maps->add_gc_map( __ pc() - start, map);
3138 
3139   Label noException;
3140 
3141   __ reset_last_Java_frame(false);
3142 
3143   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
3144   __ jcc(Assembler::equal, noException);
3145 
3146   // Exception pending
3147 
3148   RegisterSaver::restore_live_registers(masm, save_wide_vectors);
3149 
3150   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3151 
3152   // No exception case
3153   __ bind(noException);
3154 
3155   Label no_adjust;
3156 #ifdef ASSERT
3157   Label bail;
3158 #endif
3159   if (!cause_return) {
3160     Label no_prefix, not_special;
3161 
3162     // If our stashed return pc was modified by the runtime we avoid touching it
3163     __ cmpptr(rbx, Address(rbp, wordSize));
3164     __ jccb(Assembler::notEqual, no_adjust);
3165 
3166     // Skip over the poll instruction.
3167     // See NativeInstruction::is_safepoint_poll()
3168     // Possible encodings:
3169     //      85 00       test   %eax,(%rax)
3170     //      85 01       test   %eax,(%rcx)
3171     //      85 02       test   %eax,(%rdx)
3172     //      85 03       test   %eax,(%rbx)
3173     //      85 06       test   %eax,(%rsi)
3174     //      85 07       test   %eax,(%rdi)
3175     //
3176     //   41 85 00       test   %eax,(%r8)
3177     //   41 85 01       test   %eax,(%r9)
3178     //   41 85 02       test   %eax,(%r10)
3179     //   41 85 03       test   %eax,(%r11)
3180     //   41 85 06       test   %eax,(%r14)
3181     //   41 85 07       test   %eax,(%r15)
3182     //
3183     //      85 04 24    test   %eax,(%rsp)
3184     //   41 85 04 24    test   %eax,(%r12)
3185     //      85 45 00    test   %eax,0x0(%rbp)
3186     //   41 85 45 00    test   %eax,0x0(%r13)
3187 
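    // In C-like pseudocode, the adjustment below is roughly (p = stashed return pc):
    //
    //   if (p[0] == 0x41)            p++;  // skip the REX.B prefix, if any
    //   if ((p[1] & 7) == 4 ||
    //       (p[1] & 7) == 5)         p++;  // rsp/rbp/r12/r13 base needs an extra byte
    //   p += 2;                            // step over the 85 /r test itself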
3188     __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
3189     __ jcc(Assembler::notEqual, no_prefix);
3190     __ addptr(rbx, 1);
3191     __ bind(no_prefix);
3192 #ifdef ASSERT
3193     __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
3194 #endif
3195     // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
3196     // r12/rsp 0x04
3197     // r13/rbp 0x05
3198     __ movzbq(rcx, Address(rbx, 1));
3199     __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
3200     __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
3201     __ cmpptr(rcx, 1);
3202     __ jcc(Assembler::above, not_special);
3203     __ addptr(rbx, 1);
3204     __ bind(not_special);
3205 #ifdef ASSERT
3206     // Verify the correct encoding of the poll we're about to skip.
3207     __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
3208     __ jcc(Assembler::notEqual, bail);
3209     // Mask out the modrm bits
3210     __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
3211     // rax encodes to 0, so if the bits are nonzero it's incorrect
3212     __ jcc(Assembler::notZero, bail);
3213 #endif
3214     // Adjust return pc forward to step over the safepoint poll instruction
3215     __ addptr(rbx, 2);
3216     __ movptr(Address(rbp, wordSize), rbx);
3217   }
3218 
3219   __ bind(no_adjust);
3220   // Normal exit, restore registers and exit.
3221   RegisterSaver::restore_live_registers(masm, save_wide_vectors);
3222   __ ret(0);
3223 
3224 #ifdef ASSERT
3225   __ bind(bail);
3226   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3227 #endif
3228 
3229   // Make sure all code is generated
3230   masm->flush();
3231 
3232   // Fill-out other meta info
3233   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3234 }
3235 
3236 //
3237 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3238 //
3239 // Generate a stub that calls into the VM to find out the proper destination
3240 // of a Java call. All the argument registers are live at this point,
3241 // but since this is generic code we don't know what they are, and the caller
3242 // must do any GC of the args.
3243 //
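// In outline (a sketch, not a literal transcription of the code below):
//
//   save_live_registers();
//   address entry = destination(thread);          // result comes back in rax
//   if (thread->has_pending_exception()) {
//     restore_live_registers();
//     jmp StubRoutines::forward_exception_entry();
//   }
//   rbx = thread->vm_result_2();                  // the resolved Method*
//   restore_live_registers();                     // rax/rbx reloaded from the save area
//   jmp entry;
//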
3244 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3245   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
3246 
3247   // allocate space for the code
3248   ResourceMark rm;
3249 
3250   CodeBuffer buffer(name, 1200, 512);
3251   MacroAssembler* masm = new MacroAssembler(&buffer);
3252 
3253   int frame_size_in_words;
3254 
3255   OopMapSet *oop_maps = new OopMapSet();
3256   OopMap* map = nullptr;
3257 
3258   int start = __ offset();
3259 
3260   // No need to save vector registers since they are caller-saved anyway.
3261   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_wide_vectors*/ false);
3262 
3263   int frame_complete = __ offset();
3264 
3265   __ set_last_Java_frame(noreg, noreg, nullptr, rscratch1);
3266 
3267   __ mov(c_rarg0, r15_thread);
3268 
3269   __ call(RuntimeAddress(destination));
3270 
3271 
3272   // Set an oopmap for the call site.
3273   // We need this not only for callee-saved registers, but also for volatile
3274   // registers that the compiler might be keeping live across a safepoint.
3275 
3276   oop_maps->add_gc_map( __ offset() - start, map);
3277 
3278   // rax contains the address we are going to jump to assuming no exception got installed
3279 
3280   // clear last_Java_sp
3281   __ reset_last_Java_frame(false);
3282   // check for pending exceptions
3283   Label pending;
3284   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
3285   __ jcc(Assembler::notEqual, pending);
3286 
3287   // get the returned Method*
3288   __ get_vm_result_2(rbx, r15_thread);
3289   __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3290 
3291   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3292 
3293   RegisterSaver::restore_live_registers(masm);
3294 
3295   // We are back to the original state on entry and ready to go.
3296 
3297   __ jmp(rax);
3298 
3299   // Pending exception after the safepoint
3300 
3301   __ bind(pending);
3302 
3303   RegisterSaver::restore_live_registers(masm);
3304 
3305   // exception pending => remove activation and forward to exception handler
3306 
3307   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
3308 
3309   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3310   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3311 
3312   // -------------
3313   // make sure all code is generated
3314   masm->flush();
3315 
3316   // return the blob
3317   // (the frame size passed to new_runtime_stub is in words)
3318   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3319 }
3320 
3321 //------------------------------Montgomery multiplication------------------------
3322 //
3323 
3324 #ifndef _WINDOWS
3325 
3326 // Subtract 0:b from carry:a.  Return carry.
3327 static julong
3328 sub(julong a[], julong b[], julong carry, long len) {
3329   long long i = 0, cnt = len;
3330   julong tmp;
3331   asm volatile("clc; "
3332                "0: ; "
3333                "mov (%[b], %[i], 8), %[tmp]; "
3334                "sbb %[tmp], (%[a], %[i], 8); "
3335                "inc %[i]; dec %[cnt]; "
3336                "jne 0b; "
3337                "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3338                : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3339                : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3340                : "memory");
3341   return tmp;
3342 }
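// What the inline asm above computes, as a plain C++ sketch (the asm version is
// kept because it can use the hardware carry chain directly):
//
//   julong borrow = 0;
//   for (long i = 0; i < len; i++) {
//     julong bi = b[i] + borrow;                       // borrow is 0 or 1
//     borrow = (bi < borrow) || (a[i] < bi) ? 1 : 0;   // detect wrap-around
//     a[i] -= bi;                                      // a -= b, word by word
//   }
//   return carry - borrow;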
3343 
3344 // Multiply (unsigned) Long A by Long B, accumulating the double-
3345 // length result into the accumulator formed of T0, T1, and T2.
3346 #define MACC(A, B, T0, T1, T2)                                  \
3347 do {                                                            \
3348   unsigned long hi, lo;                                         \
3349   __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4"   \
3350            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
3351            : "r"(A), "a"(B) : "cc");                            \
3352  } while(0)
3353 
3354 // As above, but add twice the double-length result into the
3355 // accumulator.
3356 #define MACC2(A, B, T0, T1, T2)                                 \
3357 do {                                                            \
3358   unsigned long hi, lo;                                         \
3359   __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
3360            "add %%rax, %2; adc %%rdx, %3; adc $0, %4"           \
3361            : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
3362            : "r"(A), "a"(B) : "cc");                            \
3363  } while(0)
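
// A portable sketch of what MACC/MACC2 accumulate, assuming a compiler with
// unsigned __int128 (illustration only; the asm above is what is actually used):
//
//   static inline void macc_sketch(julong a, julong b,
//                                  julong& t0, julong& t1, julong& t2) {
//     unsigned __int128 p = (unsigned __int128)a * b;       // 128-bit product
//     unsigned __int128 s = (unsigned __int128)t0 + (julong)p;
//     t0 = (julong)s;                                       // low word, carry in s >> 64
//     s = (unsigned __int128)t1 + (julong)(p >> 64) + (julong)(s >> 64);
//     t1 = (julong)s;
//     t2 += (julong)(s >> 64);                              // absorb the final carry
//   }
//
// MACC2 accumulates the same double-length product twice.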
3364 
3365 #else //_WINDOWS
3366 
3367 static julong
3368 sub(julong a[], julong b[], julong carry, long len) {
3369   long i;
3370   julong tmp;
3371   unsigned char c = 1;
3372   for (i = 0; i < len; i++) {
3373     c = _addcarry_u64(c, a[i], ~b[i], &tmp);
3374     a[i] = tmp;
3375   }
3376   c = _addcarry_u64(c, carry, ~0, &tmp);
3377   return tmp;
3378 }
3379 
3380 // Multiply (unsigned) Long A by Long B, accumulating the double-
3381 // length result into the accumulator formed of T0, T1, and T2.
3382 #define MACC(A, B, T0, T1, T2)                          \
3383 do {                                                    \
3384   julong hi, lo;                            \
3385   lo = _umul128(A, B, &hi);                             \
3386   unsigned char c = _addcarry_u64(0, lo, T0, &T0);      \
3387   c = _addcarry_u64(c, hi, T1, &T1);                    \
3388   _addcarry_u64(c, T2, 0, &T2);                         \
3389  } while(0)
3390 
3391 // As above, but add twice the double-length result into the
3392 // accumulator.
3393 #define MACC2(A, B, T0, T1, T2)                         \
3394 do {                                                    \
3395   julong hi, lo;                            \
3396   lo = _umul128(A, B, &hi);                             \
3397   unsigned char c = _addcarry_u64(0, lo, T0, &T0);      \
3398   c = _addcarry_u64(c, hi, T1, &T1);                    \
3399   _addcarry_u64(c, T2, 0, &T2);                         \
3400   c = _addcarry_u64(0, lo, T0, &T0);                    \
3401   c = _addcarry_u64(c, hi, T1, &T1);                    \
3402   _addcarry_u64(c, T2, 0, &T2);                         \
3403  } while(0)
3404 
3405 #endif //_WINDOWS
3406 
3407 // Fast Montgomery multiplication.  The derivation of the algorithm is
3408 // in A Cryptographic Library for the Motorola DSP56000,
3409 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
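//
// The invariant behind the loops below, as a sketch: with inv == -n[0]^-1 mod 2^64
// (see the asserts), choosing m[i] = t0 * inv makes
//
//   t + m[i] * n[0] == 0   (mod 2^64)
//
// so the accumulator can be shifted right by one 64-bit word every iteration.
// The result satisfies m == a * b * 2^(-64*len) (mod n), possibly plus a few
// multiples of n, which the trailing sub() loop strips off.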
3410 
3411 static void NOINLINE
3412 montgomery_multiply(julong a[], julong b[], julong n[],
3413                     julong m[], julong inv, int len) {
3414   julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3415   int i;
3416 
3417   assert(inv * n[0] == ULLONG_MAX, "broken inverse in Montgomery multiply");
3418 
3419   for (i = 0; i < len; i++) {
3420     int j;
3421     for (j = 0; j < i; j++) {
3422       MACC(a[j], b[i-j], t0, t1, t2);
3423       MACC(m[j], n[i-j], t0, t1, t2);
3424     }
3425     MACC(a[i], b[0], t0, t1, t2);
3426     m[i] = t0 * inv;
3427     MACC(m[i], n[0], t0, t1, t2);
3428 
3429     assert(t0 == 0, "broken Montgomery multiply");
3430 
3431     t0 = t1; t1 = t2; t2 = 0;
3432   }
3433 
3434   for (i = len; i < 2*len; i++) {
3435     int j;
3436     for (j = i-len+1; j < len; j++) {
3437       MACC(a[j], b[i-j], t0, t1, t2);
3438       MACC(m[j], n[i-j], t0, t1, t2);
3439     }
3440     m[i-len] = t0;
3441     t0 = t1; t1 = t2; t2 = 0;
3442   }
3443 
3444   while (t0)
3445     t0 = sub(m, n, t0, len);
3446 }
3447 
3448 // Fast Montgomery squaring.  This uses asymptotically 25% fewer
3449 // multiplies so it should be up to 25% faster than Montgomery
3450 // multiplication.  However, its loop control is more complex and it
3451 // may actually run slower on some machines.
3452 
3453 static void NOINLINE
3454 montgomery_square(julong a[], julong n[],
3455                   julong m[], julong inv, int len) {
3456   julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3457   int i;
3458 
3459   assert(inv * n[0] == ULLONG_MAX, "broken inverse in Montgomery square");
3460 
3461   for (i = 0; i < len; i++) {
3462     int j;
3463     int end = (i+1)/2;
3464     for (j = 0; j < end; j++) {
3465       MACC2(a[j], a[i-j], t0, t1, t2);
3466       MACC(m[j], n[i-j], t0, t1, t2);
3467     }
3468     if ((i & 1) == 0) {
3469       MACC(a[j], a[j], t0, t1, t2);
3470     }
3471     for (; j < i; j++) {
3472       MACC(m[j], n[i-j], t0, t1, t2);
3473     }
3474     m[i] = t0 * inv;
3475     MACC(m[i], n[0], t0, t1, t2);
3476 
3477     assert(t0 == 0, "broken Montgomery square");
3478 
3479     t0 = t1; t1 = t2; t2 = 0;
3480   }
3481 
3482   for (i = len; i < 2*len; i++) {
3483     int start = i-len+1;
3484     int end = start + (len - start)/2;
3485     int j;
3486     for (j = start; j < end; j++) {
3487       MACC2(a[j], a[i-j], t0, t1, t2);
3488       MACC(m[j], n[i-j], t0, t1, t2);
3489     }
3490     if ((i & 1) == 0) {
3491       MACC(a[j], a[j], t0, t1, t2);
3492     }
3493     for (; j < len; j++) {
3494       MACC(m[j], n[i-j], t0, t1, t2);
3495     }
3496     m[i-len] = t0;
3497     t0 = t1; t1 = t2; t2 = 0;
3498   }
3499 
3500   while (t0)
3501     t0 = sub(m, n, t0, len);
3502 }
3503 
3504 // Swap words in a longword.
3505 static julong swap(julong x) {
3506   return (x << 32) | (x >> 32);
3507 }
3508 
3509 // Copy len longwords from s to d, word-swapping as we go.  The
3510 // destination array is reversed.
3511 static void reverse_words(julong *s, julong *d, int len) {
3512   d += len;
3513   while(len-- > 0) {
3514     d--;
3515     *d = swap(*s);
3516     s++;
3517   }
3518 }
3519 
3520 // The threshold at which squaring is advantageous was determined
3521 // experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz.
3522 #define MONTGOMERY_SQUARING_THRESHOLD 64
3523 
3524 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3525                                         jint len, jlong inv,
3526                                         jint *m_ints) {
3527   assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3528   int longwords = len/2;
3529 
3530   // Make very sure we don't use so much space that the stack might
3531   // overflow.  512 jints corresponds to a 16384-bit integer and
3532   // will use a total of 8K bytes of stack space here.
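  // Worked out: at the cap, len == 512 jints  =>  longwords == 256, and the four
  // scratch arrays below take 4 * 256 * sizeof(julong) == 8192 bytes.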
3533   int divisor = sizeof(julong) * 4;
3534   guarantee(longwords <= 8192 / divisor, "must be");
3535   int total_allocation = longwords * sizeof (julong) * 4;
3536   julong *scratch = (julong *)alloca(total_allocation);
3537 
3538   // Local scratch arrays
3539   julong
3540     *a = scratch + 0 * longwords,
3541     *b = scratch + 1 * longwords,
3542     *n = scratch + 2 * longwords,
3543     *m = scratch + 3 * longwords;
3544 
3545   reverse_words((julong *)a_ints, a, longwords);
3546   reverse_words((julong *)b_ints, b, longwords);
3547   reverse_words((julong *)n_ints, n, longwords);
3548 
3549   ::montgomery_multiply(a, b, n, m, (julong)inv, longwords);
3550 
3551   reverse_words(m, (julong *)m_ints, longwords);
3552 }
3553 
3554 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3555                                       jint len, jlong inv,
3556                                       jint *m_ints) {
3557   assert(len % 2 == 0, "array length in montgomery_square must be even");
3558   int longwords = len/2;
3559 
3560   // Make very sure we don't use so much space that the stack might
3561   // overflow.  512 jints corresponds to a 16384-bit integer and
3562   // will use a total of 6K bytes of stack space here.
3563   int divisor = sizeof(julong) * 3;
3564   guarantee(longwords <= (8192 / divisor), "must be");
3565   int total_allocation = longwords * sizeof (julong) * 3;
3566   julong *scratch = (julong *)alloca(total_allocation);
3567 
3568   // Local scratch arrays
3569   julong
3570     *a = scratch + 0 * longwords,
3571     *n = scratch + 1 * longwords,
3572     *m = scratch + 2 * longwords;
3573 
3574   reverse_words((julong *)a_ints, a, longwords);
3575   reverse_words((julong *)n_ints, n, longwords);
3576 
3577   if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3578     ::montgomery_square(a, n, m, (julong)inv, longwords);
3579   } else {
3580     ::montgomery_multiply(a, a, n, m, (julong)inv, longwords);
3581   }
3582 
3583   reverse_words(m, (julong *)m_ints, longwords);
3584 }
3585 
3586 #ifdef COMPILER2
3587 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3588 //
3589 //------------------------------generate_exception_blob---------------------------
3590 // Creates the exception blob at the end.
3591 // Using the exception blob, this code is jumped to from a compiled method.
3592 // (see emit_exception_handler in x86_64.ad file)
3593 //
3594 // Given an exception pc at a call, we call into the runtime for the
3595 // handler in this method. This handler might merely restore state
3596 // (i.e. callee-saved registers), unwind the frame, and jump to the
3597 // exception handler for the nmethod if there is no Java-level handler
3598 // for the nmethod.
3599 //
3600 // This code is entered with a jmp.
3601 //
3602 // Arguments:
3603 //   rax: exception oop
3604 //   rdx: exception pc
3605 //
3606 // Results:
3607 //   rax: exception oop
3608 //   rdx: exception pc in caller or ???
3609 //   destination: exception handler of caller
3610 //
3611 // Note: the exception pc MUST be at a call (precise debug information)
3612 //       Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
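//
// In outline (a sketch, not a literal transcription of the code below):
//
//   thread->set_exception_oop(rax);  thread->set_exception_pc(rdx);
//   handler = OptoRuntime::handle_exception_C(thread);
//   rax = thread->exception_oop();   rdx = thread->exception_pc();
//   thread->set_exception_oop(nullptr);       // GC must no longer see it as a root
//   jmp handler;                              // with this blob's frame removed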
3613 //
3614 
3615 void OptoRuntime::generate_exception_blob() {
3616   assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3617   assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3618   assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3619 
3620   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3621 
3622   // Allocate space for the code
3623   ResourceMark rm;
3624   // Setup code generation tools
3625   CodeBuffer buffer("exception_blob", 2048, 1024);
3626   MacroAssembler* masm = new MacroAssembler(&buffer);
3627 
3628 
3629   address start = __ pc();
3630 
3631   // Exception pc is 'return address' for stack walker
3632   __ push(rdx);
3633   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3634 
3635   // Save callee-saved registers.  See x86_64.ad.
3636 
3637   // rbp is an implicitly saved callee-saved register (i.e., the calling
3638   // convention will save/restore it in the prolog/epilog). Other than that
3639   // there are no callee-saved registers now that adapter frames are gone.
3640 
3641   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3642 
3643   // Store exception in Thread object. We cannot pass any arguments to the
3644   // handle_exception call, since we do not want to make any assumptions
3645   // about the size of the frame in which the exception happened.
3646   // c_rarg0 is either rdi (Linux) or rcx (Windows).
3647   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3648   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3649 
3650   // This call does all the hard work.  It checks if an exception handler
3651   // exists in the method.
3652   // If so, it returns the handler address.
3653   // If not, it prepares for stack-unwinding, restoring the callee-save
3654   // registers of the frame being removed.
3655   //
3656   // address OptoRuntime::handle_exception_C(JavaThread* thread)
3657 
3658   // At a method handle call, the stack may not be properly aligned
3659   // when returning with an exception.
3660   address the_pc = __ pc();
3661   __ set_last_Java_frame(noreg, noreg, the_pc, rscratch1);
3662   __ mov(c_rarg0, r15_thread);
3663   __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
3664   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3665 
3666   // Set an oopmap for the call site.  This oopmap will only be used if we
3667   // are unwinding the stack.  Hence, all locations will be dead.
3668   // Callee-saved registers will be the same as the frame above (i.e.,
3669   // handle_exception_stub), since they were restored when we got the
3670   // exception.
3671 
3672   OopMapSet* oop_maps = new OopMapSet();
3673 
3674   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3675 
3676   __ reset_last_Java_frame(false);
3677 
3678   // Restore callee-saved registers
3679 
3680   // rbp is an implicitly saved callee-saved register (i.e., the calling
3681   // convention will save/restore it in the prolog/epilog). Other than that
3682   // there are no callee-saved registers now that adapter frames are gone.
3683 
3684   __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
3685 
3686   __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
3687   __ pop(rdx);                  // No need for exception pc anymore
3688 
3689   // rax: exception handler
3690 
3691   // We have a handler in rax (could be deopt blob).
3692   __ mov(r8, rax);
3693 
3694   // Get the exception oop
3695   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3696   // Get the exception pc in case we are deoptimized
3697   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3698 #ifdef ASSERT
3699   __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), NULL_WORD);
3700   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), NULL_WORD);
3701 #endif
3702   // Clear the exception oop so GC no longer processes it as a root.
3703   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), NULL_WORD);
3704 
3705   // rax: exception oop
3706   // r8:  exception handler
3707   // rdx: exception pc
3708   // Jump to handler
3709 
3710   __ jmp(r8);
3711 
3712   // Make sure all code is generated
3713   masm->flush();
3714 
3715   // Set exception blob
3716   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3717 }
3718 #endif // COMPILER2