/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()    { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes()    { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes();

  // Total stack size in bytes for saving sve predicate registers.
  int total_sve_predicate_in_bytes();

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off + FPUStateSizeInWords,
                rfp_off = r0_off + (Register::number_of_registers - 2) * Register::max_slots_per_register,
                return_off = rfp_off + Register::max_slots_per_register,      // slot for return address
                reg_save_size = return_off + Register::max_slots_per_register};

};

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers() so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegister::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegister::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = v0_offset_in_bytes() + (slots_per_vect * FloatRegister::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}
int RegisterSaver::v0_offset_in_bytes() {
  // The floating point registers are located above the predicate registers,
  // if any, in the stack frame pushed by save_live_registers(). So the
  // offset depends on the total size of the predicate registers saved in
  // the stack frame.
  return (total_sve_predicate_in_bytes() / VMRegImpl::stack_slot_size) * BytesPerInt;
}

int RegisterSaver::total_sve_predicate_in_bytes() {
#ifdef COMPILER2
  if (_save_vectors && Matcher::supports_scalable_vector()) {
    return (Matcher::scalable_vector_reg_size(T_BYTE) >> LogBitsPerByte) *
           PRegister::number_of_registers;
  }
#endif
  return 0;
}

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;
  int sve_predicate_size_in_slots = 0;
  int total_predicate_in_bytes = total_sve_predicate_in_bytes();
  int total_predicate_in_slots = total_predicate_in_bytes / VMRegImpl::stack_slot_size;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  if (use_sve) {
    sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
    sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
    sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
  }
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegister::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegister::extra_save_slots_per_neon_register;
    }
    int extra_vector_bytes = extra_save_slots_per_register *
                             VMRegImpl::stack_slot_size *
                             FloatRegister::number_of_registers;
    additional_frame_words += ((extra_vector_bytes + total_predicate_in_bytes) / wordSize);
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register slots are 8 bytes wide, 32 floating-point registers.
      int sp_offset = Register::max_slots_per_register * i +
                      FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegister::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
                            (FloatRegister::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegister::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), r->as_VMReg());
  }

  return oop_map;
}

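// Restore the CPU state saved by save_live_registers() and tear down the
// frame: pop the saved rfp and lr, then re-authenticate the return address
// (a no-op unless pointer authentication is in use).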
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE), total_sve_predicate_in_bytes());
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
  __ authenticate_return_address();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
// The minimum vector size supported by SVE is 8 bytes, and we need to save
// predicate registers when the vector size is 8 bytes as well.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8 || (UseSVE > 0 && size >= 8);
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack
// pointer as frame sizes are fixed.
// VMRegImpl::stack0 refers to the first slot, 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static JNI
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the Java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        stk_args = align_up(stk_args, 2);
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;

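// Map a (possibly scalarized) Java return value described by sig_bt to
// return registers. Returns the number of registers used, or -1 if the
// values do not all fit in registers.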
int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {

  // Create the mapping between argument positions and registers.

  static const Register INT_ArgReg[java_return_convention_max_int] = {
    r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
  };

  static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
    j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      // Should T_METADATA be added to java_calling_convention as well?
    case T_METADATA:
      if (int_args < SharedRuntime::java_return_convention_max_int) {
        regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
        int_args++;
      } else {
        return -1;
      }
      break;
    case T_FLOAT:
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < SharedRuntime::java_return_convention_max_float) {
        regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
        fp_args++;
      } else {
        return -1;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return int_args + fp_args;
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ authenticate_return_address(c_rarg1);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

// For each inline type argument, sig includes the list of fields of
// the inline type. This utility function computes the number of
// arguments for the call if inline types are passed by reference (the
// calling convention the interpreter expects).
static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
  int total_args_passed = 0;
  if (InlineTypePassFieldsAsArgs) {
    for (int i = 0; i < sig_extended->length(); i++) {
      BasicType bt = sig_extended->at(i)._bt;
      if (bt == T_METADATA) {
        // In sig_extended, an inline type argument starts with:
        // T_METADATA, followed by the types of the fields of the
        // inline type and T_VOID to mark the end of the inline
        // type. Inline types are flattened so, for instance, in the
        // case of an inline type with an int field and an inline type
        // field that itself has 2 fields, an int and a long:
        // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
        // slot for the T_LONG) T_VOID (inner inline type) T_VOID
        // (outer inline type)
        total_args_passed++;
        int vt = 1;
        do {
          i++;
          BasicType bt = sig_extended->at(i)._bt;
          BasicType prev_bt = sig_extended->at(i-1)._bt;
          if (bt == T_METADATA) {
            vt++;
          } else if (bt == T_VOID &&
                     prev_bt != T_LONG &&
                     prev_bt != T_DOUBLE) {
            vt--;
          }
        } while (vt != 0);
      } else {
        total_args_passed++;
      }
    }
  } else {
    total_args_passed = sig_extended->length();
  }

  return total_args_passed;
}

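// Write one Java argument from its compiled-code location (a register or a
// caller stack slot described by reg_pair) to the address `to`: either a
// slot in the interpreter's outgoing argument area or a field of a buffered
// inline type. is_oop selects an oop store with the proper GC barriers.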
static void gen_c2i_adapter_helper(MacroAssembler* masm,
                                   BasicType bt,
                                   BasicType prev_bt,
                                   size_t size_in_bytes,
                                   const VMRegPair& reg_pair,
                                   const Address& to,
                                   Register tmp1,
                                   Register tmp2,
                                   Register tmp3,
                                   int extraspace,
                                   bool is_oop) {
  if (bt == T_VOID) {
    assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
    return;
  }

  // Say 4 args:
  // i   st_off
  // 0   32 T_LONG
  // 1   24 T_VOID
  // 2   16 T_OBJECT
  // 3    8 T_BOOL
  // -    0 return address
  //
  // However, to make things extra confusing: because we can fit a Java long/double
  // in a single slot on a 64-bit VM, and it would be silly to break them up, the
  // interpreter leaves one slot empty and only stores to a single slot. In this
  // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.

  bool wide = (size_in_bytes == wordSize);
  VMReg r_1 = reg_pair.first();
  VMReg r_2 = reg_pair.second();
  assert(r_2->is_valid() == wide, "invalid size");
  if (!r_1->is_valid()) {
    assert(!r_2->is_valid(), "");
    return;
  }

  if (!r_1->is_FloatRegister()) {
    Register val = r25;
    if (r_1->is_stack()) {
      // memory to memory: use r25 (the scratch registers are used by store_heap_oop)
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
    } else {
      val = r_1->as_Register();
    }
    assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
    if (is_oop) {
      __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
    } else {
      __ store_sized_value(to, val, size_in_bytes);
    }
  } else {
    if (wide) {
      __ strd(r_1->as_FloatRegister(), to);
    } else {
      // only a float: use just part of the slot
      __ strs(r_1->as_FloatRegister(), to);
    }
  }
}
static void gen_c2i_adapter(MacroAssembler *masm,
                            const GrowableArray<SigEntry>* sig_extended,
                            const VMRegPair *regs,
                            bool requires_clinit_barrier,
                            address& c2i_no_clinit_check_entry,
                            Label& skip_fixup,
                            address start,
                            OopMapSet* oop_maps,
                            int& frame_complete,
                            int& frame_size_in_words,
                            bool alloc_inline_receiver) {
  if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Name some registers to be used in the following code. We can use
  // anything except r0-r7 which are arguments in the Java calling
  // convention, rmethod (r12), and r13 which holds the outgoing sender
  // SP for the interpreter.
  Register buf_array = r10;   // Array of buffered inline types
  Register buf_oop = r11;     // Buffered inline type oop
  Register tmp1 = r15;
  Register tmp2 = r16;
  Register tmp3 = r17;

  if (InlineTypePassFieldsAsArgs) {
    // Is there an inline type argument?
    bool has_inline_argument = false;
    for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
      has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
    }
    if (has_inline_argument) {
      // There is at least one inline type argument: we're coming from
      // compiled code, so we have no buffers to back the inline types.
      // Allocate the buffers here with a runtime call.
      RegisterSaver reg_save(false /* save_vectors */);
      OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);

      frame_complete = __ offset();
      address the_pc = __ pc();

      Label retaddr;
      __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

      __ mov(c_rarg0, rthread);
      __ mov(c_rarg1, rmethod);
      __ mov(c_rarg2, (int64_t)alloc_inline_receiver);

      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
      __ blr(rscratch1);
      __ bind(retaddr);

      oop_maps->add_gc_map(__ pc() - start, map);
      __ reset_last_Java_frame(false);

      reg_save.restore_live_registers(masm);

      Label no_exception;
      __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
      __ cbz(rscratch1, no_exception);

      __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
      __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
      __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));

      __ bind(no_exception);

      // We get an array of objects from the runtime call
      __ get_vm_result(buf_array, rthread);
      __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
    }
  }

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int total_args_passed = compute_total_args_passed_int(sig_extended);
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, StackAlignmentInBytes);

  // set senderSP value
  __ mov(r19_sender_sp, sp);

  __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space

  // next_arg_comp is the next argument from the compiler point of
  // view (inline type fields are passed in registers/on the stack). In
  // sig_extended, an inline type argument starts with: T_METADATA,
  // followed by the types of the fields of the inline type and T_VOID
  // to mark the end of the inline type. ignored counts the number of
  // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
  // used to get the buffer for that argument from the pool of buffers
  // we allocated above and want to pass to the
  // interpreter. next_arg_int is the next argument from the
  // interpreter point of view (inline types are passed by reference).
  for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
       next_arg_comp < sig_extended->length(); next_arg_comp++) {
    assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
    assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
    BasicType bt = sig_extended->at(next_arg_comp)._bt;
    int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
    if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
      int next_off = st_off - Interpreter::stackElementSize;
      const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
      const VMRegPair reg_pair = regs[next_arg_comp-ignored];
      size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
      gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                             size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
      next_arg_int++;
#ifdef ASSERT
      if (bt == T_LONG || bt == T_DOUBLE) {
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
        __ str(rscratch1, Address(sp, st_off));
      }
#endif /* ASSERT */
    } else {
      ignored++;
      // get the buffer from the just allocated pool of buffers
      int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
      __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
      next_vt_arg++; next_arg_int++;
      int vt = 1;
      // write fields we get from compiled code in registers/stack
      // slots to the buffer: we know we are done with that inline type
      // argument when we hit the T_VOID that acts as an end of inline
      // type delimiter for this inline type. Inline types are flattened
      // so we might encounter embedded inline types. Each entry in
      // sig_extended contains a field offset in the buffer.
      Label L_null;
      do {
        next_arg_comp++;
        BasicType bt = sig_extended->at(next_arg_comp)._bt;
        BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
        if (bt == T_METADATA) {
          vt++;
          ignored++;
        } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
          vt--;
          ignored++;
        } else {
          int off = sig_extended->at(next_arg_comp)._offset;
          if (off == -1) {
            // Nullable inline type argument, emit null check
            VMReg reg = regs[next_arg_comp-ignored].first();
            Label L_notNull;
            if (reg->is_stack()) {
              int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
              __ ldrb(tmp1, Address(sp, ld_off));
              __ cbnz(tmp1, L_notNull);
            } else {
              __ cbnz(reg->as_Register(), L_notNull);
            }
            __ str(zr, Address(sp, st_off));
            __ b(L_null);
            __ bind(L_notNull);
            continue;
          }
          assert(off > 0, "offset in object should be positive");
          size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
          bool is_oop = is_reference_type(bt);
          gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
                                 size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
        }
      } while (vt != 0);
      // pass the buffer to the interpreter
      __ str(buf_oop, Address(sp, st_off));
      __ bind(L_null);
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {

  // Note: r19_sender_sp contains the senderSP on entry. We must
  // preserve it since we may do an i2c -> c2i transition if we lose a
  // race where compiled code goes non-entrant while we get args
  // ready.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != nullptr) {
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::initial_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::initial_stubs_code()->code_begin(),
                  StubRoutines::initial_stubs_code()->code_end(),
                  L_ok);
    }
    if (StubRoutines::final_stubs_code() != nullptr) {
      range_check(masm, rax, r11,
                  StubRoutines::final_stubs_code()->code_begin(),
                  StubRoutines::final_stubs_code()->code_end(),
                  L_ok);
    }
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  int total_args_passed = sig->length();

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    BasicType bt = sig->at(i)._bt;
    if (bt == T_VOID) {
      assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");

    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed with negative offsets so the LSW is at the LOW address.

        // ld_off is MSW so get LSW
        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  __ mov(rscratch2, rscratch1);
  __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
  __ mov(rscratch1, rscratch2);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
  __ br(rscratch1);
}

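// Verify the inline cache: compare the receiver's klass against the cached
// CompiledICData (the comparison and the branch to the IC miss stub are
// emitted by ic_check()) and load the speculated method into rmethod.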
static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
  Register data = rscratch2;
  __ ic_check(1 /* end_alignment */);
  __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));

  // Method might have been compiled since the call site was patched to
  // interpreted; if that is the case treat it as a miss so we can get
  // the call site corrected.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, skip_fixup);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
                                                            int comp_args_on_stack,
                                                            const GrowableArray<SigEntry>* sig,
                                                            const VMRegPair* regs,
                                                            const GrowableArray<SigEntry>* sig_cc,
                                                            const VMRegPair* regs_cc,
                                                            const GrowableArray<SigEntry>* sig_cc_ro,
                                                            const VMRegPair* regs_cc_ro,
                                                            AdapterFingerPrint* fingerprint,
                                                            AdapterBlob*& new_adapter,
                                                            bool allocate_code_blob) {

  address i2c_entry = __ pc();
  gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry        = __ pc();
  address c2i_unverified_inline_entry = __ pc();
  Label skip_fixup;

  gen_inline_cache_check(masm, skip_fixup);

  OopMapSet* oop_maps = new OopMapSet();
  int frame_complete = CodeOffsets::frame_never_safe;
  int frame_size_in_words = 0;

  // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
  address c2i_no_clinit_check_entry = nullptr;
  address c2i_inline_ro_entry = __ pc();
  if (regs_cc != regs_cc_ro) {
    // No class init barrier needed because method is guaranteed to be non-static
    gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
                    skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
    skip_fixup.reset();
  }

  // Scalarized c2i adapter
  address c2i_entry        = __ pc();
  address c2i_inline_entry = __ pc();
  gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
                  skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);

  // Non-scalarized c2i adapter
  if (regs != regs_cc) {
    c2i_unverified_inline_entry = __ pc();
    Label inline_entry_skip_fixup;
    gen_inline_cache_check(masm, inline_entry_skip_fixup);

    c2i_inline_entry = __ pc();
    gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
                    inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
  }

  // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
  // the GC knows about the location of oop argument locations passed to the c2i adapter.
  if (allocate_code_blob) {
    bool caller_must_gc_arguments = (regs != regs_cc);
    new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
  }

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                     VMRegPair *regs,
                                     int total_args_passed) {

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than word types are stored one after another.
        // The code is unable to handle this so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than word types are stored one after another.
        // The code is unable to handle this so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default:
    __ str(r0, Address(rfp, -wordSize));
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default:
    __ ldr(r0, Address(rfp, -wordSize));
    break;
  }
}
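
// Save live argument registers (as described by `args`) across a call into
// the VM: the integer registers are pushed as a set, and each live FP
// register is spilled to its own 16-byte stack slot so sp stays 16-byte
// aligned.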
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

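// Undo save_args: pop the integer register set first, then reload the FP
// registers in reverse argument order from their stack slots.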
static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    }
  }
  __ pop(x, sp);
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling sequence
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

// on exit, sp points to the ContinuationEntry
static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots) {
  assert(ContinuationEntry::size() % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::cont_offset())  % VMRegImpl::stack_slot_size == 0, "");
  assert(in_bytes(ContinuationEntry::chunk_offset()) % VMRegImpl::stack_slot_size == 0, "");

  stack_slots += (int)ContinuationEntry::size() / wordSize;
  __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

  OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));
  __ mov(rscratch1, sp); // we can't use sp as the source in str
  __ str(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));

  return map;
}

// on entry c_rarg1 points to the continuation
//          sp points to ContinuationEntry
//          c_rarg3 -- isVirtualThread
static void fill_continuation_entry(MacroAssembler* masm) {
#ifdef ASSERT
  __ movw(rscratch1, ContinuationEntry::cookie_value());
  __ strw(rscratch1, Address(sp, ContinuationEntry::cookie_offset()));
#endif

  __ str (c_rarg1, Address(sp, ContinuationEntry::cont_offset()));
  __ strw(c_rarg3, Address(sp, ContinuationEntry::flags_offset()));
  __ str (zr,      Address(sp, ContinuationEntry::chunk_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::argsize_offset()));
  __ strw(zr,      Address(sp, ContinuationEntry::pin_count_offset()));

  __ ldr(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ ldr(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));
  __ str(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));

  __ str(zr, Address(rthread, JavaThread::cont_fastpath_offset()));
  __ str(zr, Address(rthread, JavaThread::held_monitor_count_offset()));
}

// on entry, sp points to the ContinuationEntry
// on exit, rfp points to the spilled rfp in the entry frame
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifndef PRODUCT
  Label OK;
  __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
  __ cmp(sp, rscratch1);
  __ br(Assembler::EQ, OK);
  __ stop("incorrect sp1");
  __ bind(OK);
#endif
  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::cont_fastpath_offset()));

  if (CheckJNICalls) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ldr(rscratch1, Address(rthread, JavaThread::jni_monitor_count_offset()));
    __ cbz(rscratch1, L_skip_vthread_code);

    // Save return value potentially containing the exception oop in callee-saved R19.
    __ mov(r19, r0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::log_jni_monitor_still_held));
    // Restore potential return value.
    __ mov(r0, r19);

    // For vthreads we have to explicitly zero the JNI monitor count of the carrier
    // on termination. The held count is implicitly zeroed below when we restore from
    // the parent held count (which has to be zero).
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#ifdef ASSERT
  else {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ ldrw(rscratch1, Address(sp, ContinuationEntry::flags_offset()));
    __ cbzw(rscratch1, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
    __ str(zr, Address(rthread, JavaThread::jni_monitor_count_offset()));

    __ bind(L_skip_vthread_code);
  }
#endif

  __ ldr(rscratch1, Address(sp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::held_monitor_count_offset()));

  __ ldr(rscratch2, Address(sp, ContinuationEntry::parent_offset()));
  __ str(rscratch2, Address(rthread, JavaThread::cont_entry_offset()));
  __ add(rfp, sp, (int)ContinuationEntry::size());
}
1353 
1354 // enterSpecial(Continuation c, boolean isContinue, boolean isVirtualThread)
1355 // On entry: c_rarg1 -- the continuation object
1356 //           c_rarg2 -- isContinue
1357 //           c_rarg3 -- isVirtualThread
1358 static void gen_continuation_enter(MacroAssembler* masm,
1359                                    const methodHandle& method,
1360                                    const BasicType* sig_bt,
1361                                    const VMRegPair* regs,
1362                                    int& exception_offset,
1363                                    OopMapSet* oop_maps,
1364                                    int& frame_complete,
1365                                    int& stack_slots,
1366                                    int& interpreted_entry_offset,
1367                                    int& compiled_entry_offset) {
1368   //verify_oop_args(masm, method, sig_bt, regs);
1369   Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
1370 
1371   address start = __ pc();
1372 
1373   Label call_thaw, exit;
1374 
1375   // i2i entry used at interp_only_mode only
1376   interpreted_entry_offset = __ pc() - start;
1377   {
1378 
1379 #ifdef ASSERT
1380     Label is_interp_only;
1381     __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
1382     __ cbnzw(rscratch1, is_interp_only);
1383     __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
1384     __ bind(is_interp_only);
1385 #endif
1386 
1387     // Read interpreter arguments into registers (this is an ad-hoc i2c adapter)
1388     __ ldr(c_rarg1, Address(esp, Interpreter::stackElementSize*2));
1389     __ ldr(c_rarg2, Address(esp, Interpreter::stackElementSize*1));
1390     __ ldr(c_rarg3, Address(esp, Interpreter::stackElementSize*0));
1391     __ push_cont_fastpath(rthread);
1392 
1393     __ enter();
1394     stack_slots = 2; // will be adjusted in setup
1395     OopMap* map = continuation_enter_setup(masm, stack_slots);
1396     // The frame is complete here, but we only record it for the compiled entry, so the frame
1397     // would appear unsafe. That's okay: at the very worst we miss an async sample, and we're in interp_only_mode anyway.
1398 
1399     fill_continuation_entry(masm);
1400 
1401     __ cbnz(c_rarg2, call_thaw);
1402 
1403     const address tr_call = __ trampoline_call(resolve);
1404     if (tr_call == nullptr) {
1405       fatal("CodeCache is full at gen_continuation_enter");
1406     }
1407 
1408     oop_maps->add_gc_map(__ pc() - start, map);
1409     __ post_call_nop();
1410 
1411     __ b(exit);
1412 
1413     address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1414     if (stub == nullptr) {
1415       fatal("CodeCache is full at gen_continuation_enter");
1416     }
1417   }
1418 
1419   // compiled entry
1420   __ align(CodeEntryAlignment);
1421   compiled_entry_offset = __ pc() - start;
1422 
1423   __ enter();
1424   stack_slots = 2; // will be adjusted in setup
1425   OopMap* map = continuation_enter_setup(masm, stack_slots);
1426   frame_complete = __ pc() - start;
1427 
1428   fill_continuation_entry(masm);
1429 
1430   __ cbnz(c_rarg2, call_thaw);
1431 
1432   const address tr_call = __ trampoline_call(resolve);
1433   if (tr_call == nullptr) {
1434     fatal("CodeCache is full at gen_continuation_enter");
1435   }
1436 
1437   oop_maps->add_gc_map(__ pc() - start, map);
1438   __ post_call_nop();
1439 
1440   __ b(exit);
1441 
1442   __ bind(call_thaw);
1443 
1444   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1445   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1446   ContinuationEntry::_return_pc_offset = __ pc() - start;
1447   __ post_call_nop();
1448 
1449   __ bind(exit);
1450   continuation_enter_cleanup(masm);
1451   __ leave();
1452   __ ret(lr);
1453 
1454   /// exception handling
1455 
1456   exception_offset = __ pc() - start;
1457   {
1458       __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1459 
1460       continuation_enter_cleanup(masm);
1461 
1462       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1463       __ authenticate_return_address(c_rarg1);
1464       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1465 
1466       // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1467 
1468       __ mov(r1, r0); // the exception handler
1469       __ mov(r0, r19); // restore return value containing the exception oop
1470       __ verify_oop(r0);
1471 
1472       __ leave();
1473       __ mov(r3, lr);
1474       __ br(r1); // the exception handler
1475   }
1476 
1477   address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
1478   if (stub == nullptr) {
1479     fatal("CodeCache is full at gen_continuation_enter");
1480   }
1481 }
1482 
1483 static void gen_continuation_yield(MacroAssembler* masm,
1484                                    const methodHandle& method,
1485                                    const BasicType* sig_bt,
1486                                    const VMRegPair* regs,
1487                                    OopMapSet* oop_maps,
1488                                    int& frame_complete,
1489                                    int& stack_slots,
1490                                    int& compiled_entry_offset) {
1491     enum layout {
1492       rfp_off1,
1493       rfp_off2,
1494       lr_off,
1495       lr_off2,
1496       framesize // inclusive of return address
1497     };
1498     // assert(is_even(framesize/2), "sp not 16-byte aligned");
1499     stack_slots = framesize /  VMRegImpl::slots_per_word;
1500     assert(stack_slots == 2, "recheck layout");
1501 
1502     address start = __ pc();
1503 
1504     compiled_entry_offset = __ pc() - start;
1505     __ enter();
1506 
1507     __ mov(c_rarg1, sp);
1508 
1509     frame_complete = __ pc() - start;
1510     address the_pc = __ pc();
1511 
1512     __ post_call_nop(); // this must be exactly after the pc value that is pushed into the frame info; we use this nop for fast CodeBlob lookup
1513 
1514     __ mov(c_rarg0, rthread);
1515     __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
1516     __ call_VM_leaf(Continuation::freeze_entry(), 2);
1517     __ reset_last_Java_frame(true);
1518 
1519     Label pinned;
1520 
1521     __ cbnz(r0, pinned);
1522 
1523     // We've succeeded, set sp to the ContinuationEntry
1524     __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
1525     __ mov(sp, rscratch1);
1526     continuation_enter_cleanup(masm);
1527 
1528     __ bind(pinned); // pinned -- return to caller
1529 
1530     // handle pending exception thrown by freeze
1531     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1532     Label ok;
1533     __ cbz(rscratch1, ok);
1534     __ leave();
1535     __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1536     __ br(rscratch1);
1537     __ bind(ok);
1538 
1539     __ leave();
1540     __ ret(lr);
1541 
1542     OopMap* map = new OopMap(framesize, 1);
1543     oop_maps->add_gc_map(the_pc - start, map);
1544 }
1545 
1546 static void gen_special_dispatch(MacroAssembler* masm,
1547                                  const methodHandle& method,
1548                                  const BasicType* sig_bt,
1549                                  const VMRegPair* regs) {
1550   verify_oop_args(masm, method, sig_bt, regs);
1551   vmIntrinsics::ID iid = method->intrinsic_id();
1552 
1553   // Now write the args into the outgoing interpreter space
1554   bool     has_receiver   = false;
1555   Register receiver_reg   = noreg;
1556   int      member_arg_pos = -1;
1557   Register member_reg     = noreg;
1558   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1559   if (ref_kind != 0) {
1560     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1561     member_reg = r19;  // known to be free at this point
1562     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1563   } else if (iid == vmIntrinsics::_invokeBasic) {
1564     has_receiver = true;
1565   } else if (iid == vmIntrinsics::_linkToNative) {
1566     member_arg_pos = method->size_of_parameters() - 1;  // trailing NativeEntryPoint argument
1567     member_reg = r19;  // known to be free at this point
1568   } else {
1569     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1570   }
1571 
1572   if (member_reg != noreg) {
1573     // Load the member_arg into register, if necessary.
1574     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1575     VMReg r = regs[member_arg_pos].first();
1576     if (r->is_stack()) {
1577       __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1578     } else {
1579       // no data motion is needed
1580       member_reg = r->as_Register();
1581     }
1582   }
1583 
1584   if (has_receiver) {
1585     // Make sure the receiver is loaded into a register.
1586     assert(method->size_of_parameters() > 0, "oob");
1587     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1588     VMReg r = regs[0].first();
1589     assert(r->is_valid(), "bad receiver arg");
1590     if (r->is_stack()) {
1591       // Porting note:  This assumes that compiled calling conventions always
1592       // pass the receiver oop in a register.  If this is not true on some
1593       // platform, pick a temp and load the receiver from stack.
1594       fatal("receiver always in a register");
1595       receiver_reg = r2;  // known to be free at this point
1596       __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1597     } else {
1598       // no data motion is needed
1599       receiver_reg = r->as_Register();
1600     }
1601   }
1602 
1603   // Figure out which address we are really jumping to:
1604   MethodHandles::generate_method_handle_dispatch(masm, iid,
1605                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1606 }
1607 
1608 // ---------------------------------------------------------------------------
1609 // Generate a native wrapper for a given method.  The method takes arguments
1610 // in the Java compiled code convention, marshals them to the native
1611 // convention (handlizes oops, etc), transitions to native, makes the call,
1612 // returns to java state (possibly blocking), unhandlizes any result and
1613 // returns.
1614 //
1615 // Critical native functions are a shorthand for the use of
1616 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1617 // functions.  The wrapper is expected to unpack the arguments before
1618 // passing them to the callee. Critical native functions leave the state _in_Java,
1619 // since they block out GC.
1620 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1621 // block and the check for pending exceptions, since it's impossible for them
1622 // to be thrown.
1623 //
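// A rough sketch of the wrapper generated below for the plain JNI case
// (illustrative only; the real code handles many more details):
//
//   ic_check; enter; sub sp, frame_size          // prologue + nmethod entry barrier
//   shuffle Java args -> C args, handlizing oops // "the Grand Shuffle"
//   if (synchronized) lock the receiver (or class mirror)
//   thread->state = _thread_in_native; call native_func
//   thread->state = _thread_in_native_trans; dmb; safepoint/suspend poll
//   thread->state = _thread_in_Java
//   if (synchronized) unlock
//   resolve jobject result; reset handle block; check pending exception; ret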
1624 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1625                                                 const methodHandle& method,
1626                                                 int compile_id,
1627                                                 BasicType* in_sig_bt,
1628                                                 VMRegPair* in_regs,
1629                                                 BasicType ret_type) {
1630   if (method->is_continuation_native_intrinsic()) {
1631     int exception_offset = -1;
1632     OopMapSet* oop_maps = new OopMapSet();
1633     int frame_complete = -1;
1634     int stack_slots = -1;
1635     int interpreted_entry_offset = -1;
1636     int vep_offset = -1;
1637     if (method->is_continuation_enter_intrinsic()) {
1638       gen_continuation_enter(masm,
1639                              method,
1640                              in_sig_bt,
1641                              in_regs,
1642                              exception_offset,
1643                              oop_maps,
1644                              frame_complete,
1645                              stack_slots,
1646                              interpreted_entry_offset,
1647                              vep_offset);
1648     } else if (method->is_continuation_yield_intrinsic()) {
1649       gen_continuation_yield(masm,
1650                              method,
1651                              in_sig_bt,
1652                              in_regs,
1653                              oop_maps,
1654                              frame_complete,
1655                              stack_slots,
1656                              vep_offset);
1657     } else {
1658       guarantee(false, "Unknown Continuation native intrinsic");
1659     }
1660 
1661 #ifdef ASSERT
1662     if (method->is_continuation_enter_intrinsic()) {
1663       assert(interpreted_entry_offset != -1, "Must be set");
1664       assert(exception_offset != -1,         "Must be set");
1665     } else {
1666       assert(interpreted_entry_offset == -1, "Must be unset");
1667       assert(exception_offset == -1,         "Must be unset");
1668     }
1669     assert(frame_complete != -1,    "Must be set");
1670     assert(stack_slots != -1,       "Must be set");
1671     assert(vep_offset != -1,        "Must be set");
1672 #endif
1673 
1674     __ flush();
1675     nmethod* nm = nmethod::new_native_nmethod(method,
1676                                               compile_id,
1677                                               masm->code(),
1678                                               vep_offset,
1679                                               frame_complete,
1680                                               stack_slots,
1681                                               in_ByteSize(-1),
1682                                               in_ByteSize(-1),
1683                                               oop_maps,
1684                                               exception_offset);
1685     if (nm == nullptr) return nm;
1686     if (method->is_continuation_enter_intrinsic()) {
1687       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1688     } else if (method->is_continuation_yield_intrinsic()) {
1689       _cont_doYield_stub = nm;
1690     } else {
1691       guarantee(false, "Unknown Continuation native intrinsic");
1692     }
1693     return nm;
1694   }
1695 
1696   if (method->is_method_handle_intrinsic()) {
1697     vmIntrinsics::ID iid = method->intrinsic_id();
1698     intptr_t start = (intptr_t)__ pc();
1699     int vep_offset = ((intptr_t)__ pc()) - start;
1700 
1701     // First instruction must be a nop as it may need to be patched on deoptimisation
1702     __ nop();
1703     gen_special_dispatch(masm,
1704                          method,
1705                          in_sig_bt,
1706                          in_regs);
1707     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1708     __ flush();
1709     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1710     return nmethod::new_native_nmethod(method,
1711                                        compile_id,
1712                                        masm->code(),
1713                                        vep_offset,
1714                                        frame_complete,
1715                                        stack_slots / VMRegImpl::slots_per_word,
1716                                        in_ByteSize(-1),
1717                                        in_ByteSize(-1),
1718                                        nullptr);
1719   }
1720   address native_func = method->native_function();
1721   assert(native_func != nullptr, "must have function");
1722 
1723   // An OopMap for lock (and class if static)
1724   OopMapSet *oop_maps = new OopMapSet();
1725   intptr_t start = (intptr_t)__ pc();
1726 
1727   // We have received a description of where all the java args are located
1728   // on entry to the wrapper. We need to convert these args to where
1729   // the jni function will expect them. To figure out where they go
1730   // we convert the java signature to a C signature by inserting
1731   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1732 
1733   const int total_in_args = method->size_of_parameters();
1734   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1735 
1736   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1737   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1738   BasicType* in_elem_bt = nullptr;
1739 
1740   int argc = 0;
1741   out_sig_bt[argc++] = T_ADDRESS;
1742   if (method->is_static()) {
1743     out_sig_bt[argc++] = T_OBJECT;
1744   }
1745 
1746   for (int i = 0; i < total_in_args ; i++ ) {
1747     out_sig_bt[argc++] = in_sig_bt[i];
1748   }
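  // For example (illustrative): a static native "int m(long)" yields
  //   out_sig_bt = { T_ADDRESS /* JNIEnv* */, T_OBJECT /* class mirror */,
  //                  T_LONG, T_VOID /* second half of the long */ }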
1749 
1750   // Now figure out where the args must be stored and how much stack space
1751   // they require.
1752   int out_arg_slots;
1753   out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, total_c_args);
1754 
1755   if (out_arg_slots < 0) {
1756     return nullptr;
1757   }
1758 
1759   // Compute framesize for the wrapper.  We need to handlize all oops in
1760   // incoming registers
1761 
1762   // Calculate the total number of stack slots we will need.
1763 
1764   // First count the abi requirement plus all of the outgoing args
1765   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1766 
1767   // Now the space for the inbound oop handle area
1768   int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
1769 
1770   int oop_handle_offset = stack_slots;
1771   stack_slots += total_save_slots;
1772 
1773   // Now any space we need for handlizing a klass if static method
1774 
1775   int klass_slot_offset = 0;
1776   int klass_offset = -1;
1777   int lock_slot_offset = 0;
1778   bool is_static = false;
1779 
1780   if (method->is_static()) {
1781     klass_slot_offset = stack_slots;
1782     stack_slots += VMRegImpl::slots_per_word;
1783     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1784     is_static = true;
1785   }
1786 
1787   // Plus a lock if needed
1788 
1789   if (method->is_synchronized()) {
1790     lock_slot_offset = stack_slots;
1791     stack_slots += VMRegImpl::slots_per_word;
1792   }
1793 
1794   // Now a place (+2) to save return values or temp during shuffling
1795   // + 4 for return address (which we own) and saved rfp
1796   stack_slots += 6;
1797 
1798   // Ok The space we have allocated will look like:
1799   //
1800   //
1801   // FP-> |                     |
1802   //      |---------------------|
1803   //      | 2 slots for moves   |
1804   //      |---------------------|
1805   //      | lock box (if sync)  |
1806   //      |---------------------| <- lock_slot_offset
1807   //      | klass (if static)   |
1808   //      |---------------------| <- klass_slot_offset
1809   //      | oopHandle area      |
1810   //      |---------------------| <- oop_handle_offset (8 java arg registers)
1811   //      | outbound memory     |
1812   //      | based arguments     |
1813   //      |                     |
1814   //      |---------------------|
1815   //      |                     |
1816   // SP-> | out_preserved_slots |
1817   //
1818   //
1819 
1820 
1821   // Now compute actual number of stack words we need rounding to make
1822   // stack properly aligned.
1823   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1824 
1825   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
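  // Worked example (illustrative): for a static synchronized native whose
  // outgoing args all fit in registers, and assuming out_preserve_stack_slots()
  // returns 0 on this port:
  //   0 (out args) + 16 (oop handle area) + 2 (klass) + 2 (lock) + 6 = 26 slots,
  // aligned up to 28 slots, i.e. stack_size == 112 bytes.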
1826 
1827   // First thing make an ic check to see if we should even be here
1828 
1829   // We are free to use all registers as temps without saving them and
1830   // restoring them except rfp. rfp is the only callee save register
1831   // as far as the interpreter and the compiler(s) are concerned.
1832 
1833   const Register receiver = j_rarg0;
1834 
1835   Label exception_pending;
1836 
1837   assert_different_registers(receiver, rscratch1);
1838   __ verify_oop(receiver);
1839   __ ic_check(8 /* end_alignment */);
1840 
1841   // Verified entry point must be aligned
1842   int vep_offset = ((intptr_t)__ pc()) - start;
1843 
1844   // If we have to make this method not-entrant we'll overwrite its
1845   // first instruction with a jump.  For this action to be legal we
1846   // must ensure that this first instruction is a B, BL, NOP, BKPT,
1847   // SVC, HVC, or SMC.  Make it a NOP.
1848   __ nop();
1849 
1850   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1851     Label L_skip_barrier;
1852     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1853     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1854     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1855 
1856     __ bind(L_skip_barrier);
1857   }
1858 
1859   // Generate stack overflow check
1860   __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1861 
1862   // Generate a new frame for the wrapper.
1863   __ enter();
1864   // -2 because return address is already present and so is saved rfp
1865   __ sub(sp, sp, stack_size - 2*wordSize);
1866 
1867   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1868   bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */);
1869 
1870   // Frame is now completed as far as size and linkage.
1871   int frame_complete = ((intptr_t)__ pc()) - start;
1872 
1873   // We use r20 as the oop handle for the receiver/klass
1874   // It is callee save so it survives the call to native
1875 
1876   const Register oop_handle_reg = r20;
1877 
1878   //
1879   // We immediately shuffle the arguments so that any vm call we have to
1880   // make from here on out (sync slow path, jvmti, etc.) we will have
1881   // captured the oops from our caller and have a valid oopMap for
1882   // them.
1883 
1884   // -----------------
1885   // The Grand Shuffle
1886 
1887   // The Java calling convention is either equal to (linux) or denser than (win64) the
1888   // c calling convention. However, because of the jni_env argument the c calling
1889   // convention always has at least one more (and two for static) arguments than Java.
1890   // Therefore if we move the args from java -> c backwards then we will never have
1891   // a register->register conflict and we don't have to build a dependency graph
1892   // and figure out how to break any cycles.
1893   //
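  // Concretely (illustrative): on this port the Java argument registers are
  // offset by one from the C ones (j_rarg0 == c_rarg1, j_rarg1 == c_rarg2, ...),
  // so for non-static methods most register args are already where the C
  // convention wants them once the JNIEnv* is placed in c_rarg0.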
1894 
1895   // Record esp-based slot for receiver on stack for non-static methods
1896   int receiver_offset = -1;
1897 
1898   // This is a trick. We double the stack slots so we can claim
1899   // the oops in the caller's frame. Since we are sure to have
1900   // more args than the caller, doubling is enough to make
1901   // sure we can capture all the incoming oop args from the
1902   // caller.
1903   //
1904   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1905 
1906   // Mark location of rfp (someday)
1907   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1908 
1909 
1910   int float_args = 0;
1911   int int_args = 0;
1912 
1913 #ifdef ASSERT
1914   bool reg_destroyed[Register::number_of_registers];
1915   bool freg_destroyed[FloatRegister::number_of_registers];
1916   for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
1917     reg_destroyed[r] = false;
1918   }
1919   for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
1920     freg_destroyed[f] = false;
1921   }
1922 
1923 #endif /* ASSERT */
1924 
1925   // For JNI natives the incoming and outgoing registers are offset upwards.
1926   GrowableArray<int> arg_order(2 * total_in_args);
1927   VMRegPair tmp_vmreg;
1928   tmp_vmreg.set2(r19->as_VMReg());
1929 
1930   for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1931     arg_order.push(i);
1932     arg_order.push(c_arg);
1933   }
1934 
1935   int temploc = -1;
1936   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1937     int i = arg_order.at(ai);
1938     int c_arg = arg_order.at(ai + 1);
1939     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1940     assert(c_arg != -1 && i != -1, "wrong order");
1941 #ifdef ASSERT
1942     if (in_regs[i].first()->is_Register()) {
1943       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1944     } else if (in_regs[i].first()->is_FloatRegister()) {
1945       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1946     }
1947     if (out_regs[c_arg].first()->is_Register()) {
1948       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1949     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1950       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1951     }
1952 #endif /* ASSERT */
1953     switch (in_sig_bt[i]) {
1954       case T_ARRAY:
1955       case T_OBJECT:
1956         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1957                        ((i == 0) && (!is_static)),
1958                        &receiver_offset);
1959         int_args++;
1960         break;
1961       case T_VOID:
1962         break;
1963 
1964       case T_FLOAT:
1965         __ float_move(in_regs[i], out_regs[c_arg]);
1966         float_args++;
1967         break;
1968 
1969       case T_DOUBLE:
1970         assert( i + 1 < total_in_args &&
1971                 in_sig_bt[i + 1] == T_VOID &&
1972                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1973         __ double_move(in_regs[i], out_regs[c_arg]);
1974         float_args++;
1975         break;
1976 
1977       case T_LONG :
1978         __ long_move(in_regs[i], out_regs[c_arg]);
1979         int_args++;
1980         break;
1981 
1982       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1983 
1984       default:
1985         __ move32_64(in_regs[i], out_regs[c_arg]);
1986         int_args++;
1987     }
1988   }
1989 
1990   // point c_arg at the first arg that is already loaded in case we
1991   // need to spill before we call out
1992   int c_arg = total_c_args - total_in_args;
1993 
1994   // Pre-load a static method's oop into c_rarg1.
1995   if (method->is_static()) {
1996 
1997     //  load oop into a register
1998     __ movoop(c_rarg1,
1999               JNIHandles::make_local(method->method_holder()->java_mirror()));
2000 
2001     // Now handlize the static class mirror; it's known not-null.
2002     __ str(c_rarg1, Address(sp, klass_offset));
2003     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2004 
2005     // Now get the handle
2006     __ lea(c_rarg1, Address(sp, klass_offset));
2007     // and protect the arg if we must spill
2008     c_arg--;
2009   }
2010 
2011   // Change state to native (we save the return address in the thread, since it might not
2012   // be pushed on the stack when we do a stack traversal).
2013   // We use the same pc/oopMap repeatedly when we call out
2014 
2015   Label native_return;
2016   __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
2017 
2018   Label dtrace_method_entry, dtrace_method_entry_done;
2019   if (DTraceMethodProbes) {
2020     __ b(dtrace_method_entry);
2021     __ bind(dtrace_method_entry_done);
2022   }
2023 
2024   // RedefineClasses() tracing support for obsolete method entry
2025   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2026     // protect the args we've loaded
2027     save_args(masm, total_c_args, c_arg, out_regs);
2028     __ mov_metadata(c_rarg1, method());
2029     __ call_VM_leaf(
2030       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2031       rthread, c_rarg1);
2032     restore_args(masm, total_c_args, c_arg, out_regs);
2033   }
2034 
2035   // Lock a synchronized method
2036 
2037   // Register definitions used by locking and unlocking
2038 
2039   const Register swap_reg = r0;
2040   const Register obj_reg  = r19;  // Will contain the oop
2041   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2042   const Register old_hdr  = r13;  // value of old header at unlock time
2043   const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
2044   const Register tmp = lr;
2045 
2046   Label slow_path_lock;
2047   Label lock_done;
2048 
2049   if (method->is_synchronized()) {
2050     Label count;
2051     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2052 
2053     // Get the handle (the 2nd argument)
2054     __ mov(oop_handle_reg, c_rarg1);
2055 
2056     // Get address of the box
2057 
2058     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2059 
2060     // Load the oop from the handle
2061     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2062 
2063     if (LockingMode == LM_MONITOR) {
2064       __ b(slow_path_lock);
2065     } else if (LockingMode == LM_LEGACY) {
2066       // Load (object->mark() | 1) into swap_reg %r0
2067       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2068       __ orr(swap_reg, rscratch1, 1);
2069       if (EnableValhalla) {
2070         // Mask inline_type bit such that we go to the slow path if object is an inline type
2071         __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
2072       }
2073 
2074       // Save (object->mark() | 1) into BasicLock's displaced header
2075       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2076 
2077       // src -> dest iff dest == r0 else r0 <- dest
2078       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
2079 
2080       // Hmm should this move to the slow path code area???
2081 
2082       // Test if the oopMark is an obvious stack pointer, i.e.,
2083       //  1) (mark & 3) == 0, and
2084       //  2) sp <= mark < mark + os::pagesize()
2085       // These 3 tests can be done by evaluating the following
2086       // expression: ((mark - sp) & (3 - os::vm_page_size())),
2087       // assuming both stack pointer and pagesize have their
2088       // least significant 2 bits clear.
2089       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
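      // Worked example (illustrative): with a 4K page, 3 - 4096 ==
      // 0x...fffff003, so the AND result is zero exactly when the low two
      // bits of (mark - sp) are clear and 0 <= mark - sp < 4096, i.e. the
      // mark looks like a stack address within a page above sp.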
2090 
2091       __ sub(swap_reg, sp, swap_reg);
2092       __ neg(swap_reg, swap_reg);
2093       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
2094 
2095       // Save the test result, for recursive case, the result is zero
2096       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2097       __ br(Assembler::NE, slow_path_lock);
2098     } else {
2099       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2100       __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
2101     }
2102     __ bind(count);
2103     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
2104 
2105     // Slow path will re-enter here
2106     __ bind(lock_done);
2107   }
2108 
2109 
2110   // Finally just about ready to make the JNI call
2111 
2112   // get JNIEnv* which is first argument to native
2113   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
2114 
2115   // Now set thread in native
2116   __ mov(rscratch1, _thread_in_native);
2117   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2118   __ stlrw(rscratch1, rscratch2);
2119 
2120   __ rt_call(native_func);
2121 
2122   __ bind(native_return);
2123 
2124   intptr_t return_pc = (intptr_t) __ pc();
2125   oop_maps->add_gc_map(return_pc - start, map);
2126 
2127   // Verify or restore cpu control state after JNI call
2128   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
2129 
2130   // Unpack native results.
2131   switch (ret_type) {
2132   case T_BOOLEAN: __ c2bool(r0);                     break;
2133   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
2134   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
2135   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
2136   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
2137   case T_DOUBLE :
2138   case T_FLOAT  :
2139     // Result is in v0 we'll save as needed
2140     break;
2141   case T_ARRAY:                 // Really a handle
2142   case T_OBJECT:                // Really a handle
2143       break; // can't de-handlize until after safepoint check
2144   case T_VOID: break;
2145   case T_LONG: break;
2146   default       : ShouldNotReachHere();
2147   }
2148 
2149   Label safepoint_in_progress, safepoint_in_progress_done;
2150   Label after_transition;
2151 
2152   // Switch thread to "native transition" state before reading the synchronization state.
2153   // This additional state is necessary because reading and testing the synchronization
2154   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2155   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2156   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2157   //     Thread A is resumed to finish this native method, but doesn't block here since it
2158   //     didn't see any synchronization in progress, and escapes.
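  // The required ordering, sketched (matches the code below):
  //   strw _thread_in_native_trans -> thread state   (the state write)
  //   dmb ish                                        (order the write before the poll read;
  //                                                   skipped under UseSystemMemoryBarrier)
  //   read the safepoint/suspend state; branch to the slow path if set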
2159   __ mov(rscratch1, _thread_in_native_trans);
2160 
2161   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
2162 
2163   // Force this write out before the read below
2164   if (!UseSystemMemoryBarrier) {
2165     __ dmb(Assembler::ISH);
2166   }
2167 
2168   __ verify_sve_vector_length();
2169 
2170   // Check for safepoint operation in progress and/or pending suspend requests.
2171   {
2172     // No need for acquire as Java threads always disarm themselves.
2173     __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
2174     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
2175     __ cbnzw(rscratch1, safepoint_in_progress);
2176     __ bind(safepoint_in_progress_done);
2177   }
2178 
2179   // change thread state
2180   __ mov(rscratch1, _thread_in_Java);
2181   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2182   __ stlrw(rscratch1, rscratch2);
2183   __ bind(after_transition);
2184 
2185   Label reguard;
2186   Label reguard_done;
2187   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
2188   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
2189   __ br(Assembler::EQ, reguard);
2190   __ bind(reguard_done);
2191 
2192   // native result if any is live
2193 
2194   // Unlock
2195   Label unlock_done;
2196   Label slow_path_unlock;
2197   if (method->is_synchronized()) {
2198 
2199     // Get locked oop from the handle we passed to jni
2200     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2201 
2202     Label done, not_recursive;
2203 
2204     if (LockingMode == LM_LEGACY) {
2205       // Simple recursive lock?
2206       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2207       __ cbnz(rscratch1, not_recursive);
2208       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
2209       __ b(done);
2210     }
2211 
2212     __ bind(not_recursive);
2213 
2214     // Must save r0 if it is live now because cmpxchg must use it
2215     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2216       save_native_result(masm, ret_type, stack_slots);
2217     }
2218 
2219     if (LockingMode == LM_MONITOR) {
2220       __ b(slow_path_unlock);
2221     } else if (LockingMode == LM_LEGACY) {
2222       // get address of the stack lock
2223       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2224       //  get old displaced header
2225       __ ldr(old_hdr, Address(r0, 0));
2226 
2227       // Atomic swap old header if oop still contains the stack lock
2228       Label count;
2229       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
2230       __ bind(count);
2231       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
2232     } else {
2233       assert(LockingMode == LM_LIGHTWEIGHT, "");
2234       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
2235       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
2236     }
2237 
2238     // slow path re-enters here
2239     __ bind(unlock_done);
2240     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2241       restore_native_result(masm, ret_type, stack_slots);
2242     }
2243 
2244     __ bind(done);
2245   }
2246 
2247   Label dtrace_method_exit, dtrace_method_exit_done;
2248   if (DTraceMethodProbes) {
2249     __ b(dtrace_method_exit);
2250     __ bind(dtrace_method_exit_done);
2251   }
2252 
2253   __ reset_last_Java_frame(false);
2254 
2255   // Unbox oop result, e.g. JNIHandles::resolve result.
2256   if (is_reference_type(ret_type)) {
2257     __ resolve_jobject(r0, r1, r2);
2258   }
2259 
2260   if (CheckJNICalls) {
2261     // clear_pending_jni_exception_check
2262     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2263   }
2264 
2265   // reset handle block
2266   __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2267   __ str(zr, Address(r2, JNIHandleBlock::top_offset()));
2268 
2269   __ leave();
2270 
2271   // Any exception pending?
2272   __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2273   __ cbnz(rscratch1, exception_pending);
2274 
2275   // We're done
2276   __ ret(lr);
2277 
2278   // Unexpected paths are out of line and go here
2279 
2280   // forward the exception
2281   __ bind(exception_pending);
2282 
2283   // and forward the exception
2284   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2285 
2286   // Slow path locking & unlocking
2287   if (method->is_synchronized()) {
2288 
2289     __ block_comment("Slow path lock {");
2290     __ bind(slow_path_lock);
2291 
2292     // We have last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM.
2293     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2294 
2295     // protect the args we've loaded
2296     save_args(masm, total_c_args, c_arg, out_regs);
2297 
2298     __ mov(c_rarg0, obj_reg);
2299     __ mov(c_rarg1, lock_reg);
2300     __ mov(c_rarg2, rthread);
2301 
2302     // Not a leaf but we have last_Java_frame setup as we want
2303     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2304     restore_args(masm, total_c_args, c_arg, out_regs);
2305 
2306 #ifdef ASSERT
2307     { Label L;
2308       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2309       __ cbz(rscratch1, L);
2310       __ stop("no pending exception allowed on exit from monitorenter");
2311       __ bind(L);
2312     }
2313 #endif
2314     __ b(lock_done);
2315 
2316     __ block_comment("} Slow path lock");
2317 
2318     __ block_comment("Slow path unlock {");
2319     __ bind(slow_path_unlock);
2320 
2321     // If we haven't already saved the native result we must save it now as the
2322     // float registers are volatile across the upcoming call into the VM.
2323 
2324     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2325       save_native_result(masm, ret_type, stack_slots);
2326     }
2327 
2328     __ mov(c_rarg2, rthread);
2329     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2330     __ mov(c_rarg0, obj_reg);
2331 
2332     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2333     // NOTE that obj_reg == r19 currently
2334     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2335     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2336 
2337     __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2338 
2339 #ifdef ASSERT
2340     {
2341       Label L;
2342       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2343       __ cbz(rscratch1, L);
2344       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2345       __ bind(L);
2346     }
2347 #endif /* ASSERT */
2348 
2349     __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2350 
2351     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2352       restore_native_result(masm, ret_type, stack_slots);
2353     }
2354     __ b(unlock_done);
2355 
2356     __ block_comment("} Slow path unlock");
2357 
2358   } // synchronized
2359 
2360   // SLOW PATH Reguard the stack if needed
2361 
2362   __ bind(reguard);
2363   save_native_result(masm, ret_type, stack_slots);
2364   __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2365   restore_native_result(masm, ret_type, stack_slots);
2366   // and continue
2367   __ b(reguard_done);
2368 
2369   // SLOW PATH safepoint
2370   {
2371     __ block_comment("safepoint {");
2372     __ bind(safepoint_in_progress);
2373 
2374     // Don't use call_VM as it will see a possible pending exception and forward it
2375     // and never return here preventing us from clearing _last_native_pc down below.
2376     //
2377     save_native_result(masm, ret_type, stack_slots);
2378     __ mov(c_rarg0, rthread);
2379 #ifndef PRODUCT
2380   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2381 #endif
2382     __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2383     __ blr(rscratch1);
2384 
2385     // Restore any method result value
2386     restore_native_result(masm, ret_type, stack_slots);
2387 
2388     __ b(safepoint_in_progress_done);
2389     __ block_comment("} safepoint");
2390   }
2391 
2392   // SLOW PATH dtrace support
2393   if (DTraceMethodProbes) {
2394     {
2395       __ block_comment("dtrace entry {");
2396       __ bind(dtrace_method_entry);
2397 
2398       // We have all of the arguments set up at this point. We must not clobber any of
2399       // the register arguments, so we save and restore them around the call below.
2400 
2401       save_args(masm, total_c_args, c_arg, out_regs);
2402       __ mov_metadata(c_rarg1, method());
2403       __ call_VM_leaf(
2404         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2405         rthread, c_rarg1);
2406       restore_args(masm, total_c_args, c_arg, out_regs);
2407       __ b(dtrace_method_entry_done);
2408       __ block_comment("} dtrace entry");
2409     }
2410 
2411     {
2412       __ block_comment("dtrace exit {");
2413       __ bind(dtrace_method_exit);
2414       save_native_result(masm, ret_type, stack_slots);
2415       __ mov_metadata(c_rarg1, method());
2416       __ call_VM_leaf(
2417         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2418         rthread, c_rarg1);
2419       restore_native_result(masm, ret_type, stack_slots);
2420       __ b(dtrace_method_exit_done);
2421       __ block_comment("} dtrace exit");
2422     }
2423   }
2424 
2425   __ flush();
2426 
2427   nmethod *nm = nmethod::new_native_nmethod(method,
2428                                             compile_id,
2429                                             masm->code(),
2430                                             vep_offset,
2431                                             frame_complete,
2432                                             stack_slots / VMRegImpl::slots_per_word,
2433                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2434                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2435                                             oop_maps);
2436 
2437   return nm;
2438 }
2439 
2440 // this function returns the adjustment size (in number of words) to a c2i adapter
2441 // activation for use during deoptimization
2442 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2443   assert(callee_locals >= callee_parameters,
2444           "test and remove; got more parms than locals");
2445   if (callee_locals < callee_parameters)
2446     return 0;                   // No adjustment for negative locals
2447   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2448   // diff is counted in stack words
2449   return align_up(diff, 2);
2450 }
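// For example (illustrative): with callee_parameters == 2 and callee_locals == 5,
// diff is 3 stack words (Interpreter::stackElementWords is 1 on this port),
// rounded up to 4 to keep the adjusted sp 16-byte aligned.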
2451 
2452 
2453 //------------------------------generate_deopt_blob----------------------------
2454 void SharedRuntime::generate_deopt_blob() {
2455   // Allocate space for the code
2456   ResourceMark rm;
2457   // Setup code generation tools
2458   int pad = 0;
2459 #if INCLUDE_JVMCI
2460   if (EnableJVMCI) {
2461     pad += 512; // Increase the buffer size when compiling for JVMCI
2462   }
2463 #endif
2464   const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
2465   CodeBuffer buffer(name, 2048+pad, 1024);
2466   MacroAssembler* masm = new MacroAssembler(&buffer);
2467   int frame_size_in_words;
2468   OopMap* map = nullptr;
2469   OopMapSet *oop_maps = new OopMapSet();
2470   RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2471 
2472   // -------------
2473   // This code enters when returning to a de-optimized nmethod.  A return
2474   // address has been pushed on the stack, and return values are in
2475   // registers.
2476   // If we are doing a normal deopt then we were called from the patched
2477   // nmethod from the point we returned to the nmethod. So the return
2478   // address on the stack is wrong by NativeCall::instruction_size
2479   // We will adjust the value so it looks like we have the original return
2480   // address on the stack (like when we eagerly deoptimized).
2481   // In the case of an exception pending when deoptimizing, we enter
2482   // with a return address on the stack that points after the call we patched
2483   // into the exception handler. We have the following register state from,
2484   // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2485   //    r0: exception oop
2486   //    r19: exception handler
2487   //    r3: throwing pc
2488   // So in this case we simply jam r3 into the useless return address and
2489   // the stack looks just like we want.
2490   //
2491   // At this point we need to de-opt.  We save the argument return
2492   // registers.  We call the first C routine, fetch_unroll_info().  This
2493   // routine captures the return values and returns a structure which
2494   // describes the current frame size and the sizes of all replacement frames.
2495   // The current frame is compiled code and may contain many inlined
2496   // functions, each with their own JVM state.  We pop the current frame, then
2497   // push all the new frames.  Then we call the C routine unpack_frames() to
2498   // populate these frames.  Finally unpack_frames() returns us the new target
2499   // address.  Notice that callee-save registers are BLOWN here; they have
2500   // already been captured in the vframeArray at the time the return PC was
2501   // patched.
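  // In outline (illustrative):
  //   save all registers; info = fetch_unroll_info(thread)
  //   pop the deoptimized frame (its state is already in the vframeArray)
  //   for each replacement frame i: push a skeletal frame of
  //     info->frame_sizes[i] with return pc info->frame_pcs[i]
  //   push the self-frame and call unpack_frames(thread, exec_mode)
  //   restore the result registers and return to the unpacked code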
2502   address start = __ pc();
2503   Label cont;
2504 
2505   // Prolog for non exception case!
2506 
2507   // Save everything in sight.
2508   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2509 
2510   // Normal deoptimization.  Save exec mode for unpack_frames.
2511   __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2512   __ b(cont);
2513 
2514   int reexecute_offset = __ pc() - start;
2515 #if INCLUDE_JVMCI && !defined(COMPILER1)
2516   if (UseJVMCICompiler) {
2517     // JVMCI does not use this kind of deoptimization
2518     __ should_not_reach_here();
2519   }
2520 #endif
2521 
2522   // Reexecute case
2523   // return address is the pc that describes which bci to re-execute at
2524 
2525   // No need to update map as each call to save_live_registers will produce identical oopmap
2526   (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2527 
2528   __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2529   __ b(cont);
2530 
2531 #if INCLUDE_JVMCI
2532   Label after_fetch_unroll_info_call;
2533   int implicit_exception_uncommon_trap_offset = 0;
2534   int uncommon_trap_offset = 0;
2535 
2536   if (EnableJVMCI) {
2537     implicit_exception_uncommon_trap_offset = __ pc() - start;
2538 
2539     __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2540     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2541 
2542     uncommon_trap_offset = __ pc() - start;
2543 
2544     // Save everything in sight.
2545     reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2546     // fetch_unroll_info needs to call last_java_frame()
2547     Label retaddr;
2548     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2549 
2550     __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2551     __ movw(rscratch1, -1);
2552     __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2553 
2554     __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2555     __ mov(c_rarg0, rthread);
2556     __ movw(c_rarg2, rcpool); // exec mode
2557     __ lea(rscratch1,
2558            RuntimeAddress(CAST_FROM_FN_PTR(address,
2559                                            Deoptimization::uncommon_trap)));
2560     __ blr(rscratch1);
2561     __ bind(retaddr);
2562     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2563 
2564     __ reset_last_Java_frame(false);
2565 
2566     __ b(after_fetch_unroll_info_call);
2567   } // EnableJVMCI
2568 #endif // INCLUDE_JVMCI
2569 
2570   int exception_offset = __ pc() - start;
2571 
2572   // Prolog for exception case
2573 
2574   // all registers are dead at this entry point, except for r0, and
2575   // r3 which contain the exception oop and exception pc
2576   // respectively.  Set them in TLS and fall thru to the
2577   // unpack_with_exception_in_tls entry point.
2578 
2579   __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2580   __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2581 
2582   int exception_in_tls_offset = __ pc() - start;
2583 
2584   // new implementation because exception oop is now passed in JavaThread
2585 
2586   // Prolog for exception case
2587   // All registers must be preserved because they might be used by LinearScan
2588   // Exception oop and throwing PC are passed in JavaThread
2589   // tos: stack at point of call to method that threw the exception (i.e. only
2590   // args are on the stack, no return address)
2591 
2592   // The return address pushed by save_live_registers will be patched
2593   // later with the throwing pc. The correct value is not available
2594   // now because loading it from memory would destroy registers.
2595 
2596   // NB: The SP at this point must be the SP of the method that is
2597   // being deoptimized.  Deoptimization assumes that the frame created
2598   // here by save_live_registers is immediately below the method's SP.
2599   // This is a somewhat fragile mechanism.
2600 
2601   // Save everything in sight.
2602   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2603 
2604   // Now it is safe to overwrite any register
2605 
2606   // Deopt during an exception.  Save exec mode for unpack_frames.
2607   __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2608 
2609   // load throwing pc from JavaThread and patch it as the return address
2610   // of the current frame. Then clear the field in JavaThread
2611   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2612   __ protect_return_address(r3);
2613   __ str(r3, Address(rfp, wordSize));
2614   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2615 
2616 #ifdef ASSERT
2617   // verify that there is really an exception oop in JavaThread
2618   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2619   __ verify_oop(r0);
2620 
2621   // verify that there is no pending exception
2622   Label no_pending_exception;
2623   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2624   __ cbz(rscratch1, no_pending_exception);
2625   __ stop("must not have pending exception here");
2626   __ bind(no_pending_exception);
2627 #endif
2628 
2629   __ bind(cont);
2630 
2631   // Call C code.  Need thread and this frame, but NOT official VM entry
2632   // crud.  We cannot block on this call, no GC can happen.
2633   //
2634   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2635 
2636   // fetch_unroll_info needs to call last_java_frame().
2637 
2638   Label retaddr;
2639   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2640 #ifdef ASSERT
2641   { Label L;
2642     __ ldr(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
2643     __ cbz(rscratch1, L);
2644     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2645     __ bind(L);
2646   }
2647 #endif // ASSERT
2648   __ mov(c_rarg0, rthread);
2649   __ mov(c_rarg1, rcpool);
2650   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2651   __ blr(rscratch1);
2652   __ bind(retaddr);
2653 
2654   // Need to have an oopmap that tells fetch_unroll_info where to
2655   // find any register it might need.
2656   oop_maps->add_gc_map(__ pc() - start, map);
2657 
2658   __ reset_last_Java_frame(false);
2659 
2660 #if INCLUDE_JVMCI
2661   if (EnableJVMCI) {
2662     __ bind(after_fetch_unroll_info_call);
2663   }
2664 #endif
2665 
2666   // Load UnrollBlock* into r5
2667   __ mov(r5, r0);
2668 
2669   __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
2670   Label noException;
2671   __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
2672   __ br(Assembler::NE, noException);
2673   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2674   // QQQ this load is useless: exception_pc was already cleared to null above
2675   __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2676   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2677   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2678 
2679   __ verify_oop(r0);
2680 
2681   // Overwrite the result registers with the exception results.
2682   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2683   // I think this is useless
2684   // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2685 
2686   __ bind(noException);
2687 
2688   // Only register save data is on the stack.
2689   // Now restore the result registers.  Everything else is either dead
2690   // or captured in the vframeArray.
2691 
2692   // Restore fp result register
2693   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2694   // Restore integer result register
2695   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2696 
2697   // Pop all of the register save area off the stack
2698   __ add(sp, sp, frame_size_in_words * wordSize);
2699 
2700   // All of the register save area has been popped off the stack. Only the
2701   // return address remains.
2702 
2703   // Pop all the frames we must move/replace.
2704   //
2705   // Frame picture (youngest to oldest)
2706   // 1: self-frame (no frame link)
2707   // 2: deopting frame  (no frame link)
2708   // 3: caller of deopting frame (could be compiled/interpreted).
2709   //
2710   // Note: by leaving the return address of the self-frame on the stack
2711   // and using the size of frame 2 to adjust the stack, the return
2712   // address for frame 3 will still be on the stack when we are done.
2713 
2714   // Pop deoptimized frame
2715   __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
2716   __ sub(r2, r2, 2 * wordSize);
2717   __ add(sp, sp, r2);
2718   __ ldp(rfp, zr, __ post(sp, 2 * wordSize));
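     // The two-step unwind above: SP is first advanced past the frame body
     // (frame size minus the two-word frame link), then the ldp pops the link
     // pair, reloading rfp and discarding the saved return address into zr.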
2719 
2720 #ifdef ASSERT
2721   // Compilers generate code that bangs the stack by as much as the
2722   // interpreter would need. So this stack banging should never
2723   // trigger a fault. Verify that it does not on non-product builds.
2724   __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
2725   __ bang_stack_size(r19, r2);
2726 #endif
2727   // Load address of array of frame pcs into r2
2728   __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
2729 
2730   // Trash the old pc
2731   // __ addptr(sp, wordSize);  FIXME ????
2732 
2733   // Load address of array of frame sizes into r4
2734   __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
2735 
2736   // Load counter into r3
2737   __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
2738 
2739   // Now adjust the caller's stack to make up for the extra locals, but
2740   // record the original sp so that we can save it in the skeletal
2741   // interpreter frame; the stack walking of interpreter_sender will then
2742   // see the unextended sp value rather than the "real" sp value.
2743 
2744   const Register sender_sp = r6;
2745 
2746   __ mov(sender_sp, sp);
2747   __ ldrw(r19, Address(r5,
2748                        Deoptimization::UnrollBlock::
2749                        caller_adjustment_offset()));
2750   __ sub(sp, sp, r19);
2751 
2752   // Push interpreter frames in a loop
2753   __ mov(rscratch1, (uint64_t)0xDEADDEAD);        // Make a recognizable pattern
2754   __ mov(rscratch2, rscratch1);
2755   Label loop;
2756   __ bind(loop);
2757   __ ldr(r19, Address(__ post(r4, wordSize)));          // Load frame size
2758   __ sub(r19, r19, 2*wordSize);           // We'll push pc and fp by hand
2759   __ ldr(lr, Address(__ post(r2, wordSize)));  // Load pc
2760   __ enter();                           // Save old & set new fp
2761   __ sub(sp, sp, r19);                  // Prolog
2762   // This value is corrected by layout_activation_impl
2763   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2764   __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2765   __ mov(sender_sp, sp);               // Pass sender_sp to next frame
2766   __ sub(r3, r3, 1);                   // Decrement counter
2767   __ cbnz(r3, loop);
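     // The loop above corresponds roughly to this C-like sketch (illustrative
     // only; the names mirror the registers used above):
     //
     //   do {
     //     size = *frame_sizes++;                    // r19, via r4
     //     pc   = *frame_pcs++;                      // loaded into lr via r2
     //     push(pc); push(fp); fp = sp;              // enter()
     //     sp  -= size - 2 * wordSize;               // pc and fp already pushed
     //     fp[interpreter_frame_last_sp_offset]   = 0;
     //     fp[interpreter_frame_sender_sp_offset] = sender_sp;
     //     sender_sp = sp;
     //   } while (--number_of_frames != 0);          // counter in r3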
2768 
2769   // Re-push self-frame
2770   __ ldr(lr, Address(r2));
2771   __ enter();
2772 
2773   // Allocate a full sized register save area.  We subtract 2 because
2774   // enter() just pushed 2 words
2775   __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2776 
2777   // Restore frame locals after moving the frame
2778   __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2779   __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2780 
2781   // Call C code.  Need thread but NOT official VM entry
2782   // crud.  We cannot block on this call, no GC can happen.  Call should
2783   // restore return values to their stack-slots with the new SP.
2784   //
2785   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2786 
2787   // Use rfp because the frames look interpreted now
2788   // Don't need the precise return PC here, just precise enough to point into this code blob.
2789   address the_pc = __ pc();
2790   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2791 
2792   __ mov(c_rarg0, rthread);
2793   __ movw(c_rarg1, rcpool); // second arg: exec_mode
2794   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2795   __ blr(rscratch1);
2796 
2797   // Set an oopmap for the call site
2798   // Use the same PC we used for the last java frame
2799   oop_maps->add_gc_map(the_pc - start,
2800                        new OopMap(frame_size_in_words, 0));
2801 
2802   // Clear fp AND pc
2803   __ reset_last_Java_frame(true);
2804 
2805   // Collect return values
2806   __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2807   __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2808   // I think this is useless (throwing pc?)
2809   // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2810 
2811   // Pop self-frame.
2812   __ leave();                           // Epilog
2813 
2814   // Jump to interpreter
2815   __ ret(lr);
2816 
2817   // Make sure all code is generated
2818   masm->flush();
2819 
2820   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2821   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2822 #if INCLUDE_JVMCI
2823   if (EnableJVMCI) {
2824     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2825     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2826   }
2827 #endif
2828 }
2829 
2830 // Number of stack slots between incoming argument block and the start of
2831 // a new frame.  The PROLOG must add this many slots to the stack.  The
2832 // EPILOG must remove this many slots. aarch64 needs two words for the
2833 // return address and fp, and each word is two 32-bit stack slots, so
2834 // four slots in total.
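// Worked out on a 64-bit VM:
//   2 * (wordSize / VMRegImpl::stack_slot_size) == 2 * (8 / 4) == 4 slots.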
2835 uint SharedRuntime::in_preserve_stack_slots() {
2836   return 4;
2837 }
2838 
2839 uint SharedRuntime::out_preserve_stack_slots() {
2840   return 0;
2841 }
2842 
2843 
2844 //------------------------------generate_handler_blob------
2845 //
2846 // Generate a special Compile2Runtime blob that saves all registers
2847 // and sets up an oopmap.
2848 //
2849 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
2850   assert(is_polling_page_id(id), "expected a polling page stub id");
2851 
2852   ResourceMark rm;
2853   OopMapSet *oop_maps = new OopMapSet();
2854   OopMap* map;
2855 
2856   // Allocate space for the code.  Setup code generation tools.
2857   const char* name = SharedRuntime::stub_name(id);
2858   CodeBuffer buffer(name, 2048, 1024);
2859   MacroAssembler* masm = new MacroAssembler(&buffer);
2860 
2861   address start   = __ pc();
2862   address call_pc = nullptr;
2863   int frame_size_in_words;
2864   bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
2865   RegisterSaver reg_save(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */);
2866 
2867   // When the signal occurred, the LR was either signed and stored on the stack (in which
2868   // case it will be restored from the stack before being used) or unsigned and not stored
2869   // on the stack. Stripping ensures we get the right value.
2870   __ strip_return_address();
2871 
2872   // Save Integer and Float registers.
2873   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2874 
2875   // The following is basically a call_VM.  However, we need the precise
2876   // address of the call in order to generate an oopmap. Hence, we do all the
2877   // work ourselves.
2878 
2879   Label retaddr;
2880   __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2881 
2882   // The return address must always be correct so that the frame constructor
2883   // never sees an invalid pc.
2884 
2885   if (!cause_return) {
2886     // overwrite the return address pushed by save_live_registers
2887     // Additionally, r20 is a callee-saved register so we can look at
2888     // it later to determine if someone changed the return address for
2889     // us!
2890     __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2891     __ protect_return_address(r20);
2892     __ str(r20, Address(rfp, wordSize));
2893   }
2894 
2895   // Do the call
2896   __ mov(c_rarg0, rthread);
2897   __ lea(rscratch1, RuntimeAddress(call_ptr));
2898   __ blr(rscratch1);
2899   __ bind(retaddr);
2900 
2901   // Set an oopmap for the call site.  This oopmap will map all
2902   // oop-registers and debug-info registers as callee-saved.  This
2903   // will allow deoptimization at this safepoint to find all possible
2904   // debug-info recordings, as well as let GC find all oops.
2905 
2906   oop_maps->add_gc_map(__ pc() - start, map);
2907 
2908   Label noException;
2909 
2910   __ reset_last_Java_frame(false);
2911 
2912   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2913 
2914   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2915   __ cbz(rscratch1, noException);
2916 
2917   // Exception pending
2918 
2919   reg_save.restore_live_registers(masm);
2920 
2921   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2922 
2923   // No exception case
2924   __ bind(noException);
2925 
2926   Label no_adjust, bail;
2927   if (!cause_return) {
2928     // If our stashed return pc was modified by the runtime we avoid touching it
2929     __ ldr(rscratch1, Address(rfp, wordSize));
2930     __ cmp(r20, rscratch1);
2931     __ br(Assembler::NE, no_adjust);
2932     __ authenticate_return_address(r20);
2933 
2934 #ifdef ASSERT
2935     // Verify the correct encoding of the poll we're about to skip.
2936     // See NativeInstruction::is_ldrw_to_zr()
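    // For reference, the A64 encoding matched here is LDR <Wt>, [<Xn>{, #imm}]
    // (unsigned-offset form):
    //   bits 31..22 = 0b1011100101  (32-bit load, unsigned immediate)
    //   bits 21..10 = imm12, bits 9..5 = Rn
    //   bits  4..0  = Rt, which must be 0b11111 (wzr) for a poll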
2937     __ ldrw(rscratch1, Address(r20));
2938     __ ubfx(rscratch2, rscratch1, 22, 10);
2939     __ cmpw(rscratch2, 0b1011100101);
2940     __ br(Assembler::NE, bail);
2941     __ ubfx(rscratch2, rscratch1, 0, 5);
2942     __ cmpw(rscratch2, 0b11111);
2943     __ br(Assembler::NE, bail);
2944 #endif
2945     // Adjust return pc forward to step over the safepoint poll instruction
2946     __ add(r20, r20, NativeInstruction::instruction_size);
2947     __ protect_return_address(r20);
2948     __ str(r20, Address(rfp, wordSize));
2949   }
2950 
2951   __ bind(no_adjust);
2952   // Normal exit, restore registers and exit.
2953   reg_save.restore_live_registers(masm);
2954 
2955   __ ret(lr);
2956 
2957 #ifdef ASSERT
2958   __ bind(bail);
2959   __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2960 #endif
2961 
2962   // Make sure all code is generated
2963   masm->flush();
2964 
2965   // Fill-out other meta info
2966   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2967 }
2968 
2969 //
2970 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2971 //
2972 // Generate a stub that calls into the VM to find out the proper destination
2973 // of a Java call. All the argument registers are live at this point,
2974 // but since this is generic code we don't know what they are and the caller
2975 // must do any GC of the args.
2976 //
2977 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
2978   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
2979   assert(is_resolve_id(id), "expected a resolve stub id");
2980 
2981   // allocate space for the code
2982   ResourceMark rm;
2983 
2984   const char* name = SharedRuntime::stub_name(id);
2985   CodeBuffer buffer(name, 1000, 512);
2986   MacroAssembler* masm = new MacroAssembler(&buffer);
2987 
2988   int frame_size_in_words;
2989   RegisterSaver reg_save(false /* save_vectors */);
2990 
2991   OopMapSet *oop_maps = new OopMapSet();
2992   OopMap* map = nullptr;
2993 
2994   int start = __ offset();
2995 
2996   map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2997 
2998   int frame_complete = __ offset();
2999 
3000   {
3001     Label retaddr;
3002     __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
3003 
3004     __ mov(c_rarg0, rthread);
3005     __ lea(rscratch1, RuntimeAddress(destination));
3006 
3007     __ blr(rscratch1);
3008     __ bind(retaddr);
3009   }
3010 
3011   // Set an oopmap for the call site.
3012   // We need this not only for callee-saved registers, but also for volatile
3013   // registers that the compiler might be keeping live across a safepoint.
3014 
3015   oop_maps->add_gc_map(__ offset() - start, map);
3016 
3017   // r0 contains the address we are going to jump to, assuming no exception was installed
3018 
3019   // clear last_Java_sp
3020   __ reset_last_Java_frame(false);
3021   // check for pending exceptions
3022   Label pending;
3023   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
3024   __ cbnz(rscratch1, pending);
3025 
3026   // get the returned Method*
3027   __ get_vm_result_2(rmethod, rthread);
3028   __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
3029 
3030   // r0 is where we want to jump: stash it in the save slot of rscratch1
     // (a scratch register), so restore_live_registers reloads it into rscratch1
3031   __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
3032   reg_save.restore_live_registers(masm);
3033 
3034   // We are back to the original state on entry and ready to go.
3035 
3036   __ br(rscratch1);
3037 
3038   // Pending exception after the safepoint
3039 
3040   __ bind(pending);
3041 
3042   reg_save.restore_live_registers(masm);
3043 
3044   // exception pending => remove activation and forward to exception handler
3045 
3046   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
3047 
3048   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
3049   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3050 
3051   // -------------
3052   // make sure all code is generated
3053   masm->flush();
3054 
3055   // return the blob
3056   // (frame_size_in_words is in words, as RuntimeStub::new_runtime_stub expects)
3057   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3058 }
3059 
3060 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3061   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3062   CodeBuffer buffer(buf);
3063   short buffer_locs[20];
3064   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3065                                          sizeof(buffer_locs)/sizeof(relocInfo));
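     // relocInfo is a short-sized record, so buffer_locs provides
     // sizeof(buffer_locs) / sizeof(relocInfo) == 20 relocation entries.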
3066 
3067   MacroAssembler _masm(&buffer);
3068   MacroAssembler* masm = &_masm;
3069 
3070   const Array<SigEntry>* sig_vk = vk->extended_sig();
3071   const Array<VMRegPair>* regs = vk->return_regs();
3072 
3073   int pack_fields_jobject_off = __ offset();
3074   // Resolve pre-allocated buffer from JNI handle.
3075   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3076   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3077   __ ldr(r0, Address(Rresult));
3078   __ resolve_jobject(r0 /* value */,
3079                      rthread /* thread */,
3080                      r12 /* tmp */);
3081   __ str(r0, Address(Rresult));
3082 
3083   int pack_fields_off = __ offset();
3084 
3085   int j = 1;
3086   for (int i = 0; i < sig_vk->length(); i++) {
3087     BasicType bt = sig_vk->at(i)._bt;
3088     if (bt == T_METADATA) {
3089       continue;
3090     }
3091     if (bt == T_VOID) {
3092       if (sig_vk->at(i-1)._bt == T_LONG ||
3093           sig_vk->at(i-1)._bt == T_DOUBLE) {
3094         j++;
3095       }
3096       continue;
3097     }
3098     int off = sig_vk->at(i)._offset;
3099     VMRegPair pair = regs->at(j);
3100     VMReg r_1 = pair.first();
3101     VMReg r_2 = pair.second();
3102     Address to(r0, off);
3103     if (bt == T_FLOAT) {
3104       __ strs(r_1->as_FloatRegister(), to);
3105     } else if (bt == T_DOUBLE) {
3106       __ strd(r_1->as_FloatRegister(), to);
3107     } else {
3108       Register val = r_1->as_Register();
3109       assert_different_registers(to.base(), val, r15, r16, r17);
3110       if (is_reference_type(bt)) {
3111         __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3112       } else {
3113         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3114       }
3115     }
3116     j++;
3117   }
3118   assert(j == regs->length(), "missed a field?");
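     // Note on the walk above: extended_sig has one entry per signature slot,
     // so each T_LONG/T_DOUBLE is followed by a T_VOID entry for its second
     // half, and return_regs mirrors that; this is why j is advanced past the
     // T_VOID entries as well. j starts at 1, presumably because regs->at(0)
     // describes the buffered value itself (returned in r0).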
3119 
3120   __ ret(lr);
3121 
3122   int unpack_fields_off = __ offset();
3123 
3124   Label skip;
3125   __ cbz(r0, skip);
3126 
3127   j = 1;
3128   for (int i = 0; i < sig_vk->length(); i++) {
3129     BasicType bt = sig_vk->at(i)._bt;
3130     if (bt == T_METADATA) {
3131       continue;
3132     }
3133     if (bt == T_VOID) {
3134       if (sig_vk->at(i-1)._bt == T_LONG ||
3135           sig_vk->at(i-1)._bt == T_DOUBLE) {
3136         j++;
3137       }
3138       continue;
3139     }
3140     int off = sig_vk->at(i)._offset;
3141     assert(off > 0, "offset in object should be positive");
3142     VMRegPair pair = regs->at(j);
3143     VMReg r_1 = pair.first();
3144     VMReg r_2 = pair.second();
3145     Address from(r0, off);
3146     if (bt == T_FLOAT) {
3147       __ ldrs(r_1->as_FloatRegister(), from);
3148     } else if (bt == T_DOUBLE) {
3149       __ ldrd(r_1->as_FloatRegister(), from);
3150     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3151       assert_different_registers(r0, r_1->as_Register());
3152       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3153     } else {
3154       assert(is_java_primitive(bt), "unexpected basic type");
3155       assert_different_registers(r0, r_1->as_Register());
3156 
3157       size_t size_in_bytes = type2aelembytes(bt);
3158       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3159     }
3160     j++;
3161   }
3162   assert(j == regs->length(), "missed a field?");
3163 
3164   __ bind(skip);
3165 
3166   __ ret(lr);
3167 
3168   __ flush();
3169 
3170   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3171 }
3172 
3173 // Continuation point for throwing of implicit exceptions that are
3174 // not handled in the current activation. Fabricates an exception
3175 // oop and initiates normal exception dispatching in this
3176 // frame. Since we need to preserve callee-saved values (currently
3177 // only for C2, but done for C1 as well) we need a callee-saved oop
3178 // map and therefore have to make these stubs into RuntimeStubs
3179 // rather than BufferBlobs.  If the compiler needs all registers to
3180 // be preserved between the fault point and the exception handler
3181 // then it must assume responsibility for that in
3182 // AbstractCompiler::continuation_for_implicit_null_exception or
3183 // continuation_for_implicit_division_by_zero_exception. All other
3184 // implicit exceptions (e.g., NullPointerException or
3185 // AbstractMethodError on entry) are either at call sites or
3186 // otherwise assume that stack unwinding will be initiated, so
3187 // caller saved registers were assumed volatile in the compiler.
3188 
3189 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
3190   assert(is_throw_id(id), "expected a throw stub id");
3191 
3192   const char* name = SharedRuntime::stub_name(id);
3193 
3194   // Information about frame layout at time of blocking runtime call.
3195   // Note that we only have to preserve callee-saved registers since
3196   // the compilers are responsible for supplying a continuation point
3197   // if they expect all registers to be preserved.
3198   // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
3199   enum layout {
3200     rfp_off = 0,
3201     rfp_off2,
3202     return_off,
3203     return_off2,
3204     framesize // inclusive of return address
3205   };
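     // framesize is counted in 32-bit VMReg slots: rfp and the return address
     // take two slots each, so the frame is 4 slots == 16 bytes, keeping SP
     // 16-byte aligned (see the is_even assert below).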
3206 
3207   int insts_size = 512;
3208   int locs_size  = 64;
3209 
3210   ResourceMark rm;
3211   const char* timer_msg = "SharedRuntime generate_throw_exception";
3212   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
3213 
3214   CodeBuffer code(name, insts_size, locs_size);
3215   OopMapSet* oop_maps  = new OopMapSet();
3216   MacroAssembler* masm = new MacroAssembler(&code);
3217 
3218   address start = __ pc();
3219 
3220   // This is an inlined and slightly modified version of call_VM
3221   // which has the ability to fetch the return PC out of
3222   // thread-local storage and also sets up last_Java_sp slightly
3223   // differently than the real call_VM
3224 
3225   __ enter(); // Save FP and LR before call
3226 
3227   assert(is_even(framesize/2), "sp not 16-byte aligned");
3228 
3229   // lr and fp are already in place
3230   __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog
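     // (framesize - 4) slots beyond the two words enter() already pushed; with
     // framesize == 4 this reserves no extra space, leaving sp == rfp.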
3231 
3232   int frame_complete = __ pc() - start;
3233 
3234   // Set up last_Java_sp and last_Java_fp
3235   address the_pc = __ pc();
3236   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
3237 
3238   __ mov(c_rarg0, rthread);
3239   BLOCK_COMMENT("call runtime_entry");
3240   __ mov(rscratch1, runtime_entry);
3241   __ blr(rscratch1);
3242 
3243   // Generate oop map
3244   OopMap* map = new OopMap(framesize, 0);
3245 
3246   oop_maps->add_gc_map(the_pc - start, map);
3247 
3248   __ reset_last_Java_frame(true);
3249 
3250   // Reinitialize the ptrue predicate register, in case the external runtime
3251   // call clobbers ptrue reg, as we may return to SVE compiled code.
3252   __ reinitialize_ptrue();
3253 
3254   __ leave();
3255 
3256   // check for pending exceptions
3257 #ifdef ASSERT
3258   Label L;
3259   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
3260   __ cbnz(rscratch1, L);
3261   __ should_not_reach_here();
3262   __ bind(L);
3263 #endif // ASSERT
3264   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3265 
3266   // codeBlob framesize is in words (not VMRegImpl::slot_size)
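     // (framesize above is in 32-bit slots; shifting right by
     // LogBytesPerWord - LogBytesPerInt == 1 converts slots to words)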
3267   RuntimeStub* stub =
3268     RuntimeStub::new_runtime_stub(name,
3269                                   &code,
3270                                   frame_complete,
3271                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3272                                   oop_maps, false);
3273   return stub;
3274 }
3275 
3276 #if INCLUDE_JFR
3277 
3278 static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
3279   __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
3280   __ mov(c_rarg0, thread);
3281 }
3282 
3283 // The handle is dereferenced through a load barrier.
3284 static void jfr_epilogue(MacroAssembler* masm) {
3285   __ reset_last_Java_frame(true);
3286 }
3287 
3288 // For C2: c_rarg0 is junk; call into the runtime to write a checkpoint.
3289 // It returns a jobject handle to the event writer.
3290 // The handle is dereferenced and the return value is the event writer oop.
3291 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3292   enum layout {
3293     rbp_off,
3294     rbpH_off,
3295     return_off,
3296     return_off2,
3297     framesize // inclusive of return address
3298   };
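     // The rbp_off/rbpH_off names are carried over from the x86 version of
     // this layout; on aarch64 these two slots hold the saved rfp, followed
     // by the two slots of the return address.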
3299 
3300   int insts_size = 1024;
3301   int locs_size = 64;
3302   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
3303   CodeBuffer code(name, insts_size, locs_size);
3304   OopMapSet* oop_maps = new OopMapSet();
3305   MacroAssembler* masm = new MacroAssembler(&code);
3306 
3307   address start = __ pc();
3308   __ enter();
3309   int frame_complete = __ pc() - start;
3310   address the_pc = __ pc();
3311   jfr_prologue(the_pc, masm, rthread);
3312   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
3313   jfr_epilogue(masm);
3314   __ resolve_global_jobject(r0, rscratch1, rscratch2);
3315   __ leave();
3316   __ ret(lr);
3317 
3318   OopMap* map = new OopMap(framesize, 1); // rfp
3319   oop_maps->add_gc_map(the_pc - start, map);
3320 
3321   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3322     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3323                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3324                                   oop_maps, false);
3325   return stub;
3326 }
3327 
3328 // For C2: call into the runtime to return a leased buffer.
3329 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
3330   enum layout {
3331     rbp_off,
3332     rbpH_off,
3333     return_off,
3334     return_off2,
3335     framesize // inclusive of return address
3336   };
3337 
3338   int insts_size = 1024;
3339   int locs_size = 64;
3340 
3341   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
3342   CodeBuffer code(name, insts_size, locs_size);
3343   OopMapSet* oop_maps = new OopMapSet();
3344   MacroAssembler* masm = new MacroAssembler(&code);
3345 
3346   address start = __ pc();
3347   __ enter();
3348   int frame_complete = __ pc() - start;
3349   address the_pc = __ pc();
3350   jfr_prologue(the_pc, masm, rthread);
3351   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
3352   jfr_epilogue(masm);
3353 
3354   __ leave();
3355   __ ret(lr);
3356 
3357   OopMap* map = new OopMap(framesize, 1); // rfp
3358   oop_maps->add_gc_map(the_pc - start, map);
3359 
3360   RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
3361     RuntimeStub::new_runtime_stub(name, &code, frame_complete,
3362                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3363                                   oop_maps, false);
3364   return stub;
3365 }
3366 
3367 #endif // INCLUDE_JFR