1 /*
   2  * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/compiledIC.hpp"
  27 #include "code/debugInfoRec.hpp"
  28 #include "code/vtableStubs.hpp"
  29 #include "compiler/oopMap.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "logging/log.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "prims/methodHandles.hpp"
  36 #include "runtime/jniHandles.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/safepointMechanism.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "runtime/timerTrace.hpp"
  41 #include "runtime/vframeArray.hpp"
  42 #include "utilities/align.hpp"
  43 #include "utilities/powerOfTwo.hpp"
  44 #include "vmreg_arm.inline.hpp"
  45 #ifdef COMPILER1
  46 #include "c1/c1_Runtime1.hpp"
  47 #endif
  48 #ifdef COMPILER2
  49 #include "opto/runtime.hpp"
  50 #endif
  51 
  52 #define __ masm->
  53 
  54 class RegisterSaver {
  55 public:
  56 
  57   // Special registers:
  58   //              32-bit ARM     64-bit ARM
  59   //  Rthread:       R10            R28
  60   //  LR:            R14            R30
  61 
  62   // Rthread is callee saved in the C ABI and never changed by compiled code:
  63   // no need to save it.
  64 
  // 2 slots for LR: the one at LR_offset and another one at R14/R30_offset.
  // The one at LR_offset is a return address that is needed by stack walking.
  // A C2 method uses LR as a standard register, so it may be live when we
  // branch into the runtime. The slot at R14/R30_offset is for the value of LR
  // in case it is live in the method we are coming from.
  70 
  71 
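  // Save area built by save_live_registers (low to high addresses, one word
  // per slot; the slot indices correspond to the RegisterLayout below):
  //
  //   SP ->  D0 .. D15        (D16 .. D31 just above them with VFPv3-D32)
  //          R0 .. R6, R8, R9, R12, R14 and altFP_7_11,
  //                            stored in register-number order
  //          FP
  //          LR               (the return address used by stack walking)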
  72   enum RegisterLayout {
  73     fpu_save_size = FloatRegisterImpl::number_of_registers,
  74 #ifndef __SOFTFP__
  75     D0_offset = 0,
  76 #endif
  77     R0_offset = fpu_save_size,
  78     R1_offset,
  79     R2_offset,
  80     R3_offset,
  81     R4_offset,
  82     R5_offset,
  83     R6_offset,
  84 #if (FP_REG_NUM != 7)
  85     // if not saved as FP
  86     R7_offset,
  87 #endif
  88     R8_offset,
  89     R9_offset,
  90 #if (FP_REG_NUM != 11)
  91     // if not saved as FP
  92     R11_offset,
  93 #endif
  94     R12_offset,
  95     R14_offset,
  96     FP_offset,
  97     LR_offset,
  98     reg_save_size,
  99 
 100     Rmethod_offset = R9_offset,
 101     Rtemp_offset = R12_offset,
 102   };
 103 
 104   // all regs but Rthread (R10), FP (R7 or R11), SP and PC
 105   // (altFP_7_11 is the one among R7 and R11 which is not FP)
 106 #define SAVED_BASE_REGS (RegisterSet(R0, R6) | RegisterSet(R8, R9) | RegisterSet(R12) | R14 | altFP_7_11)
 107 
 108 
  // When LR may be live in the nmethod from which we are coming,
  // lr_saved is true: the caller has already saved the return address
  // before the call to save_live_registers, and LR still contains the
  // live value.
 113 
 114   static OopMap* save_live_registers(MacroAssembler* masm,
 115                                      int* total_frame_words,
 116                                      bool lr_saved = false);
 117   static void restore_live_registers(MacroAssembler* masm, bool restore_lr = true);
 118 
 119 };
 120 
 121 
 122 
 123 
 124 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm,
 125                                            int* total_frame_words,
 126                                            bool lr_saved) {
 127   *total_frame_words = reg_save_size;
 128 
 129   OopMapSet *oop_maps = new OopMapSet();
 130   OopMap* map = new OopMap(VMRegImpl::slots_per_word * (*total_frame_words), 0);
 131 
 132   if (lr_saved) {
 133     __ push(RegisterSet(FP));
 134   } else {
 135     __ push(RegisterSet(FP) | RegisterSet(LR));
 136   }
 137   __ push(SAVED_BASE_REGS);
 138   if (HaveVFP) {
 139     if (VM_Version::has_vfp3_32()) {
 140       __ fpush(FloatRegisterSet(D16, 16));
 141     } else {
 142       if (FloatRegisterImpl::number_of_registers > 32) {
 143         assert(FloatRegisterImpl::number_of_registers == 64, "nb fp registers should be 64");
 144         __ sub(SP, SP, 32 * wordSize);
 145       }
 146     }
 147     __ fpush(FloatRegisterSet(D0, 16));
 148   } else {
 149     __ sub(SP, SP, fpu_save_size * wordSize);
 150   }
 151 
 152   int i;
 153   int j=0;
 154   for (i = R0_offset; i <= R9_offset; i++) {
 155     if (j == FP_REG_NUM) {
 156       // skip the FP register, managed below.
 157       j++;
 158     }
 159     map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
 160     j++;
 161   }
 162   assert(j == R10->encoding(), "must be");
 163 #if (FP_REG_NUM != 11)
 164   // add R11, if not managed as FP
 165   map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
 166 #endif
 167   map->set_callee_saved(VMRegImpl::stack2reg(R12_offset), R12->as_VMReg());
 168   map->set_callee_saved(VMRegImpl::stack2reg(R14_offset), R14->as_VMReg());
 169   if (HaveVFP) {
 170     for (i = 0; i < (VM_Version::has_vfp3_32() ? 64 : 32); i+=2) {
 171       map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
 172       map->set_callee_saved(VMRegImpl::stack2reg(i + 1), as_FloatRegister(i)->as_VMReg()->next());
 173     }
 174   }
 175 
 176   return map;
 177 }
 178 
 179 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_lr) {
 180   if (HaveVFP) {
 181     __ fpop(FloatRegisterSet(D0, 16));
 182     if (VM_Version::has_vfp3_32()) {
 183       __ fpop(FloatRegisterSet(D16, 16));
 184     } else {
 185       if (FloatRegisterImpl::number_of_registers > 32) {
 186         assert(FloatRegisterImpl::number_of_registers == 64, "nb fp registers should be 64");
 187         __ add(SP, SP, 32 * wordSize);
 188       }
 189     }
 190   } else {
 191     __ add(SP, SP, fpu_save_size * wordSize);
 192   }
 193   __ pop(SAVED_BASE_REGS);
 194   if (restore_lr) {
 195     __ pop(RegisterSet(FP) | RegisterSet(LR));
 196   } else {
 197     __ pop(RegisterSet(FP));
 198   }
 199 }
 200 
 201 
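// Save / restore the registers holding the native call's result around a
// runtime call: D0 for float/double results under the hard-float ABI,
// otherwise the R0/R1 pair.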
 202 static void push_result_registers(MacroAssembler* masm, BasicType ret_type) {
 203 #ifdef __ABI_HARD__
 204   if (ret_type == T_DOUBLE || ret_type == T_FLOAT) {
 205     __ sub(SP, SP, 8);
 206     __ fstd(D0, Address(SP));
 207     return;
 208   }
 209 #endif // __ABI_HARD__
 210   __ raw_push(R0, R1);
 211 }
 212 
 213 static void pop_result_registers(MacroAssembler* masm, BasicType ret_type) {
 214 #ifdef __ABI_HARD__
 215   if (ret_type == T_DOUBLE || ret_type == T_FLOAT) {
 216     __ fldd(D0, Address(SP));
 217     __ add(SP, SP, 8);
 218     return;
 219   }
 220 #endif // __ABI_HARD__
 221   __ raw_pop(R0, R1);
 222 }
 223 
 224 static void push_param_registers(MacroAssembler* masm, int fp_regs_in_arguments) {
 225   // R1-R3 arguments need to be saved, but we push 4 registers for 8-byte alignment
 226   __ push(RegisterSet(R0, R3));
 227 
 228   // preserve arguments
  // Likely not needed, as the locking code probably won't modify the volatile FP registers,
  // but there is no way to guarantee that.
 231   if (fp_regs_in_arguments) {
 232     // convert fp_regs_in_arguments to a number of double registers
 233     int double_regs_num = (fp_regs_in_arguments + 1) >> 1;
 234     __ fpush_hardfp(FloatRegisterSet(D0, double_regs_num));
 235   }
 236 }
 237 
 238 static void pop_param_registers(MacroAssembler* masm, int fp_regs_in_arguments) {
 239   if (fp_regs_in_arguments) {
 240     int double_regs_num = (fp_regs_in_arguments + 1) >> 1;
 241     __ fpop_hardfp(FloatRegisterSet(D0, double_regs_num));
 242   }
 243   __ pop(RegisterSet(R0, R3));
 244 }
 245 
 246 
 247 
// Is the vector's size (in bytes) bigger than the size saved by default?
 249 // All vector registers are saved by default on ARM.
 250 bool SharedRuntime::is_wide_vector(int size) {
 251   return false;
 252 }
 253 
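// C calling convention (AAPCS): integral and reference arguments go in R0..R3
// and then on the stack; 64-bit values use an (optionally aligned) register
// pair or aligned stack slots; with the hard-float ABI, float/double arguments
// go in S0..S15/D0..D7, back-filling single-precision slots. For example, a
// (jint, jlong, jfloat, jdouble) signature would be assigned R0, R2:R3, S0 and
// D1, with a later float back-filling S1.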
 254 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 255                                         VMRegPair *regs,
 256                                         int total_args_passed) {
 257   int slot = 0;
 258   int ireg = 0;
 259 #ifdef __ABI_HARD__
 260   int fp_slot = 0;
 261   int single_fpr_slot = 0;
 262 #endif // __ABI_HARD__
 263   for (int i = 0; i < total_args_passed; i++) {
 264     switch (sig_bt[i]) {
 265     case T_SHORT:
 266     case T_CHAR:
 267     case T_BYTE:
 268     case T_BOOLEAN:
 269     case T_INT:
 270     case T_ARRAY:
 271     case T_OBJECT:
 272     case T_ADDRESS:
 273     case T_METADATA:
 274 #ifndef __ABI_HARD__
 275     case T_FLOAT:
 276 #endif // !__ABI_HARD__
 277       if (ireg < 4) {
 278         Register r = as_Register(ireg);
 279         regs[i].set1(r->as_VMReg());
 280         ireg++;
 281       } else {
 282         regs[i].set1(VMRegImpl::stack2reg(slot));
 283         slot++;
 284       }
 285       break;
 286     case T_LONG:
 287 #ifndef __ABI_HARD__
 288     case T_DOUBLE:
 289 #endif // !__ABI_HARD__
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing half");
 291       if (ireg <= 2) {
 292 #if (ALIGN_WIDE_ARGUMENTS == 1)
 293         if(ireg & 1) ireg++;  // Aligned location required
 294 #endif
 295         Register r1 = as_Register(ireg);
 296         Register r2 = as_Register(ireg + 1);
 297         regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
 298         ireg += 2;
 299 #if (ALIGN_WIDE_ARGUMENTS == 0)
 300       } else if (ireg == 3) {
 301         // uses R3 + one stack slot
 302         Register r = as_Register(ireg);
 303         regs[i].set_pair(VMRegImpl::stack2reg(slot), r->as_VMReg());
 304         ireg += 1;
 305         slot += 1;
 306 #endif
 307       } else {
 308         if (slot & 1) slot++; // Aligned location required
 309         regs[i].set_pair(VMRegImpl::stack2reg(slot+1), VMRegImpl::stack2reg(slot));
 310         slot += 2;
 311         ireg = 4;
 312       }
 313       break;
 314     case T_VOID:
 315       regs[i].set_bad();
 316       break;
 317 #ifdef __ABI_HARD__
 318     case T_FLOAT:
 319       if ((fp_slot < 16)||(single_fpr_slot & 1)) {
 320         if ((single_fpr_slot & 1) == 0) {
 321           single_fpr_slot = fp_slot;
 322           fp_slot += 2;
 323         }
 324         FloatRegister r = as_FloatRegister(single_fpr_slot);
 325         single_fpr_slot++;
 326         regs[i].set1(r->as_VMReg());
 327       } else {
 328         regs[i].set1(VMRegImpl::stack2reg(slot));
 329         slot++;
 330       }
 331       break;
 332     case T_DOUBLE:
 333       assert(ALIGN_WIDE_ARGUMENTS == 1, "ABI_HARD not supported with unaligned wide arguments");
 334       if (fp_slot <= 14) {
 335         FloatRegister r1 = as_FloatRegister(fp_slot);
 336         FloatRegister r2 = as_FloatRegister(fp_slot+1);
 337         regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
 338         fp_slot += 2;
 339       } else {
 340         if(slot & 1) slot++;
 341         regs[i].set_pair(VMRegImpl::stack2reg(slot+1), VMRegImpl::stack2reg(slot));
 342         slot += 2;
 343         single_fpr_slot = 16;
 344       }
 345       break;
 346 #endif // __ABI_HARD__
 347     default:
 348       ShouldNotReachHere();
 349     }
 350   }
 351   return slot;
 352 }
 353 
 354 int SharedRuntime::vector_calling_convention(VMRegPair *regs,
 355                                              uint num_bits,
 356                                              uint total_args_passed) {
 357   Unimplemented();
 358   return 0;
 359 }
 360 
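// Java calling convention: same idea as the hard-float C convention above,
// except that C2 reserves S14/S15 for mem-mem moves (so only S0..S13 / D0..D6
// carry floating-point arguments) and 64-bit values are always passed in
// aligned register pairs or aligned stack slots, regardless of
// ALIGN_WIDE_ARGUMENTS.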
 361 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 362                                            VMRegPair *regs,
 363                                            int total_args_passed) {
 364 #ifdef __SOFTFP__
 365   // soft float is the same as the C calling convention.
  return c_calling_convention(sig_bt, regs, total_args_passed);
 367 #endif // __SOFTFP__
 368   int slot = 0;
 369   int ireg = 0;
 370   int freg = 0;
 371   int single_fpr = 0;
 372 
 373   for (int i = 0; i < total_args_passed; i++) {
 374     switch (sig_bt[i]) {
 375     case T_SHORT:
 376     case T_CHAR:
 377     case T_BYTE:
 378     case T_BOOLEAN:
 379     case T_INT:
 380     case T_ARRAY:
 381     case T_OBJECT:
 382     case T_ADDRESS:
 383       if (ireg < 4) {
 384         Register r = as_Register(ireg++);
 385         regs[i].set1(r->as_VMReg());
 386       } else {
 387         regs[i].set1(VMRegImpl::stack2reg(slot++));
 388       }
 389       break;
 390     case T_FLOAT:
 391       // C2 utilizes S14/S15 for mem-mem moves
 392       if ((freg < 16 COMPILER2_PRESENT(-2)) || (single_fpr & 1)) {
 393         if ((single_fpr & 1) == 0) {
 394           single_fpr = freg;
 395           freg += 2;
 396         }
 397         FloatRegister r = as_FloatRegister(single_fpr++);
 398         regs[i].set1(r->as_VMReg());
 399       } else {
 400         regs[i].set1(VMRegImpl::stack2reg(slot++));
 401       }
 402       break;
 403     case T_DOUBLE:
 404       // C2 utilizes S14/S15 for mem-mem moves
 405       if (freg <= 14 COMPILER2_PRESENT(-2)) {
 406         FloatRegister r1 = as_FloatRegister(freg);
 407         FloatRegister r2 = as_FloatRegister(freg + 1);
 408         regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
 409         freg += 2;
 410       } else {
 411         // Keep internally the aligned calling convention,
 412         // ignoring ALIGN_WIDE_ARGUMENTS
 413         if (slot & 1) slot++;
 414         regs[i].set_pair(VMRegImpl::stack2reg(slot + 1), VMRegImpl::stack2reg(slot));
 415         slot += 2;
 416         single_fpr = 16;
 417       }
 418       break;
 419     case T_LONG:
 420       // Keep internally the aligned calling convention,
 421       // ignoring ALIGN_WIDE_ARGUMENTS
 422       if (ireg <= 2) {
 423         if (ireg & 1) ireg++;
 424         Register r1 = as_Register(ireg);
 425         Register r2 = as_Register(ireg + 1);
 426         regs[i].set_pair(r2->as_VMReg(), r1->as_VMReg());
 427         ireg += 2;
 428       } else {
 429         if (slot & 1) slot++;
 430         regs[i].set_pair(VMRegImpl::stack2reg(slot + 1), VMRegImpl::stack2reg(slot));
 431         slot += 2;
 432         ireg = 4;
 433       }
 434       break;
 435     case T_VOID:
 436       regs[i].set_bad();
 437       break;
 438     default:
 439       ShouldNotReachHere();
 440     }
 441   }
 442 
 443   return slot;
 444 }
 445 
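// If the callee Method* already has compiled code, call
// SharedRuntime::fixup_callers_callsite(method, caller_pc) so the caller's
// call site can be patched to invoke the compiled entry directly next time.
// Argument registers (R0-R3, D0-D7) and LR are preserved around the call.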
 446 static void patch_callers_callsite(MacroAssembler *masm) {
 447   Label skip;
 448 
 449   __ ldr(Rtemp, Address(Rmethod, Method::code_offset()));
 450   __ cbz(Rtemp, skip);
 451 
 452   // Pushing an even number of registers for stack alignment.
 453   // Selecting R9, which had to be saved anyway for some platforms.
 454   __ push(RegisterSet(R0, R3) | R9 | LR);
 455   __ fpush_hardfp(FloatRegisterSet(D0, 8));
 456 
 457   __ mov(R0, Rmethod);
 458   __ mov(R1, LR);
 459   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
 460 
 461   __ fpop_hardfp(FloatRegisterSet(D0, 8));
 462   __ pop(RegisterSet(R0, R3) | R9 | LR);
 463 
 464   __ bind(skip);
 465 }
 466 
 467 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 468                                     int total_args_passed, int comp_args_on_stack,
 469                                     const BasicType *sig_bt, const VMRegPair *regs) {
  // TODO: ARM - Maybe we can use ldm to load the arguments
 471   const Register tmp = Rtemp; // avoid erasing R5_mh
 472 
  // The next assert may not be needed, but it is safer. Extra analysis is required
  // if there are not enough free registers and we need to use R5 here.
 475   assert_different_registers(tmp, R5_mh);
 476 
  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately, if
  // we try to find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.
 486   Address callee_target_addr(Rthread, JavaThread::callee_target_offset());
 487   __ str(Rmethod, callee_target_addr);
 488 
 489 
 490   assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, Rmethod);
 491 
 492   const Register initial_sp = Rmethod; // temporarily scratched
 493 
 494   // Old code was modifying R4 but this looks unsafe (particularly with JSR292)
 495   assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, initial_sp);
 496 
 497   __ mov(initial_sp, SP);
 498 
 499   if (comp_args_on_stack) {
 500     __ sub_slow(SP, SP, comp_args_on_stack * VMRegImpl::stack_slot_size);
 501   }
 502   __ bic(SP, SP, StackAlignmentInBytes - 1);
 503 
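  // Copy each Java argument from the interpreter's expression stack (addressed
  // off initial_sp) into its location in the compiled calling convention: an
  // outgoing stack slot below the realigned SP, a core register, or a VFP
  // register. Two-word values (long/double) span two interpreter slots and are
  // copied as a pair.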
 504   for (int i = 0; i < total_args_passed; i++) {
 505     if (sig_bt[i] == T_VOID) {
 506       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 507       continue;
 508     }
 509     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "must be ordered");
 510     int arg_offset = Interpreter::expr_offset_in_bytes(total_args_passed - 1 - i);
 511 
 512     VMReg r_1 = regs[i].first();
 513     VMReg r_2 = regs[i].second();
 514     if (r_1->is_stack()) {
 515       int stack_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size;
 516       if (!r_2->is_valid()) {
 517         __ ldr(tmp, Address(initial_sp, arg_offset));
 518         __ str(tmp, Address(SP, stack_offset));
 519       } else {
 520         __ ldr(tmp, Address(initial_sp, arg_offset - Interpreter::stackElementSize));
 521         __ str(tmp, Address(SP, stack_offset));
 522         __ ldr(tmp, Address(initial_sp, arg_offset));
 523         __ str(tmp, Address(SP, stack_offset + wordSize));
 524       }
 525     } else if (r_1->is_Register()) {
 526       if (!r_2->is_valid()) {
 527         __ ldr(r_1->as_Register(), Address(initial_sp, arg_offset));
 528       } else {
 529         __ ldr(r_1->as_Register(), Address(initial_sp, arg_offset - Interpreter::stackElementSize));
 530         __ ldr(r_2->as_Register(), Address(initial_sp, arg_offset));
 531       }
 532     } else if (r_1->is_FloatRegister()) {
 533 #ifdef __SOFTFP__
 534       ShouldNotReachHere();
 535 #endif // __SOFTFP__
 536       if (!r_2->is_valid()) {
 537         __ flds(r_1->as_FloatRegister(), Address(initial_sp, arg_offset));
 538       } else {
 539         __ fldd(r_1->as_FloatRegister(), Address(initial_sp, arg_offset - Interpreter::stackElementSize));
 540       }
 541     } else {
 542       assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
 543     }
 544   }
 545 
 546   // restore Rmethod (scratched for initial_sp)
 547   __ ldr(Rmethod, callee_target_addr);
 548   __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
 549 
 550 }
 551 
 552 static void gen_c2i_adapter(MacroAssembler *masm,
 553                             int total_args_passed,  int comp_args_on_stack,
 554                             const BasicType *sig_bt, const VMRegPair *regs,
 555                             Label& skip_fixup) {
  // TODO: ARM - Maybe we can use stm to store the arguments
 557   const Register tmp = Rtemp;
 558 
 559   patch_callers_callsite(masm);
 560   __ bind(skip_fixup);
 561 
 562   __ mov(Rsender_sp, SP); // not yet saved
 563 
 564 
 565   int extraspace = total_args_passed * Interpreter::stackElementSize;
 566   if (extraspace) {
 567     __ sub_slow(SP, SP, extraspace);
 568   }
 569 
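  // Copy each argument from its compiled-convention location (a register or a
  // caller stack slot above the area just allocated) into the interpreter's
  // expression stack, with the last argument ending up at the lowest address
  // (top of the expression stack).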
 570   for (int i = 0; i < total_args_passed; i++) {
 571     if (sig_bt[i] == T_VOID) {
 572       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 573       continue;
 574     }
 575     int stack_offset = (total_args_passed - 1 - i) * Interpreter::stackElementSize;
 576 
 577     VMReg r_1 = regs[i].first();
 578     VMReg r_2 = regs[i].second();
 579     if (r_1->is_stack()) {
 580       int arg_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 581       if (!r_2->is_valid()) {
 582         __ ldr(tmp, Address(SP, arg_offset));
 583         __ str(tmp, Address(SP, stack_offset));
 584       } else {
 585         __ ldr(tmp, Address(SP, arg_offset));
 586         __ str(tmp, Address(SP, stack_offset - Interpreter::stackElementSize));
 587         __ ldr(tmp, Address(SP, arg_offset + wordSize));
 588         __ str(tmp, Address(SP, stack_offset));
 589       }
 590     } else if (r_1->is_Register()) {
 591       if (!r_2->is_valid()) {
 592         __ str(r_1->as_Register(), Address(SP, stack_offset));
 593       } else {
 594         __ str(r_1->as_Register(), Address(SP, stack_offset - Interpreter::stackElementSize));
 595         __ str(r_2->as_Register(), Address(SP, stack_offset));
 596       }
 597     } else if (r_1->is_FloatRegister()) {
 598 #ifdef __SOFTFP__
 599       ShouldNotReachHere();
 600 #endif // __SOFTFP__
 601       if (!r_2->is_valid()) {
 602         __ fsts(r_1->as_FloatRegister(), Address(SP, stack_offset));
 603       } else {
 604         __ fstd(r_1->as_FloatRegister(), Address(SP, stack_offset - Interpreter::stackElementSize));
 605       }
 606     } else {
 607       assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
 608     }
 609   }
 610 
 611   __ ldr(PC, Address(Rmethod, Method::interpreter_entry_offset()));
 612 
 613 }
 614 
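// Generate the interpreter-to-compiled (i2c) and compiled-to-interpreter (c2i)
// adapters for this signature, plus an unverified c2i entry that performs the
// inline cache check before falling through into the c2i adapter.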
 615 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 616                                                             int total_args_passed,
 617                                                             int comp_args_on_stack,
 618                                                             const BasicType *sig_bt,
 619                                                             const VMRegPair *regs,
 620                                                             AdapterFingerPrint* fingerprint) {
 621   address i2c_entry = __ pc();
 622   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 623 
 624   address c2i_unverified_entry = __ pc();
 625   Label skip_fixup;
 626   const Register receiver       = R0;
 627   const Register holder_klass   = Rtemp; // XXX should be OK for C2 but not 100% sure
 628 
 629   __ ic_check(1 /* end_alignment */);
 630   __ ldr(Rmethod, Address(Ricklass, CompiledICData::speculated_method_offset()));
 631 
 632   __ ldr(Rtemp, Address(Rmethod, Method::code_offset()), eq);
 633   __ cmp(Rtemp, 0, eq);
 634   __ b(skip_fixup, eq);
 635   __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
 636 
 637   address c2i_entry = __ pc();
 638   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 639 
 640   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 641 }
 642 
 643 
 644 static int reg2offset_in(VMReg r) {
 645   // Account for saved FP and LR
 646   return r->reg2stack() * VMRegImpl::stack_slot_size + 2*wordSize;
 647 }
 648 
 649 static int reg2offset_out(VMReg r) {
 650   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 651 }
 652 
 653 
 654 static void verify_oop_args(MacroAssembler* masm,
 655                             const methodHandle& method,
 656                             const BasicType* sig_bt,
 657                             const VMRegPair* regs) {
 658   Register temp_reg = Rmethod;  // not part of any compiled calling seq
 659   if (VerifyOops) {
 660     for (int i = 0; i < method->size_of_parameters(); i++) {
 661       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
 662         VMReg r = regs[i].first();
 663         assert(r->is_valid(), "bad oop arg");
 664         if (r->is_stack()) {
 665           __ ldr(temp_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
 666           __ verify_oop(temp_reg);
 667         } else {
 668           __ verify_oop(r->as_Register());
 669         }
 670       }
 671     }
 672   }
 673 }
 674 
 675 static void gen_special_dispatch(MacroAssembler* masm,
 676                                  const methodHandle& method,
 677                                  const BasicType* sig_bt,
 678                                  const VMRegPair* regs) {
 679   verify_oop_args(masm, method, sig_bt, regs);
 680   vmIntrinsics::ID iid = method->intrinsic_id();
 681 
 682   // Now write the args into the outgoing interpreter space
 683   bool     has_receiver   = false;
 684   Register receiver_reg   = noreg;
 685   int      member_arg_pos = -1;
 686   Register member_reg     = noreg;
 687   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
 688   if (ref_kind != 0) {
 689     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
 690     member_reg = Rmethod;  // known to be free at this point
 691     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
 692   } else if (iid == vmIntrinsics::_invokeBasic) {
 693     has_receiver = true;
 694   } else {
 695     fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
 696   }
 697 
 698   if (member_reg != noreg) {
 699     // Load the member_arg into register, if necessary.
 700     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
 701     VMReg r = regs[member_arg_pos].first();
 702     if (r->is_stack()) {
 703       __ ldr(member_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
 704     } else {
 705       // no data motion is needed
 706       member_reg = r->as_Register();
 707     }
 708   }
 709 
 710   if (has_receiver) {
 711     // Make sure the receiver is loaded into a register.
 712     assert(method->size_of_parameters() > 0, "oob");
 713     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
 714     VMReg r = regs[0].first();
 715     assert(r->is_valid(), "bad receiver arg");
 716     if (r->is_stack()) {
 717       // Porting note:  This assumes that compiled calling conventions always
 718       // pass the receiver oop in a register.  If this is not true on some
 719       // platform, pick a temp and load the receiver from stack.
 720       assert(false, "receiver always in a register");
 721       receiver_reg = j_rarg0;  // known to be free at this point
 722       __ ldr(receiver_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
 723     } else {
 724       // no data motion is needed
 725       receiver_reg = r->as_Register();
 726     }
 727   }
 728 
 729   // Figure out which address we are really jumping to:
 730   MethodHandles::generate_method_handle_dispatch(masm, iid,
 731                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 732 }
 733 
 734 // ---------------------------------------------------------------------------
 735 // Generate a native wrapper for a given method.  The method takes arguments
 736 // in the Java compiled code convention, marshals them to the native
 737 // convention (handlizes oops, etc), transitions to native, makes the call,
 738 // returns to java state (possibly blocking), unhandlizes any result and
 739 // returns.
 740 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 741                                                 const methodHandle& method,
 742                                                 int compile_id,
 743                                                 BasicType* in_sig_bt,
 744                                                 VMRegPair* in_regs,
 745                                                 BasicType ret_type) {
 746   if (method->is_method_handle_intrinsic()) {
 747     vmIntrinsics::ID iid = method->intrinsic_id();
 748     intptr_t start = (intptr_t)__ pc();
 749     int vep_offset = ((intptr_t)__ pc()) - start;
 750     gen_special_dispatch(masm,
 751                          method,
 752                          in_sig_bt,
 753                          in_regs);
 754     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
 755     __ flush();
 756     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
 757     return nmethod::new_native_nmethod(method,
 758                                        compile_id,
 759                                        masm->code(),
 760                                        vep_offset,
 761                                        frame_complete,
 762                                        stack_slots / VMRegImpl::slots_per_word,
 763                                        in_ByteSize(-1),
 764                                        in_ByteSize(-1),
 765                                        (OopMapSet*)nullptr);
 766   }
 767   // Arguments for JNI method include JNIEnv and Class if static
 768 
 769   // Usage of Rtemp should be OK since scratched by native call
 770 
 771   bool method_is_static = method->is_static();
 772 
 773   const int total_in_args = method->size_of_parameters();
 774   int total_c_args = total_in_args + (method_is_static ? 2 : 1);
 775 
 776   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
 777   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
 778 
 779   int argc = 0;
 780   out_sig_bt[argc++] = T_ADDRESS;
 781   if (method_is_static) {
 782     out_sig_bt[argc++] = T_OBJECT;
 783   }
 784 
 785   int i;
 786   for (i = 0; i < total_in_args; i++) {
 787     out_sig_bt[argc++] = in_sig_bt[i];
 788   }
 789 
 790   int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
 791   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
 792   // Since object arguments need to be wrapped, we must preserve space
 793   // for those object arguments which come in registers (GPR_PARAMS maximum)
 794   // plus one more slot for Klass handle (for static methods)
 795   int oop_handle_offset = stack_slots;
 796   stack_slots += (GPR_PARAMS + 1) * VMRegImpl::slots_per_word;
 797 
 798   // Plus a lock if needed
 799   int lock_slot_offset = 0;
 800   if (method->is_synchronized()) {
 801     lock_slot_offset = stack_slots;
 802     assert(sizeof(BasicLock) == wordSize, "adjust this code");
 803     stack_slots += VMRegImpl::slots_per_word;
 804   }
 805 
 806   // Space to save return address and FP
 807   stack_slots += 2 * VMRegImpl::slots_per_word;
 808 
 809   // Calculate the final stack size taking account of alignment
 810   stack_slots = align_up(stack_slots, StackAlignmentInBytes / VMRegImpl::stack_slot_size);
 811   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 812   int lock_slot_fp_offset = stack_size - 2 * wordSize -
 813     lock_slot_offset * VMRegImpl::stack_slot_size;
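  // Resulting native wrapper frame, roughly (low to high addresses):
  //
  //   SP ->  outgoing C argument slots
  //          oop handle area (GPR_PARAMS + 1 words, for handlized oops)
  //          BasicLock slot (synchronized methods only)
  //          alignment padding
  //   FP ->  saved FP
  //          saved LR (return address)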
 814 
 815   // Unverified entry point
 816   address start = __ pc();
 817 
 818   const Register receiver = R0; // see receiverOpr()
 819   __ verify_oop(receiver);
 820   // Inline cache check
 821   __ ic_check(CodeEntryAlignment /* end_alignment */);
 822 
 823   // Verified entry point
 824   int vep_offset = __ pc() - start;
 825 
 826   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
 827     // Object.hashCode, System.identityHashCode can pull the hashCode from the header word
 828     // instead of doing a full VM transition once it's been computed.
 829     Label slow_case;
 830     const Register obj_reg = R0;
 831 
    // Unlike Object.hashCode, System.identityHashCode is a static method and
    // gets the object as an argument instead of as the receiver.
 834     if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
 835       assert(method->is_static(), "method should be static");
 836       // return 0 for null reference input, return val = R0 = obj_reg = 0
 837       __ cmp(obj_reg, 0);
 838       __ bx(LR, eq);
 839     }
 840 
 841     __ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 842 
 843     assert(markWord::unlocked_value == 1, "adjust this code");
 844     __ tbz(Rtemp, exact_log2(markWord::unlocked_value), slow_case);
 845 
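    // bics leaves only the hash bits and sets the condition flags: a zero
    // result means the identity hash has not been computed yet, so fall
    // through to the slow case; otherwise shift the hash into place and return.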
 846     __ bics(Rtemp, Rtemp, ~markWord::hash_mask_in_place);
 847     __ mov(R0, AsmOperand(Rtemp, lsr, markWord::hash_shift), ne);
 848     __ bx(LR, ne);
 849 
 850     __ bind(slow_case);
 851   }
 852 
 853   // Bang stack pages
 854   __ arm_stack_overflow_check(stack_size, Rtemp);
 855 
 856   // Setup frame linkage
 857   __ raw_push(FP, LR);
 858   __ mov(FP, SP);
 859   __ sub_slow(SP, SP, stack_size - 2*wordSize);
 860 
 861   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 862   assert(bs != nullptr, "Sanity");
 863   bs->nmethod_entry_barrier(masm);
 864 
 865   int frame_complete = __ pc() - start;
 866 
 867   OopMapSet* oop_maps = new OopMapSet();
 868   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
 869   const int extra_args = method_is_static ? 2 : 1;
 870   int receiver_offset = -1;
 871   int fp_regs_in_arguments = 0;
 872 
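  // Walk the incoming Java arguments from last to first and shuffle each one
  // into its C-convention location. Oop arguments are "handlized": the oop is
  // stored into the oop handle area (or left in the caller's stack slot) and
  // the C argument becomes the address of that slot, or null for a null oop.
  // The OopMap records every slot holding an oop across the native call.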
 873   for (i = total_in_args; --i >= 0; ) {
 874     switch (in_sig_bt[i]) {
 875     case T_ARRAY:
 876     case T_OBJECT: {
 877       VMReg src = in_regs[i].first();
 878       VMReg dst = out_regs[i + extra_args].first();
 879       if (src->is_stack()) {
 880         assert(dst->is_stack(), "must be");
 881         assert(i != 0, "Incoming receiver is always in a register");
 882         __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
 883         __ cmp(Rtemp, 0);
 884         __ add(Rtemp, FP, reg2offset_in(src), ne);
 885         __ str(Rtemp, Address(SP, reg2offset_out(dst)));
 886         int offset_in_older_frame = src->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 887         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
 888       } else {
 889         int offset = oop_handle_offset * VMRegImpl::stack_slot_size;
 890         __ str(src->as_Register(), Address(SP, offset));
 891         map->set_oop(VMRegImpl::stack2reg(oop_handle_offset));
 892         if ((i == 0) && (!method_is_static)) {
 893           receiver_offset = offset;
 894         }
 895         oop_handle_offset += VMRegImpl::slots_per_word;
 896 
 897         if (dst->is_stack()) {
 898           __ movs(Rtemp, src->as_Register());
 899           __ add(Rtemp, SP, offset, ne);
 900           __ str(Rtemp, Address(SP, reg2offset_out(dst)));
 901         } else {
 902           __ movs(dst->as_Register(), src->as_Register());
 903           __ add(dst->as_Register(), SP, offset, ne);
 904         }
 905       }
      break;
    }
 907 
 908     case T_VOID:
 909       break;
 910 
 911 
 912 #ifdef __SOFTFP__
 913     case T_DOUBLE:
 914 #endif
 915     case T_LONG: {
 916       VMReg src_1 = in_regs[i].first();
 917       VMReg src_2 = in_regs[i].second();
 918       VMReg dst_1 = out_regs[i + extra_args].first();
 919       VMReg dst_2 = out_regs[i + extra_args].second();
 920 #if (ALIGN_WIDE_ARGUMENTS == 0)
 921       // C convention can mix a register and a stack slot for a
 922       // 64-bits native argument.
 923 
 924       // Note: following code should work independently of whether
 925       // the Java calling convention follows C convention or whether
 926       // it aligns 64-bit values.
 927       if (dst_2->is_Register()) {
 928         if (src_1->as_Register() != dst_1->as_Register()) {
 929           assert(src_1->as_Register() != dst_2->as_Register() &&
 930                  src_2->as_Register() != dst_2->as_Register(), "must be");
 931           __ mov(dst_2->as_Register(), src_2->as_Register());
 932           __ mov(dst_1->as_Register(), src_1->as_Register());
 933         } else {
 934           assert(src_2->as_Register() == dst_2->as_Register(), "must be");
 935         }
 936       } else if (src_2->is_Register()) {
 937         if (dst_1->is_Register()) {
 938           // dst mixes a register and a stack slot
 939           assert(dst_2->is_stack() && src_1->is_Register() && src_2->is_Register(), "must be");
 940           assert(src_1->as_Register() != dst_1->as_Register(), "must be");
 941           __ str(src_2->as_Register(), Address(SP, reg2offset_out(dst_2)));
 942           __ mov(dst_1->as_Register(), src_1->as_Register());
 943         } else {
 944           // registers to stack slots
 945           assert(dst_2->is_stack() && src_1->is_Register() && src_2->is_Register(), "must be");
 946           __ str(src_1->as_Register(), Address(SP, reg2offset_out(dst_1)));
 947           __ str(src_2->as_Register(), Address(SP, reg2offset_out(dst_2)));
 948         }
 949       } else if (src_1->is_Register()) {
 950         if (dst_1->is_Register()) {
 951           // src and dst must be R3 + stack slot
 952           assert(dst_1->as_Register() == src_1->as_Register(), "must be");
 953           __ ldr(Rtemp,    Address(FP, reg2offset_in(src_2)));
 954           __ str(Rtemp,    Address(SP, reg2offset_out(dst_2)));
 955         } else {
 956           // <R3,stack> -> <stack,stack>
 957           assert(dst_2->is_stack() && src_2->is_stack(), "must be");
 958           __ ldr(LR, Address(FP, reg2offset_in(src_2)));
 959           __ str(src_1->as_Register(), Address(SP, reg2offset_out(dst_1)));
 960           __ str(LR, Address(SP, reg2offset_out(dst_2)));
 961         }
 962       } else {
 963         assert(src_2->is_stack() && dst_1->is_stack() && dst_2->is_stack(), "must be");
 964         __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
 965         __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
 966         __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
 967         __ str(LR,    Address(SP, reg2offset_out(dst_2)));
 968       }
 969 #else // ALIGN_WIDE_ARGUMENTS
 970       if (src_1->is_stack()) {
 971         assert(src_2->is_stack() && dst_1->is_stack() && dst_2->is_stack(), "must be");
 972         __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
 973         __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
 974         __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
 975         __ str(LR,    Address(SP, reg2offset_out(dst_2)));
 976       } else if (dst_1->is_stack()) {
 977         assert(dst_2->is_stack() && src_1->is_Register() && src_2->is_Register(), "must be");
 978         __ str(src_1->as_Register(), Address(SP, reg2offset_out(dst_1)));
 979         __ str(src_2->as_Register(), Address(SP, reg2offset_out(dst_2)));
 980       } else if (src_1->as_Register() == dst_1->as_Register()) {
 981         assert(src_2->as_Register() == dst_2->as_Register(), "must be");
 982       } else {
 983         assert(src_1->as_Register() != dst_2->as_Register() &&
 984                src_2->as_Register() != dst_2->as_Register(), "must be");
 985         __ mov(dst_2->as_Register(), src_2->as_Register());
 986         __ mov(dst_1->as_Register(), src_1->as_Register());
 987       }
 988 #endif // ALIGN_WIDE_ARGUMENTS
 989       break;
 990     }
 991 
 992 #if (!defined __SOFTFP__ && !defined __ABI_HARD__)
 993     case T_FLOAT: {
 994       VMReg src = in_regs[i].first();
 995       VMReg dst = out_regs[i + extra_args].first();
 996       if (src->is_stack()) {
 997         assert(dst->is_stack(), "must be");
 998         __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
 999         __ str(Rtemp, Address(SP, reg2offset_out(dst)));
1000       } else if (dst->is_stack()) {
1001         __ fsts(src->as_FloatRegister(), Address(SP, reg2offset_out(dst)));
1002       } else {
1003         assert(src->is_FloatRegister() && dst->is_Register(), "must be");
1004         __ fmrs(dst->as_Register(), src->as_FloatRegister());
1005       }
1006       break;
1007     }
1008 
1009     case T_DOUBLE: {
1010       VMReg src_1 = in_regs[i].first();
1011       VMReg src_2 = in_regs[i].second();
1012       VMReg dst_1 = out_regs[i + extra_args].first();
1013       VMReg dst_2 = out_regs[i + extra_args].second();
1014       if (src_1->is_stack()) {
1015         assert(src_2->is_stack() && dst_1->is_stack() && dst_2->is_stack(), "must be");
1016         __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
1017         __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
1018         __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
1019         __ str(LR,    Address(SP, reg2offset_out(dst_2)));
1020       } else if (dst_1->is_stack()) {
1021         assert(dst_2->is_stack() && src_1->is_FloatRegister(), "must be");
1022         __ fstd(src_1->as_FloatRegister(), Address(SP, reg2offset_out(dst_1)));
1023 #if (ALIGN_WIDE_ARGUMENTS == 0)
1024       } else if (dst_2->is_stack()) {
1025         assert(! src_2->is_stack(), "must be"); // assuming internal java convention is aligned
1026         // double register must go into R3 + one stack slot
1027         __ fmrrd(dst_1->as_Register(), Rtemp, src_1->as_FloatRegister());
1028         __ str(Rtemp, Address(SP, reg2offset_out(dst_2)));
1029 #endif
1030       } else {
1031         assert(src_1->is_FloatRegister() && dst_1->is_Register() && dst_2->is_Register(), "must be");
1032         __ fmrrd(dst_1->as_Register(), dst_2->as_Register(), src_1->as_FloatRegister());
1033       }
1034       break;
1035     }
1036 #endif // __SOFTFP__
1037 
1038 #ifdef __ABI_HARD__
1039     case T_FLOAT: {
1040       VMReg src = in_regs[i].first();
1041       VMReg dst = out_regs[i + extra_args].first();
1042       if (src->is_stack()) {
1043         if (dst->is_stack()) {
1044           __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
1045           __ str(Rtemp, Address(SP, reg2offset_out(dst)));
1046         } else {
1047           // C2 Java calling convention does not populate S14 and S15, therefore
1048           // those need to be loaded from stack here
1049           __ flds(dst->as_FloatRegister(), Address(FP, reg2offset_in(src)));
1050           fp_regs_in_arguments++;
1051         }
1052       } else {
1053         assert(src->is_FloatRegister(), "must be");
1054         fp_regs_in_arguments++;
1055       }
1056       break;
1057     }
1058     case T_DOUBLE: {
1059       VMReg src_1 = in_regs[i].first();
1060       VMReg src_2 = in_regs[i].second();
1061       VMReg dst_1 = out_regs[i + extra_args].first();
1062       VMReg dst_2 = out_regs[i + extra_args].second();
1063       if (src_1->is_stack()) {
1064         if (dst_1->is_stack()) {
1065           assert(dst_2->is_stack(), "must be");
1066           __ ldr(Rtemp, Address(FP, reg2offset_in(src_1)));
1067           __ ldr(LR,    Address(FP, reg2offset_in(src_2)));
1068           __ str(Rtemp, Address(SP, reg2offset_out(dst_1)));
1069           __ str(LR,    Address(SP, reg2offset_out(dst_2)));
1070         } else {
1071           // C2 Java calling convention does not populate S14 and S15, therefore
1072           // those need to be loaded from stack here
1073           __ fldd(dst_1->as_FloatRegister(), Address(FP, reg2offset_in(src_1)));
1074           fp_regs_in_arguments += 2;
1075         }
1076       } else {
1077         assert(src_1->is_FloatRegister() && src_2->is_FloatRegister(), "must be");
1078         fp_regs_in_arguments += 2;
1079       }
1080       break;
1081     }
1082 #endif // __ABI_HARD__
1083 
1084     default: {
1085       assert(in_sig_bt[i] != T_ADDRESS, "found T_ADDRESS in java args");
1086       VMReg src = in_regs[i].first();
1087       VMReg dst = out_regs[i + extra_args].first();
1088       if (src->is_stack()) {
1089         assert(dst->is_stack(), "must be");
1090         __ ldr(Rtemp, Address(FP, reg2offset_in(src)));
1091         __ str(Rtemp, Address(SP, reg2offset_out(dst)));
1092       } else if (dst->is_stack()) {
1093         __ str(src->as_Register(), Address(SP, reg2offset_out(dst)));
1094       } else {
1095         assert(src->is_Register() && dst->is_Register(), "must be");
1096         __ mov(dst->as_Register(), src->as_Register());
1097       }
1098     }
1099     }
1100   }
1101 
1102   // Get Klass mirror
1103   int klass_offset = -1;
1104   if (method_is_static) {
1105     klass_offset = oop_handle_offset * VMRegImpl::stack_slot_size;
1106     __ mov_oop(Rtemp, JNIHandles::make_local(method->method_holder()->java_mirror()));
1107     __ add(c_rarg1, SP, klass_offset);
1108     __ str(Rtemp, Address(SP, klass_offset));
1109     map->set_oop(VMRegImpl::stack2reg(oop_handle_offset));
1110   }
1111 
1112   // the PC offset given to add_gc_map must match the PC saved in set_last_Java_frame
1113   int pc_offset = __ set_last_Java_frame(SP, FP, true, Rtemp);
1114   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1115   oop_maps->add_gc_map(pc_offset, map);
1116 
1117   // Order last_Java_pc store with the thread state transition (to _thread_in_native)
1118   __ membar(MacroAssembler::StoreStore, Rtemp);
1119 
1120   // RedefineClasses() tracing support for obsolete method entry
1121   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1122     __ save_caller_save_registers();
1123     __ mov(R0, Rthread);
1124     __ mov_metadata(R1, method());
1125     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), R0, R1);
1126     __ restore_caller_save_registers();
1127   }
1128 
1129   const Register sync_handle = R5;
1130   const Register sync_obj    = R6;
1131   const Register disp_hdr    = altFP_7_11;
1132   const Register tmp         = R8;
1133 
1134   Label slow_lock, lock_done, fast_lock;
1135   if (method->is_synchronized()) {
1136     // The first argument is a handle to sync object (a class or an instance)
1137     __ ldr(sync_obj, Address(R1));
1138     // Remember the handle for the unlocking code
1139     __ mov(sync_handle, R1);
1140 
1141     if (LockingMode == LM_LIGHTWEIGHT) {
1142       log_trace(fastlock)("SharedRuntime lock fast");
1143       __ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
1144                           0x7 /* savemask */, slow_lock);
1145       // Fall through to lock_done
1146     } else if (LockingMode == LM_LEGACY) {
1147       const Register mark = tmp;
      // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
      // That would be acceptable, as either the CAS or the slow-case path is taken in that case.
1150 
1151       __ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
1152       __ sub(disp_hdr, FP, lock_slot_fp_offset);
1153       __ tst(mark, markWord::unlocked_value);
1154       __ b(fast_lock, ne);
1155 
1156       // Check for recursive lock
1157       // See comments in InterpreterMacroAssembler::lock_object for
1158       // explanations on the fast recursive locking check.
1159       // Check independently the low bits and the distance to SP
1160       // -1- test low 2 bits
1161       __ movs(Rtemp, AsmOperand(mark, lsl, 30));
1162       // -2- test (hdr - SP) if the low two bits are 0
1163       __ sub(Rtemp, mark, SP, eq);
1164       __ movs(Rtemp, AsmOperand(Rtemp, lsr, exact_log2(os::vm_page_size())), eq);
1165       // If still 'eq' then recursive locking OK
1166       // set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8267042)
1167       __ str(Rtemp, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
1168       __ b(lock_done, eq);
1169       __ b(slow_lock);
1170 
1171       __ bind(fast_lock);
1172       __ str(mark, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
1173 
1174       __ cas_for_lock_acquire(mark, disp_hdr, sync_obj, Rtemp, slow_lock);
1175     }
1176     __ bind(lock_done);
1177   }
1178 
1179   // Get JNIEnv*
1180   __ add(c_rarg0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
1181 
1182   // Perform thread state transition
1183   __ mov(Rtemp, _thread_in_native);
1184   __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
1185 
1186   // Finally, call the native method
1187   __ call(method->native_function());
1188 
1189   // Set FPSCR/FPCR to a known state
1190   if (AlwaysRestoreFPU) {
1191     __ restore_default_fp_mode();
1192   }
1193 
1194   // Ensure a Boolean result is mapped to 0..1
1195   if (ret_type == T_BOOLEAN) {
1196     __ c2bool(R0);
1197   }
1198 
1199   // Do a safepoint check while thread is in transition state
1200   Label call_safepoint_runtime, return_to_java;
1201   __ mov(Rtemp, _thread_in_native_trans);
1202   __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
1203 
1204   // make sure the store is observed before reading the SafepointSynchronize state and further mem refs
1205   if (!UseSystemMemoryBarrier) {
1206     __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
1207   }
1208 
1209   __ safepoint_poll(R2, call_safepoint_runtime);
1210   __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
1211   __ cmp(R3, 0);
1212   __ b(call_safepoint_runtime, ne);
1213 
1214   __ bind(return_to_java);
1215 
1216   // Perform thread state transition and reguard stack yellow pages if needed
1217   Label reguard, reguard_done;
1218   __ mov(Rtemp, _thread_in_Java);
1219   __ ldr_s32(R2, Address(Rthread, JavaThread::stack_guard_state_offset()));
1220   __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
1221 
1222   __ cmp(R2, StackOverflow::stack_guard_yellow_reserved_disabled);
1223   __ b(reguard, eq);
1224   __ bind(reguard_done);
1225 
1226   Label slow_unlock, unlock_done;
1227   if (method->is_synchronized()) {
1228     if (LockingMode == LM_LIGHTWEIGHT) {
1229       log_trace(fastlock)("SharedRuntime unlock fast");
1230       __ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
1231                             7 /* savemask */, slow_unlock);
1232       // Fall through
1233     } else if (LockingMode == LM_LEGACY) {
1234       // See C1_MacroAssembler::unlock_object() for more comments
1235       __ ldr(sync_obj, Address(sync_handle));
1236 
1237       // See C1_MacroAssembler::unlock_object() for more comments
1238       __ ldr(R2, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
1239       __ cbz(R2, unlock_done);
1240 
1241       __ cas_for_lock_release(disp_hdr, R2, sync_obj, Rtemp, slow_unlock);
1242     }
1243     __ bind(unlock_done);
1244   }
1245 
1246   // Set last java frame and handle block to zero
1247   __ ldr(LR, Address(Rthread, JavaThread::active_handles_offset()));
1248   __ reset_last_Java_frame(Rtemp); // sets Rtemp to 0 on 32-bit ARM
1249 
1250   __ str_32(Rtemp, Address(LR, JNIHandleBlock::top_offset()));
1251   if (CheckJNICalls) {
1252     __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1253   }
1254 
1255   // Unbox oop result, e.g. JNIHandles::resolve value in R0.
1256   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
1257     __ resolve_jobject(R0,      // value
1258                        Rtemp,   // tmp1
1259                        R1_tmp); // tmp2
1260   }
1261 
1262   // Any exception pending?
1263   __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
1264   __ mov(SP, FP);
1265 
1266   __ cmp(Rtemp, 0);
1267   // Pop the frame and return if no exception pending
1268   __ pop(RegisterSet(FP) | RegisterSet(PC), eq);
1269   // Pop the frame and forward the exception. Rexception_pc contains return address.
1270   __ ldr(FP, Address(SP, wordSize, post_indexed), ne);
1271   __ ldr(Rexception_pc, Address(SP, wordSize, post_indexed), ne);
1272   __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
1273 
1274   // Safepoint operation and/or pending suspend request is in progress.
1275   // Save the return values and call the runtime function by hand.
1276   __ bind(call_safepoint_runtime);
1277   push_result_registers(masm, ret_type);
1278   __ mov(R0, Rthread);
1279   __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1280   pop_result_registers(masm, ret_type);
1281   __ b(return_to_java);
1282 
1283   // Reguard stack pages. Save native results around a call to C runtime.
1284   __ bind(reguard);
1285   push_result_registers(masm, ret_type);
1286   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1287   pop_result_registers(masm, ret_type);
1288   __ b(reguard_done);
1289 
1290   if (method->is_synchronized()) {
1291     // Locking slow case
1292     __ bind(slow_lock);
1293 
1294     push_param_registers(masm, fp_regs_in_arguments);
1295 
1296     // last_Java_frame is already set, so do call_VM manually; no exception can occur
1297     __ mov(R0, sync_obj);
1298     __ mov(R1, disp_hdr);
1299     __ mov(R2, Rthread);
1300     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C));
1301 
1302     pop_param_registers(masm, fp_regs_in_arguments);
1303 
1304     __ b(lock_done);
1305 
1306     // Unlocking slow case
1307     __ bind(slow_unlock);
1308 
1309     push_result_registers(masm, ret_type);
1310 
1311     // Clear pending exception before reentering VM.
1312     // Can store the oop in register since it is a leaf call.
1313     assert_different_registers(Rtmp_save1, sync_obj, disp_hdr);
1314     __ ldr(Rtmp_save1, Address(Rthread, Thread::pending_exception_offset()));
1315     Register zero = __ zero_register(Rtemp);
1316     __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
1317     __ mov(R0, sync_obj);
1318     __ mov(R1, disp_hdr);
1319     __ mov(R2, Rthread);
1320     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1321     __ str(Rtmp_save1, Address(Rthread, Thread::pending_exception_offset()));
1322 
1323     pop_result_registers(masm, ret_type);
1324 
1325     __ b(unlock_done);
1326   }
1327 
1328   __ flush();
1329   return nmethod::new_native_nmethod(method,
1330                                      compile_id,
1331                                      masm->code(),
1332                                      vep_offset,
1333                                      frame_complete,
1334                                      stack_slots / VMRegImpl::slots_per_word,
1335                                      in_ByteSize(method_is_static ? klass_offset : receiver_offset),
1336                                      in_ByteSize(lock_slot_offset * VMRegImpl::stack_slot_size),
1337                                      oop_maps);
1338 }
1339 
// This function returns the adjustment size (in number of words) to a c2i adapter
// activation, for use during deoptimization.
1342 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
1343   int extra_locals_size = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
1344   return extra_locals_size;
1345 }
1346 
1347 
1348 // Number of stack slots between incoming argument block and the start of
1349 // a new frame.  The PROLOG must add this many slots to the stack.  The
1350 // EPILOG must remove this many slots.
1351 // FP + LR
1352 uint SharedRuntime::in_preserve_stack_slots() {
1353   return 2 * VMRegImpl::slots_per_word;
1354 }
1355 
1356 uint SharedRuntime::out_preserve_stack_slots() {
1357   return 0;
1358 }
1359 
1360 VMReg SharedRuntime::thread_register() {
1361   Unimplemented();
1362   return nullptr;
1363 }
1364 
1365 //------------------------------generate_deopt_blob----------------------------
1366 void SharedRuntime::generate_deopt_blob() {
1367   ResourceMark rm;
1368   const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
1369   CodeBuffer buffer(name, 1024, 1024);
1370   int frame_size_in_words;
1371   OopMapSet* oop_maps;
1372   int reexecute_offset;
1373   int exception_in_tls_offset;
1374   int exception_offset;
1375 
1376   MacroAssembler* masm = new MacroAssembler(&buffer);
1377   Label cont;
1378   const Register Rkind   = R9; // caller-saved
1379   const Register Rublock = R6;
1380   const Register Rsender = altFP_7_11;
1381   assert_different_registers(Rkind, Rublock, Rsender, Rexception_obj, Rexception_pc, R0, R1, R2, R3, R8, Rtemp);
1382 
1383   address start = __ pc();
1384 
1385   oop_maps = new OopMapSet();
1386   // LR saved by caller (can be live in c2 method)
1387 
1388   // A deopt is a case where LR may be live in the c2 nmethod. So it's
1389   // not possible to call the deopt blob from the nmethod and pass the
1390   // address of the deopt handler of the nmethod in LR. What happens
1391   // instead is that the caller of the deopt blob pushes the current
1392   // address so the deopt blob doesn't have to do it. This way LR can
1393   // be preserved: it contains the live value from the nmethod and is
1394   // saved at R14/R30_offset here.
1395   OopMap* map = RegisterSaver::save_live_registers(masm, &frame_size_in_words, true);
1396   __ mov(Rkind, Deoptimization::Unpack_deopt);
1397   __ b(cont);
1398 
1399   exception_offset = __ pc() - start;
1400 
1401   // Transfer Rexception_obj & Rexception_pc into TLS and fall through to the
1402   // exception_in_tls_offset entry point.
1403   __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
1404   __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));
1405   // Force return value to null to avoid confusing the escape analysis
1406   // logic. Everything is dead here anyway.
1407   __ mov(R0, 0);
1408 
1409   exception_in_tls_offset = __ pc() - start;
1410 
1411   // Exception data is in JavaThread structure
1412   // Patch the return address of the current frame
1413   __ ldr(LR, Address(Rthread, JavaThread::exception_pc_offset()));
1414   (void) RegisterSaver::save_live_registers(masm, &frame_size_in_words);
1415   {
1416     const Register Rzero = __ zero_register(Rtemp); // XXX should be OK for C2 but not 100% sure
1417     __ str(Rzero, Address(Rthread, JavaThread::exception_pc_offset()));
1418   }
1419   __ mov(Rkind, Deoptimization::Unpack_exception);
1420   __ b(cont);
1421 
1422   reexecute_offset = __ pc() - start;
1423 
1424   (void) RegisterSaver::save_live_registers(masm, &frame_size_in_words);
1425   __ mov(Rkind, Deoptimization::Unpack_reexecute);
1426 
1427   // Calculate UnrollBlock and save the result in Rublock
1428   __ bind(cont);
1429   __ mov(R0, Rthread);
1430   __ mov(R1, Rkind);
1431 
1432   int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp); // note: FP may not need to be saved (not on x86)
1433   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1434   __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
1435   if (pc_offset == -1) {
1436     pc_offset = __ offset();
1437   }
1438   oop_maps->add_gc_map(pc_offset, map);
1439   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1440 
1441   __ mov(Rublock, R0);
1442 
1443   // Reload Rkind from the UnrollBlock (might have changed)
1444   __ ldr_s32(Rkind, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset()));
1445   Label noException;
1446   __ cmp_32(Rkind, Deoptimization::Unpack_exception);   // Was exception pending?
1447   __ b(noException, ne);
1448   // handle exception case
1449 #ifdef ASSERT
1450   // assert that exception_pc is zero in tls
1451   { Label L;
1452     __ ldr(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));
1453     __ cbz(Rexception_pc, L);
1454     __ stop("exception pc should be null");
1455     __ bind(L);
1456   }
1457 #endif
1458   __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
1459   __ verify_oop(Rexception_obj);
1460   {
1461     const Register Rzero = __ zero_register(Rtemp);
1462     __ str(Rzero, Address(Rthread, JavaThread::exception_oop_offset()));
1463   }
1464 
1465   __ bind(noException);
1466 
1467   // This frame is going away.  Fetch return value, so we can move it to
1468   // a new frame.
1469   __ ldr(R0, Address(SP, RegisterSaver::R0_offset * wordSize));
1470   __ ldr(R1, Address(SP, RegisterSaver::R1_offset * wordSize));
1471 #ifndef __SOFTFP__
1472   __ ldr_double(D0, Address(SP, RegisterSaver::D0_offset * wordSize));
1473 #endif
1474   // pop frame
1475   __ add(SP, SP, RegisterSaver::reg_save_size * wordSize);
1476 
1477   // Set initial stack state before pushing interpreter frames
1478   __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
1479   __ ldr(R2, Address(Rublock, Deoptimization::UnrollBlock::frame_pcs_offset()));
1480   __ ldr(R3, Address(Rublock, Deoptimization::UnrollBlock::frame_sizes_offset()));
1481 
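       // Rtemp holds the size (in bytes) of the deoptimized compiled frame;
       // adding it to SP removes that frame from the stack.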
1482   __ add(SP, SP, Rtemp);
1483 
1484 #ifdef ASSERT
1485   // Compilers generate code that bangs the stack by as much as the
1486   // interpreter would need, so this stack banging should never
1487   // trigger a fault. Verify that it does not on non-product builds.
1488   // Check whether there is enough stack to push the deoptimized frames.
1489   //
1490   // The compiled method that we are deoptimizing was popped from the stack.
1491   // If the stack bang results in a stack overflow, we don't return to the
1492   // method that is being deoptimized. The stack overflow exception is
1493   // propagated to the caller of the deoptimized method. We need to get the
1494   // pc from the caller into LR and restore FP.
1495   __ ldr(LR, Address(R2, 0));
1496   __ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset()));
1497   __ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
1498   __ arm_stack_overflow_check(R8, Rtemp);
1499 #endif
1500   __ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::number_of_frames_offset()));
1501 
1502   // Pick up the initial fp we should save
1503   // XXX Note: was ldr(FP, Address(FP));
1504 
1505   // The compiler no longer uses FP as a frame pointer for the
1506   // compiled code. It can be used by the allocator in C2 or to
1507   // remember the original SP for JSR292 call sites.
1508 
1509   // Hence, ldr(FP, Address(FP)) is probably not correct. For x86,
1510   // Deoptimization::fetch_unroll_info computes the right FP value and
1511   // stores it in Rublock.initial_info. This has been activated for ARM.
1512   __ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset()));
1513 
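       // caller_adjustment makes room on the stack for interpreter locals beyond
       // the incoming parameters (cf. last_frame_adjust above); Rsender keeps the
       // SP before the adjustment as the sender SP for the first frame pushed below.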
1514   __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::caller_adjustment_offset()));
1515   __ mov(Rsender, SP);
1516   __ sub(SP, SP, Rtemp);
1517 
1518   // Push interpreter frames in a loop
1519   Label loop;
1520   __ bind(loop);
1521   __ ldr(LR, Address(R2, wordSize, post_indexed));         // load frame pc
1522   __ ldr(Rtemp, Address(R3, wordSize, post_indexed));      // load frame size
1523 
1524   __ raw_push(FP, LR);                                     // create new frame
1525   __ mov(FP, SP);
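       // the frame size in Rtemp includes the FP and LR words just pushed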
1526   __ sub(Rtemp, Rtemp, 2*wordSize);
1527 
1528   __ sub(SP, SP, Rtemp);
1529 
1530   __ str(Rsender, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
1531   __ mov(LR, 0);
1532   __ str(LR, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1533 
1534   __ subs(R8, R8, 1);                               // decrement counter
1535   __ mov(Rsender, SP);
1536   __ b(loop, ne);
1537 
1538   // Re-push self-frame
1539   __ ldr(LR, Address(R2));
1540   __ raw_push(FP, LR);
1541   __ mov(FP, SP);
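       // allocate the rest of the self-frame (the register save area minus the
       // FP/LR pair pushed above)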
1542   __ sub(SP, SP, (frame_size_in_words - 2) * wordSize);
1543 
1544   // Restore frame locals after moving the frame
1545   __ str(R0, Address(SP, RegisterSaver::R0_offset * wordSize));
1546   __ str(R1, Address(SP, RegisterSaver::R1_offset * wordSize));
1547 
1548 #ifndef __SOFTFP__
1549   __ str_double(D0, Address(SP, RegisterSaver::D0_offset * wordSize));
1550 #endif // !__SOFTFP__
1551 
1552 #ifdef ASSERT
1553   // Reload Rkind from the UnrollBlock and check that it was not overwritten (Rkind is not callee-saved)
1554   { Label L;
1555     __ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset()));
1556     __ cmp_32(Rkind, Rtemp);
1557     __ b(L, eq);
1558     __ stop("Rkind was overwritten");
1559     __ bind(L);
1560   }
1561 #endif
1562 
1563   // Call unpack_frames with proper arguments
1564   __ mov(R0, Rthread);
1565   __ mov(R1, Rkind);
1566 
1567   pc_offset = __ set_last_Java_frame(SP, FP, true, Rtemp);
1568   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1569   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
1570   if (pc_offset == -1) {
1571     pc_offset = __ offset();
1572   }
1573   oop_maps->add_gc_map(pc_offset, new OopMap(frame_size_in_words * VMRegImpl::slots_per_word, 0));
1574   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1575 
1576   // Collect return values, pop self-frame and jump to interpreter
1577   __ ldr(R0, Address(SP, RegisterSaver::R0_offset * wordSize));
1578   __ ldr(R1, Address(SP, RegisterSaver::R1_offset * wordSize));
1579   // Interpreter floats are controlled by __SOFTFP__, but the compiler's
1580   // float return value registers are controlled by __ABI_HARD__.
1581   // This matters for vfp-sflt builds.
1582 #ifndef __SOFTFP__
1583   // Interpreter hard float
1584 #ifdef __ABI_HARD__
1585   // Compiler float return value in FP registers
1586   __ ldr_double(D0, Address(SP, RegisterSaver::D0_offset * wordSize));
1587 #else
1588   // Compiler float return value is in integer registers,
1589   // copy to D0 for the interpreter (D0 <-- R1:R0)
1590   __ fmdrr(D0_tos, R0, R1);
1591 #endif
1592 #endif // !__SOFTFP__
1593   __ mov(SP, FP);
1594 
1595   __ pop(RegisterSet(FP) | RegisterSet(PC));
1596 
1597   __ flush();
1598 
1599   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
1600                                            reexecute_offset, frame_size_in_words);
1601   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
1602 }
1603 
1604 //------------------------------generate_handler_blob------
1605 //
1606 // Generate a special Compile2Runtime blob that saves all registers,
1607 // sets up the oopmap, and calls safepoint code to stop the compiled code
1608 // for a safepoint.
1609 //
1610 SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
1611   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
1612   assert(is_polling_page_id(id), "expected a polling page stub id");
1613 
1614   ResourceMark rm;
1615   const char* name = SharedRuntime::stub_name(id);
1616   CodeBuffer buffer(name, 256, 256);
1617   int frame_size_words;
1618   OopMapSet* oop_maps;
1619 
1620   bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
1621 
1622   MacroAssembler* masm = new MacroAssembler(&buffer);
1623   address start = __ pc();
1624   oop_maps = new OopMapSet();
1625 
1626   if (!cause_return) {
1627     __ sub(SP, SP, 4); // make room for LR which may still be live
1628                        // here if we are coming from a c2 method
1629   }
1630 
1631   OopMap* map = RegisterSaver::save_live_registers(masm, &frame_size_words, !cause_return);
1632   if (!cause_return) {
1633     // update saved PC with correct value
1634     // need 2 steps because LR can be live in c2 method
1635     __ ldr(LR, Address(Rthread, JavaThread::saved_exception_pc_offset()));
1636     __ str(LR, Address(SP, RegisterSaver::LR_offset * wordSize));
1637   }
1638 
1639   __ mov(R0, Rthread);
1640   int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp); // note: FP may not need to be saved (not on x86)
1641   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1642   __ call(call_ptr);
1643   if (pc_offset == -1) {
1644     pc_offset = __ offset();
1645   }
1646   oop_maps->add_gc_map(pc_offset, map);
1647   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1648 
1649   if (!cause_return) {
1650     // If our stashed return pc was modified by the runtime, we avoid touching it
1651     __ ldr(R3_tmp, Address(Rthread, JavaThread::saved_exception_pc_offset()));
1652     __ ldr(R2_tmp, Address(SP, RegisterSaver::LR_offset * wordSize));
1653     __ cmp(R2_tmp, R3_tmp);
1654     // Adjust return pc forward to step over the safepoint poll instruction
1655     __ add(R2_tmp, R2_tmp, 4, eq);
1656     __ str(R2_tmp, Address(SP, RegisterSaver::LR_offset * wordSize), eq);
1657 
1658     // Check for pending exception
1659     __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
1660     __ cmp(Rtemp, 0);
1661 
1662     RegisterSaver::restore_live_registers(masm, false);
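         // no pending exception: return to the compiled code; otherwise pop the
         // return pc into Rexception_pc and forward the exception below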
1663     __ pop(PC, eq);
1664     __ pop(Rexception_pc);
1665   } else {
1666     // Check for pending exception
1667     __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
1668     __ cmp(Rtemp, 0);
1669 
1670     RegisterSaver::restore_live_registers(masm);
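         // no pending exception: return via LR; otherwise remember the return pc
         // in Rexception_pc and forward the exception below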
1671     __ bx(LR, eq);
1672     __ mov(Rexception_pc, LR);
1673   }
1674 
1675   __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
1676 
1677   __ flush();
1678 
1679   return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
1680 }
1681 
1682 RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
1683   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
1684   assert(is_resolve_id(id), "expected a resolve stub id");
1685 
1686   ResourceMark rm;
1687   const char* name = SharedRuntime::stub_name(id);
1688   CodeBuffer buffer(name, 1000, 512);
1689   int frame_size_words;
1690   OopMapSet *oop_maps;
1691   int frame_complete;
1692 
1693   MacroAssembler* masm = new MacroAssembler(&buffer);
1694   Label pending_exception;
1695 
1696   int start = __ offset();
1697 
1698   oop_maps = new OopMapSet();
1699   OopMap* map = RegisterSaver::save_live_registers(masm, &frame_size_words);
1700 
1701   frame_complete = __ offset();
1702 
1703   __ mov(R0, Rthread);
1704 
1705   int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
1706   assert(start == 0, "warning: start differs from code_begin");
1707   __ call(destination);
1708   if (pc_offset == -1) {
1709     pc_offset = __ offset();
1710   }
1711   oop_maps->add_gc_map(pc_offset, map);
1712   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1713 
1714   __ ldr(R1, Address(Rthread, Thread::pending_exception_offset()));
1715   __ cbnz(R1, pending_exception);
1716 
1717   // Overwrite saved register values
1718 
1719   // Place metadata result of VM call into Rmethod
1720   __ get_vm_result_2(R1, Rtemp);
1721   __ str(R1, Address(SP, RegisterSaver::Rmethod_offset * wordSize));
1722 
1723   // Place target address (VM call result) into Rtemp
1724   __ str(R0, Address(SP, RegisterSaver::Rtemp_offset * wordSize));
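       // (Rtemp is reloaded from this slot by restore_live_registers below)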
1725 
1726   RegisterSaver::restore_live_registers(masm);
1727   __ jump(Rtemp);
1728 
1729   __ bind(pending_exception);
1730 
1731   RegisterSaver::restore_live_registers(masm);
1732   const Register Rzero = __ zero_register(Rtemp);
1733   __ str(Rzero, Address(Rthread, JavaThread::vm_result_2_offset()));
1734   __ mov(Rexception_pc, LR);
1735   __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
1736 
1737   __ flush();
1738 
1739   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
1740 }
1741 
1742 //------------------------------------------------------------------------------------------------------------------------
1743 // Continuation point for throwing implicit exceptions that are not handled in
1744 // the current activation. Fabricates an exception oop and initiates normal
1745 // exception dispatching in this frame.
1746 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
1747   assert(is_throw_id(id), "expected a throw stub id");
1748 
1749   const char* name = SharedRuntime::stub_name(id);
1750 
1751   int insts_size = 128;
1752   int locs_size  = 32;
1753 
1754   ResourceMark rm;
1755   const char* timer_msg = "SharedRuntime generate_throw_exception";
1756   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
1757 
1758   CodeBuffer code(name, insts_size, locs_size);
1759   OopMapSet* oop_maps;
1760   int frame_size;
1761   int frame_complete;
1762 
1763   oop_maps = new OopMapSet();
1764   MacroAssembler* masm = new MacroAssembler(&code);
1765 
1766   address start = __ pc();
1767 
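     // the frame consists only of the saved FP and LR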
1768   frame_size = 2;
1769   __ mov(Rexception_pc, LR);
1770   __ raw_push(FP, LR);
1771 
1772   frame_complete = __ pc() - start;
1773 
1774   // Any extra arguments are already supposed to be in R1 and R2
1775   __ mov(R0, Rthread);
1776 
1777   int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
1778   assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
1779   __ call(runtime_entry);
1780   if (pc_offset == -1) {
1781     pc_offset = __ offset();
1782   }
1783 
1784   // Generate oop map
1785   OopMap* map =  new OopMap(frame_size*VMRegImpl::slots_per_word, 0);
1786   oop_maps->add_gc_map(pc_offset, map);
1787   __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
1788 
1789   __ raw_pop(FP, LR);
1790   __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
1791 
1792   RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete,
1793                                                     frame_size, oop_maps, false);
1794   return stub;
1795 }
1796 
1797 #if INCLUDE_JFR
1798 
1799 // For c2: c_rarg0 is junk; call the runtime to write a checkpoint.
1800 // It returns a jobject handle to the event writer.
1801 // The handle is dereferenced and the return value is the event writer oop.
1802 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
1803   enum layout {
1804     r1_off,
1805     r2_off,
1806     return_off,
1807     framesize // inclusive of return address
1808   };
1809 
1810   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id);
1811   CodeBuffer code(name, 512, 64);
1812   MacroAssembler* masm = new MacroAssembler(&code);
1813 
1814   address start = __ pc();
1815   __ raw_push(R1, R2, LR);
1816   address the_pc = __ pc();
1817 
1818   int frame_complete = the_pc - start;
1819 
1820   __ set_last_Java_frame(SP, FP, true, Rtemp);
1821   __ mov(c_rarg0, Rthread);
1822   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), c_rarg0);
1823   __ reset_last_Java_frame(Rtemp);
1824 
1825   // R0 is jobject handle result, unpack and process it through a barrier.
1826   __ resolve_global_jobject(R0, Rtemp, R1);
1827 
1828   __ raw_pop(R1, R2, LR);
1829   __ ret();
1830 
1831   OopMapSet* oop_maps = new OopMapSet();
1832   OopMap* map = new OopMap(framesize, 1);
1833   oop_maps->add_gc_map(frame_complete, map);
1834 
1835   RuntimeStub* stub =
1836     RuntimeStub::new_runtime_stub(name,
1837                                   &code,
1838                                   frame_complete,
1839                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
1840                                   oop_maps,
1841                                   false);
1842   return stub;
1843 }
1844 
1845 // For c2: call the runtime to return a leased buffer.
1846 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
1847   enum layout {
1848     r1_off,
1849     r2_off,
1850     return_off,
1851     framesize // inclusive of return address
1852   };
1853 
1854   const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id);
1855   CodeBuffer code(name, 512, 64);
1856   MacroAssembler* masm = new MacroAssembler(&code);
1857 
1858   address start = __ pc();
1859   __ raw_push(R1, R2, LR);
1860   address the_pc = __ pc();
1861 
1862   int frame_complete = the_pc - start;
1863 
1864   __ set_last_Java_frame(SP, FP, true, Rtemp);
1865   __ mov(c_rarg0, Rthread);
1866   __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), c_rarg0);
1867   __ reset_last_Java_frame(Rtemp);
1868 
1869   __ raw_pop(R1, R2, LR);
1870   __ ret();
1871 
1872   OopMapSet* oop_maps = new OopMapSet();
1873   OopMap* map = new OopMap(framesize, 1);
1874   oop_maps->add_gc_map(frame_complete, map);
1875 
1876   RuntimeStub* stub =
1877     RuntimeStub::new_runtime_stub(name,
1878                                   &code,
1879                                   frame_complete,
1880                                   (framesize >> (LogBytesPerWord - LogBytesPerInt)),
1881                                   oop_maps,
1882                                   false);
1883   return stub;
1884 }
1885 
1886 #endif // INCLUDE_JFR