/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
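//
// For illustration, a hypothetical patch of the adrp/add pair handled as
// "type 2" below (x8 is an arbitrary example register):
//
//   adrp x8, target_page          // page delta re-patched in bits 23:5 and 30:29
//   add  x8, x8, #offset_in_page  // low 12 bits re-patched in bits 21:10
//
// would set instructions = 2 and return 8 bytes.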
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ull << 48), "48-bit overflow in address constant");
  intptr_t offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = target - branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      uint64_t dest = (uint64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;

      // We handle 4 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 3 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str, add or movk instruction. Otherwise we could accidentally end
      // up treating a type 4 relocation as a type 1, 2 or 3 just because it happened
      // to be followed by a random unrelated ldr/str, add or movk instruction.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // movk #imm16<<32
        Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32);
        uintptr_t dest = ((uintptr_t)target & 0xffffffffULL) | ((uintptr_t)branch & 0xffff00000000ULL);
        uintptr_t pc_page = (uintptr_t)branch >> 12;
        uintptr_t adr_page = (uintptr_t)dest >> 12;
        offset = adr_page - pc_page;
        instructions = 2;
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits).  We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
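  //
  // Sketch of the two sequences this assumes (Rd stands for whatever
  // register the original mov targeted; illustrative only):
  //
  //   narrow: movz Rd, #(n >> 16), lsl #16
  //           movk Rd, #(n & 0xffff)
  //
  //   wide:   movz Rd, #(dest & 0xffff)
  //           movk Rd, #((dest >> 16) & 0xffff), lsl #16
  //           movk Rd, #((dest >> 32) & 0xffff), lsl #32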
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  intptr_t offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
            Instruction_aarch64::extract(insn, 4, 0) ==
            Instruction_aarch64::extract(insn2, 4, 0)) {
          target_page = (target_page & 0xffffffff) |
                        ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
        }
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return 0;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
  if (acquire) {
    lea(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
    ldar(rscratch1, rscratch1);
  } else {
    ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, rscratch1);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp and sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
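//
// A sketch of what this protocol boils down to, using the JavaFrameAnchor
// field names (illustrative pseudo-C++, not literal code):
//
//   rthread->anchor._last_Java_sp = last_java_sp;
//   rthread->anchor._last_Java_fp = last_java_fp;  // optional
//   rthread->anchor._last_Java_pc = last_java_pc;  // optional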
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != NULL, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 4G (see the assert above), which is
    // within ADRP's +/- 4GB reach.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}

void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far jump not found in code cache");
  if (far_branches()) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 4G (see the assert above), which is
    // within ADRP's +/- 4GB reach.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline. If the code cache is small
// enough that all branches are in range, trampolines won't be emitted.

address MacroAssembler::trampoline_call(Address entry, CodeBuffer* cbuf) {
  assert(JavaThread::current()->is_Compiler_thread(), "just checking");
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  // We need a trampoline if branches are far.
  if (far_branches()) {
    bool in_scratch_emit_size = false;
#ifdef COMPILER2
    // We don't want to emit a trampoline if C2 is generating dummy
    // code during its branch shortening phase.
    CompileTask* task = ciEnv::current()->task();
    in_scratch_emit_size =
      (task != NULL && is_c2_compile(task->comp_level()) &&
       Compile::current()->output()->in_scratch_emit_size());
#endif
    if (!in_scratch_emit_size) {
      address stub = emit_trampoline_stub(offset(), entry.target());
      if (stub == NULL) {
        postcond(pc() == badAddress);
        return NULL; // CodeCache is full
      }
    }
  }

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
  if (!far_branches()) {
    bl(entry.target());
  } else {
    bl(pc());
  }
  // just need to return a non-null address
  postcond(pc() != badAddress);
  return pc();
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)
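//
// A sketch of the stub emit_trampoline_stub() produces below (the
// destination word is laid out inline, 8-byte aligned):
//
//   trampoline:
//     ldr  rscratch1, 1f   // load target from the pc-relative data word
//     br   rscratch1       // LR still points at the original call site
//   1:
//     .quad <destination address>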

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(NativeInstruction::instruction_size
                              + NativeCallTrampolineStub::instruction_size);
  if (stub == NULL) {
    return NULL;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectStaticCall::set_to_interpreted knows the
  // exact layout of this stub.
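  //
  // A sketch of that layout as emitted below (mov_metadata and movptr
  // each expand to a patchable movz/movk/movk triple):
  //
  //   isb
  //   movz/movk/movk rmethod,   <Metadata*>   // patched to the callee Method*
  //   movz/movk/movk rscratch1, #0            // patched to the i2c entry point
  //   br   rscratch1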

  isb();
  mov_metadata(rmethod, (Metadata*)NULL);

  // Jump to the entry point of the i2c stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_inline_type(inline_klass, temp_reg, done_check);
    stop("get_default_value_oop from non inline type klass");
    bind(done_check);
  }
#endif
  Register offset = temp_reg;
  // Getting the offset of the pre-allocated default value
  ldr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())));
  ldr(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset())));

  // Getting the mirror
  ldr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset())));
  resolve_oop_handle(obj, inline_klass);

  // Getting the pre-allocated default value from the mirror
  Address field(obj, offset);
  load_heap_oop(obj, field);
}

void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) {
#ifdef ASSERT
  {
    Label done_check;
    test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check);
    stop("get_empty_value from non-empty inline klass");
    bind(done_check);
  }
#endif
  get_default_value_oop(inline_klass, temp_reg, obj);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null.  A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset_in_bytes() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = in_bytes(Klass::vtable_start_offset());
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// Scans 'count' pointer-sized words at [addr] for an occurrence of 'value';
// generic.
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// Scans 'count' 4-byte words at [addr] for an occurrence of 'value';
// generic.
1036 void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
1037                                 Register scratch) {
1038   Label Lloop, Lexit;
1039   cbz(count, Lexit);
1040   bind(Lloop);
1041   ldrw(scratch, post(addr, wordSize));
1042   cmpw(value, scratch);
1043   br(EQ, Lexit);
1044   sub(count, count, 1);
1045   cbnz(count, Lloop);
1046   bind(Lexit);
1047 }
1048 
1049 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1050                                                    Register super_klass,
1051                                                    Register temp_reg,
1052                                                    Register temp2_reg,
1053                                                    Label* L_success,
1054                                                    Label* L_failure,
1055                                                    bool set_cond_codes) {
1056   assert_different_registers(sub_klass, super_klass, temp_reg);
1057   if (temp2_reg != noreg)
1058     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1059 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1060 
1061   Label L_fallthrough;
1062   int label_nulls = 0;
1063   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
1064   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
1065   assert(label_nulls <= 1, "at most one NULL in the batch");
1066 
1067   // a couple of useful fields in sub_klass:
1068   int ss_offset = in_bytes(Klass::secondary_supers_offset());
1069   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1070   Address secondary_supers_addr(sub_klass, ss_offset);
1071   Address super_cache_addr(     sub_klass, sc_offset);
1072 
1073   BLOCK_COMMENT("check_klass_subtype_slow_path");
1074 
1075   // Do a linear scan of the secondary super-klass chain.
1076   // This code is rarely used, so simplicity is a virtue here.
1077   // The repne_scan instruction uses fixed registers, which we must spill.
1078   // Don't worry too much about pre-existing connections with the input regs.
1079 
1080   assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
1081   assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
1082 
1083   RegSet pushed_registers;
1084   if (!IS_A_TEMP(r2))    pushed_registers += r2;
1085   if (!IS_A_TEMP(r5))    pushed_registers += r5;
1086 
1087   if (super_klass != r0 || UseCompressedOops) {
1088     if (!IS_A_TEMP(r0))   pushed_registers += r0;
1089   }
1090 
1091   push(pushed_registers, sp);
1092 
1093   // Get super_klass value into r0 (even if it was in r5 or r2).
1094   if (super_klass != r0) {
1095     mov(r0, super_klass);
1096   }
1097 
1098 #ifndef PRODUCT
1099   mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
1100   Address pst_counter_addr(rscratch2);
1101   ldr(rscratch1, pst_counter_addr);
1102   add(rscratch1, rscratch1, 1);
1103   str(rscratch1, pst_counter_addr);
1104 #endif //PRODUCT
1105 
1106   // We will consult the secondary-super array.
1107   ldr(r5, secondary_supers_addr);
1108   // Load the array length.
1109   ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1110   // Skip to start of data.
1111   add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1112 
1113   cmp(sp, zr); // Clear Z flag; SP is never zero
1114   // Scan R2 words at [R5] for an occurrence of R0.
1115   // Set NZ/Z based on last compare.
1116   repne_scan(r5, r0, r2, rscratch1);
1117 
1118   // Unspill the temp. registers:
1119   pop(pushed_registers, sp);
1120 
1121   br(Assembler::NE, *L_failure);
1122 
1123   // Success.  Cache the super we found and proceed in triumph.
1124   str(super_klass, super_cache_addr);
1125 
1126   if (L_success != &L_fallthrough) {
1127     b(*L_success);
1128   }
1129 
1130 #undef IS_A_TEMP
1131 
1132   bind(L_fallthrough);
1133 }
1134 
1135 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
1136   assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
1137   assert_different_registers(klass, rthread, scratch);
1138 
1139   Label L_fallthrough, L_tmp;
1140   if (L_fast_path == NULL) {
1141     L_fast_path = &L_fallthrough;
1142   } else if (L_slow_path == NULL) {
1143     L_slow_path = &L_fallthrough;
1144   }
1145   // Fast path check: class is fully initialized
1146   ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
1147   subs(zr, scratch, InstanceKlass::fully_initialized);
1148   br(Assembler::EQ, *L_fast_path);
1149 
1150   // Fast path check: current thread is initializer thread
1151   ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
1152   cmp(rthread, scratch);
1153 
1154   if (L_slow_path == &L_fallthrough) {
1155     br(Assembler::EQ, *L_fast_path);
1156     bind(*L_slow_path);
1157   } else if (L_fast_path == &L_fallthrough) {
1158     br(Assembler::NE, *L_slow_path);
1159     bind(*L_fast_path);
1160   } else {
1161     Unimplemented();
1162   }
1163 }
1164 
1165 void MacroAssembler::verify_oop(Register reg, const char* s) {
1166   if (!VerifyOops || VerifyAdapterSharing) {
1167     // Below address of the code string confuses VerifyAdapterSharing
1168     // because it may differ between otherwise equivalent adapters.
1169     return;
1170   }
1171 
1172   // Pass register number to verify_oop_subroutine
1173   const char* b = NULL;
1174   {
1175     ResourceMark rm;
1176     stringStream ss;
1177     ss.print("verify_oop: %s: %s", reg->name(), s);
1178     b = code_string(ss.as_string());
1179   }
1180   BLOCK_COMMENT("verify_oop {");
1181 
1182   strip_return_address(); // This might happen within a stack frame.
1183   protect_return_address();
1184   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1185   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1186 
1187   mov(r0, reg);
1188   movptr(rscratch1, (uintptr_t)(address)b);
1189 
1190   // call indirectly to solve generation ordering problem
1191   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1192   ldr(rscratch2, Address(rscratch2));
1193   blr(rscratch2);
1194 
1195   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1196   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1197   authenticate_return_address();
1198 
1199   BLOCK_COMMENT("} verify_oop");
1200 }
1201 
1202 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1203   if (!VerifyOops || VerifyAdapterSharing) {
1204     // Below address of the code string confuses VerifyAdapterSharing
1205     // because it may differ between otherwise equivalent adapters.
1206     return;
1207   }
1208 
1209   const char* b = NULL;
1210   {
1211     ResourceMark rm;
1212     stringStream ss;
1213     ss.print("verify_oop_addr: %s", s);
1214     b = code_string(ss.as_string());
1215   }
1216   BLOCK_COMMENT("verify_oop_addr {");
1217 
1218   strip_return_address(); // This might happen within a stack frame.
1219   protect_return_address();
1220   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1221   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1222 
1223   // addr may contain sp so we will have to adjust it based on the
1224   // pushes that we just did.
1225   if (addr.uses(sp)) {
1226     lea(r0, addr);
1227     ldr(r0, Address(r0, 4 * wordSize));
1228   } else {
1229     ldr(r0, addr);
1230   }
1231   movptr(rscratch1, (uintptr_t)(address)b);
1232 
1233   // call indirectly to solve generation ordering problem
1234   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1235   ldr(rscratch2, Address(rscratch2));
1236   blr(rscratch2);
1237 
1238   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1239   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1240   authenticate_return_address();
1241 
1242   BLOCK_COMMENT("} verify_oop_addr");
1243 }
1244 
1245 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1246                                          int extra_slot_offset) {
1247   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
1248   int stackElementSize = Interpreter::stackElementSize;
1249   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
1250 #ifdef ASSERT
1251   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
1252   assert(offset1 - offset == stackElementSize, "correct arithmetic");
1253 #endif
1254   if (arg_slot.is_constant()) {
1255     return Address(esp, arg_slot.as_constant() * stackElementSize
1256                    + offset);
1257   } else {
1258     add(rscratch1, esp, arg_slot.as_register(),
1259         ext::uxtx, exact_log2(stackElementSize));
1260     return Address(rscratch1, offset);
1261   }
1262 }
1263 
1264 void MacroAssembler::call_VM_leaf_base(address entry_point,
1265                                        int number_of_arguments,
1266                                        Label *retaddr) {
1267   Label E, L;
1268 
1269   stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
1270 
1271   mov(rscratch1, entry_point);
1272   blr(rscratch1);
1273   if (retaddr)
1274     bind(*retaddr);
1275 
1276   ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
1277 }
1278 
1279 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1280   call_VM_leaf_base(entry_point, number_of_arguments);
1281 }
1282 
1283 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1284   pass_arg0(this, arg_0);
1285   call_VM_leaf_base(entry_point, 1);
1286 }
1287 
1288 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1289   assert_different_registers(arg_1, c_rarg0);
1290   pass_arg0(this, arg_0);
1291   pass_arg1(this, arg_1);
1292   call_VM_leaf_base(entry_point, 2);
1293 }
1294 
1295 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1296                                   Register arg_1, Register arg_2) {
1297   assert_different_registers(arg_1, c_rarg0);
1298   assert_different_registers(arg_2, c_rarg0, c_rarg1);
1299   pass_arg0(this, arg_0);
1300   pass_arg1(this, arg_1);
1301   pass_arg2(this, arg_2);
1302   call_VM_leaf_base(entry_point, 3);
1303 }
1304 
1305 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1306   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1307 }
1308 
1309 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1310   pass_arg0(this, arg_0);
1311   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1312 }
1313 
1314 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1316   assert(arg_0 != c_rarg1, "smashed arg");
1317   pass_arg1(this, arg_1);
1318   pass_arg0(this, arg_0);
1319   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1320 }
1321 
1322 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1323   assert(arg_0 != c_rarg2, "smashed arg");
1324   assert(arg_1 != c_rarg2, "smashed arg");
1325   pass_arg2(this, arg_2);
1326   assert(arg_0 != c_rarg1, "smashed arg");
1327   pass_arg1(this, arg_1);
1328   pass_arg0(this, arg_0);
1329   MacroAssembler::call_VM_leaf_base(entry_point, 3);
1330 }
1331 
1332 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1333   assert(arg_0 != c_rarg3, "smashed arg");
1334   assert(arg_1 != c_rarg3, "smashed arg");
1335   assert(arg_2 != c_rarg3, "smashed arg");
1336   pass_arg3(this, arg_3);
1337   assert(arg_0 != c_rarg2, "smashed arg");
1338   assert(arg_1 != c_rarg2, "smashed arg");
1339   pass_arg2(this, arg_2);
1340   assert(arg_0 != c_rarg1, "smashed arg");
1341   pass_arg1(this, arg_1);
1342   pass_arg0(this, arg_0);
1343   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1344 }
1345 
1346 void MacroAssembler::null_check(Register reg, int offset) {
1347   if (needs_explicit_null_check(offset)) {
1348     // provoke an OS NULL exception if reg is NULL by
1349     // accessing M[reg] without changing any registers
1350     // NOTE: this is plenty to provoke a segv
1351     ldr(zr, Address(reg));
1352   } else {
1353     // nothing to do; the (later) access of M[reg + offset]
1354     // will provoke an OS NULL exception if reg is NULL
1355   }
1356 }
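
// Illustrative behaviour (hypothetical offsets): for a small field offset
// such as 8, needs_explicit_null_check(8) is false and no code is emitted;
// the later access of M[reg + 8] faults if reg is NULL.  For an offset past
// the protected guard page, the explicit probe above is emitted instead:
//   ldr xzr, [reg]      // faults now if reg is NULL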
1357 
1358 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
1359   assert_different_registers(markword, rscratch2);
1360   andr(markword, markword, markWord::inline_type_mask_in_place);
1361   mov(rscratch2, markWord::inline_type_pattern);
1362   cmp(markword, rscratch2);
1363   br(Assembler::EQ, is_inline_type);
1364 }
1365 
1366 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
1367   ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
1368   andr(temp_reg, temp_reg, JVM_ACC_VALUE);
1369   cbnz(temp_reg, is_inline_type);
1370 }
1371 
1372 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
1373   assert_different_registers(tmp, rscratch1);
1374   cbz(object, not_inline_type);
1375   const int is_inline_type_mask = markWord::inline_type_pattern;
1376   ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
1377   mov(rscratch1, is_inline_type_mask);
1378   andr(tmp, tmp, rscratch1);
1379   cmp(tmp, rscratch1);
1380   br(Assembler::NE, not_inline_type);
1381 }
1382 
1383 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) {
1384 #ifdef ASSERT
1385   {
1386     Label done_check;
1387     test_klass_is_inline_type(klass, temp_reg, done_check);
1388     stop("test_klass_is_empty_inline_type with non inline type klass");
1389     bind(done_check);
1390   }
1391 #endif
1392   ldrw(temp_reg, Address(klass, InstanceKlass::misc_flags_offset()));
1393   andr(temp_reg, temp_reg, InstanceKlass::misc_flag_is_empty_inline_type());
1394   cbnz(temp_reg, is_empty_inline_type);
1395 }
1396 
1397 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
1398   assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1399   tbnz(flags, ConstantPoolCacheEntry::is_null_free_inline_type_shift, is_null_free_inline_type);
1400 }
1401 
1402 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
1403   assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1404   tbz(flags, ConstantPoolCacheEntry::is_null_free_inline_type_shift, not_null_free_inline_type);
1405 }
1406 
1407 void MacroAssembler::test_field_is_inlined(Register flags, Register temp_reg, Label& is_flattened) {
1408   assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
1409   tbnz(flags, ConstantPoolCacheEntry::is_inlined_shift, is_flattened);
1410 }
1411 
1412 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
1413   Label test_mark_word;
1414   // load mark word
1415   ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
1416   // if the unlocked bit is set, the mark word is valid as-is
1417   tst(temp_reg, markWord::unlocked_value);
1418   br(Assembler::NE, test_mark_word);
1419   // slow path: the mark word is displaced, use the klass prototype header
1420   load_prototype_header(temp_reg, oop);
1421 
1422   bind(test_mark_word);
1423   andr(temp_reg, temp_reg, test_bit);
1424   if (jmp_set) {
1425     cbnz(temp_reg, jmp_label);
1426   } else {
1427     cbz(temp_reg, jmp_label);
1428   }
1429 }
1430 
1431 void MacroAssembler::test_flattened_array_oop(Register oop, Register temp_reg, Label& is_flattened_array) {
1432   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flattened_array);
1433 }
1434 
1435 void MacroAssembler::test_non_flattened_array_oop(Register oop, Register temp_reg,
1436                                                   Label& is_non_flattened_array) {
1437   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flattened_array);
1438 }
1439 
1440 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
1441   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
1442 }
1443 
1444 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
1445   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
1446 }
1447 
1448 void MacroAssembler::test_flattened_array_layout(Register lh, Label& is_flattened_array) {
1449   tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
1450   br(Assembler::NE, is_flattened_array);
1451 }
1452 
1453 void MacroAssembler::test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array) {
1454   tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
1455   br(Assembler::EQ, is_non_flattened_array);
1456 }
1457 
1458 void MacroAssembler::test_null_free_array_layout(Register lh, Label& is_null_free_array) {
1459   tst(lh, Klass::_lh_null_free_array_bit_inplace);
1460   br(Assembler::NE, is_null_free_array);
1461 }
1462 
1463 void MacroAssembler::test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array) {
1464   tst(lh, Klass::_lh_null_free_array_bit_inplace);
1465   br(Assembler::EQ, is_non_null_free_array);
1466 }
1467 
1468 // MacroAssembler protected routines needed to implement
1469 // public methods
1470 
1471 void MacroAssembler::mov(Register r, Address dest) {
1472   code_section()->relocate(pc(), dest.rspec());
1473   uint64_t imm64 = (uint64_t)dest.target();
1474   movptr(r, imm64);
1475 }
1476 
1477 // Move a constant pointer into r.  In AArch64 mode the virtual
1478 // address space is 48 bits in size, so we only need three
1479 // instructions to create a patchable instruction sequence that can
1480 // reach anywhere.
1481 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1482 #ifndef PRODUCT
1483   {
1484     char buffer[64];
1485     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
1486     block_comment(buffer);
1487   }
1488 #endif
1489   assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
1490   movz(r, imm64 & 0xffff);
1491   imm64 >>= 16;
1492   movk(r, imm64 & 0xffff, 16);
1493   imm64 >>= 16;
1494   movk(r, imm64 & 0xffff, 32);
1495 }
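
// Illustrative example (not part of the original code): movptr(r10,
// 0x123456789abcUL) emits the fixed-length, patchable sequence
//   movz x10, #0x9abc
//   movk x10, #0x5678, lsl #16
//   movk x10, #0x1234, lsl #32
// which can later be retargeted in place without changing its length.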
1496 
1497 // Macro to mov replicated immediate to vector register.
1498 //  Vd will get the following values for different arrangements in T
1499 //   imm32 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
1500 //   imm32 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
1501 //   imm32 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
1502 //   imm32 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
1503 //   imm32 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
1504 //   imm32 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
1505 //   T1D/T2D: invalid
1506 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32) {
1507   assert(T != T1D && T != T2D, "invalid arrangement");
1508   if (T == T8B || T == T16B) {
1509     assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
1510     movi(Vd, T, imm32 & 0xff, 0);
1511     return;
1512   }
1513   uint32_t nimm32 = ~imm32;
1514   if (T == T4H || T == T8H) {
1515     assert((imm32  & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
1516     imm32 &= 0xffff;
1517     nimm32 &= 0xffff;
1518   }
1519   uint32_t x = imm32;
1520   int movi_cnt = 0;
1521   int movn_cnt = 0;
1522   while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
1523   x = nimm32;
1524   while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
1525   if (movn_cnt < movi_cnt) imm32 = nimm32;
1526   unsigned lsl = 0;
1527   while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
1528   if (movn_cnt < movi_cnt)
1529     mvni(Vd, T, imm32 & 0xff, lsl);
1530   else
1531     movi(Vd, T, imm32 & 0xff, lsl);
1532   imm32 >>= 8; lsl += 8;
1533   while (imm32) {
1534     while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
1535     if (movn_cnt < movi_cnt)
1536       bici(Vd, T, imm32 & 0xff, lsl);
1537     else
1538       orri(Vd, T, imm32 & 0xff, lsl);
1539     lsl += 8; imm32 >>= 8;
1540   }
1541 }
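
// Illustrative examples (not in the original source):
//   mov(v0, T8H, 0xff00)     emits  movi v0.8h, #0xff, lsl #8
//   mov(v0, T4S, 0xffffff01) emits  mvni v0.4s, #0xfe
// The second case wins by inverting: ~0xffffff01 has one non-zero byte
// while the value itself has four, so a single MVNI suffices.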
1542 
1543 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
1544 {
1545 #ifndef PRODUCT
1546   {
1547     char buffer[64];
1548     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1549     block_comment(buffer);
1550   }
1551 #endif
1552   if (operand_valid_for_logical_immediate(false, imm64)) {
1553     orr(dst, zr, imm64);
1554   } else {
1555     // we can use a combination of MOVZ or MOVN with
1556     // MOVK to build up the constant
1557     uint64_t imm_h[4];
1558     int zero_count = 0;
1559     int neg_count = 0;
1560     int i;
1561     for (i = 0; i < 4; i++) {
1562       imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
1563       if (imm_h[i] == 0) {
1564         zero_count++;
1565       } else if (imm_h[i] == 0xffffL) {
1566         neg_count++;
1567       }
1568     }
1569     if (zero_count == 4) {
1570       // one MOVZ will do
1571       movz(dst, 0);
1572     } else if (neg_count == 4) {
1573       // one MOVN will do
1574       movn(dst, 0);
1575     } else if (zero_count == 3) {
1576       for (i = 0; i < 4; i++) {
1577         if (imm_h[i] != 0L) {
1578           movz(dst, (uint32_t)imm_h[i], (i << 4));
1579           break;
1580         }
1581       }
1582     } else if (neg_count == 3) {
1583       // one MOVN will do
1584       for (int i = 0; i < 4; i++) {
1585         if (imm_h[i] != 0xffffL) {
1586           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
1587           break;
1588         }
1589       }
1590     } else if (zero_count == 2) {
1591       // one MOVZ and one MOVK will do
1592       for (i = 0; i < 3; i++) {
1593         if (imm_h[i] != 0L) {
1594           movz(dst, (uint32_t)imm_h[i], (i << 4));
1595           i++;
1596           break;
1597         }
1598       }
1599       for (;i < 4; i++) {
1600         if (imm_h[i] != 0L) {
1601           movk(dst, (uint32_t)imm_h[i], (i << 4));
1602         }
1603       }
1604     } else if (neg_count == 2) {
1605       // one MOVN and one MOVK will do
1606       for (i = 0; i < 4; i++) {
1607         if (imm_h[i] != 0xffffL) {
1608           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
1609           i++;
1610           break;
1611         }
1612       }
1613       for (;i < 4; i++) {
1614         if (imm_h[i] != 0xffffL) {
1615           movk(dst, (uint32_t)imm_h[i], (i << 4));
1616         }
1617       }
1618     } else if (zero_count == 1) {
1619       // one MOVZ and two MOVKs will do
1620       for (i = 0; i < 4; i++) {
1621         if (imm_h[i] != 0L) {
1622           movz(dst, (uint32_t)imm_h[i], (i << 4));
1623           i++;
1624           break;
1625         }
1626       }
1627       for (;i < 4; i++) {
1628         if (imm_h[i] != 0x0L) {
1629           movk(dst, (uint32_t)imm_h[i], (i << 4));
1630         }
1631       }
1632     } else if (neg_count == 1) {
1633       // one MOVN and two MOVKs will do
1634       for (i = 0; i < 4; i++) {
1635         if (imm_h[i] != 0xffffL) {
1636           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
1637           i++;
1638           break;
1639         }
1640       }
1641       for (;i < 4; i++) {
1642         if (imm_h[i] != 0xffffL) {
1643           movk(dst, (uint32_t)imm_h[i], (i << 4));
1644         }
1645       }
1646     } else {
1647       // use a MOVZ and 3 MOVKs (makes it easier to debug)
1648       movz(dst, (uint32_t)imm_h[0], 0);
1649       for (i = 1; i < 4; i++) {
1650         movk(dst, (uint32_t)imm_h[i], (i << 4));
1651       }
1652     }
1653   }
1654 }
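
// Illustrative examples (not in the original source):
//   mov_immediate64(r0, 0x0000123400005678) -> movz x0, #0x5678
//                                              movk x0, #0x1234, lsl #32
//   mov_immediate64(r0, 0xffffffffffff1234) -> movn x0, #0xedcb
// i.e. all-zero halfwords favour MOVZ+MOVK and all-ones halfwords favour MOVN.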
1655 
1656 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
1657 {
1658 #ifndef PRODUCT
1659     {
1660       char buffer[64];
1661       snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
1662       block_comment(buffer);
1663     }
1664 #endif
1665   if (operand_valid_for_logical_immediate(true, imm32)) {
1666     orrw(dst, zr, imm32);
1667   } else {
1668     // we can use MOVZ, MOVN or two calls to MOVK to build up the
1669     // constant
1670     uint32_t imm_h[2];
1671     imm_h[0] = imm32 & 0xffff;
1672     imm_h[1] = ((imm32 >> 16) & 0xffff);
1673     if (imm_h[0] == 0) {
1674       movzw(dst, imm_h[1], 16);
1675     } else if (imm_h[0] == 0xffff) {
1676       movnw(dst, imm_h[1] ^ 0xffff, 16);
1677     } else if (imm_h[1] == 0) {
1678       movzw(dst, imm_h[0], 0);
1679     } else if (imm_h[1] == 0xffff) {
1680       movnw(dst, imm_h[0] ^ 0xffff, 0);
1681     } else {
1682       // use a MOVZ and MOVK (makes it easier to debug)
1683       movzw(dst, imm_h[0], 0);
1684       movkw(dst, imm_h[1], 16);
1685     }
1686   }
1687 }
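
// Illustrative example (not in the original source):
//   mov_immediate32(r0, 0xffff1234) -> movn w0, #0xedcb
// since the high halfword is all-ones, one MOVN reconstructs the value.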
1688 
1689 // Form an address from base + offset in Rd.  Rd may or may
1690 // not actually be used: you must use the Address that is returned.
1691 // It is up to you to ensure that the shift provided matches the size
1692 // of your data.
1693 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
1694   if (Address::offset_ok_for_immed(byte_offset, shift))
1695     // It fits; no need for any heroics
1696     return Address(base, byte_offset);
1697 
1698   // Don't do anything clever with negative or misaligned offsets
1699   unsigned mask = (1 << shift) - 1;
1700   if (byte_offset < 0 || byte_offset & mask) {
1701     mov(Rd, byte_offset);
1702     add(Rd, base, Rd);
1703     return Address(Rd);
1704   }
1705 
1706   // See if we can do this with two 12-bit offsets
1707   {
1708     uint64_t word_offset = byte_offset >> shift;
1709     uint64_t masked_offset = word_offset & 0xfff000;
1710     if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
1711         && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
1712       add(Rd, base, masked_offset << shift);
1713       word_offset -= masked_offset;
1714       return Address(Rd, word_offset << shift);
1715     }
1716   }
1717 
1718   // Do it the hard way
1719   mov(Rd, byte_offset);
1720   add(Rd, base, Rd);
1721   return Address(Rd);
1722 }
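
// Illustrative worked example (hypothetical registers): form_address(r10,
// r1, 0x40000, 3) cannot use a scaled 12-bit immediate (max 0x7ff8 for
// 8-byte data), but 0x40000 == 0x40 << 12 is a valid add/sub immediate, so
//   add x10, x1, #0x40000
// is emitted and Address(r10, 0) is returned.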
1723 
1724 void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
1725   if (UseLSE) {
1726     mov(tmp, 1);
1727     ldadd(Assembler::word, tmp, zr, counter_addr);
1728     return;
1729   }
1730   Label retry_load;
1731   if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
1732     prfm(Address(counter_addr), PSTL1STRM);
1733   bind(retry_load);
1734   // flush and load exclusive from the memory location
1735   ldxrw(tmp, counter_addr);
1736   addw(tmp, tmp, 1);
1737   // if we store+flush with no intervening write tmp will be zero
1738   stxrw(tmp2, tmp, counter_addr);
1739   cbnzw(tmp2, retry_load);
1740 }
1741 
1742 
1743 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
1744                                     bool want_remainder, Register scratch)
1745 {
1746   // Full implementation of Java idiv and irem.  The function
1747   // returns the (pc) offset of the div instruction - may be needed
1748   // for implicit exceptions.
1749   //
1750   // constraint : ra/rb =/= scratch
1751   //         normal case
1752   //
1753   // input : ra: dividend
1754   //         rb: divisor
1755   //
1756   // result: either
1757   //         quotient  (= ra idiv rb)
1758   //         remainder (= ra irem rb)
1759 
1760   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1761 
1762   int idivl_offset = offset();
1763   if (! want_remainder) {
1764     sdivw(result, ra, rb);
1765   } else {
1766     sdivw(scratch, ra, rb);
1767     Assembler::msubw(result, scratch, rb, ra);
1768   }
1769 
1770   return idivl_offset;
1771 }
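
// Illustrative arithmetic (not in the original source): msubw computes
// result = ra - (ra / rb) * rb, which matches Java irem semantics, e.g.
// ra = -7, rb = 2: quotient = -3 (truncated), remainder = -7 - (-3 * 2) = -1.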
1772 
1773 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
1774                                     bool want_remainder, Register scratch)
1775 {
1776   // Full implementation of Java ldiv and lrem.  The function
1777   // returns the (pc) offset of the div instruction - may be needed
1778   // for implicit exceptions.
1779   //
1780   // constraint : ra/rb =/= scratch
1781   //         normal case
1782   //
1783   // input : ra: dividend
1784   //         rb: divisor
1785   //
1786   // result: either
1787   //         quotient  (= ra idiv rb)
1788   //         remainder (= ra irem rb)
1789 
1790   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1791 
1792   int idivq_offset = offset();
1793   if (! want_remainder) {
1794     sdiv(result, ra, rb);
1795   } else {
1796     sdiv(scratch, ra, rb);
1797     Assembler::msub(result, scratch, rb, ra);
1798   }
1799 
1800   return idivq_offset;
1801 }
1802 
1803 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
1804   address prev = pc() - NativeMembar::instruction_size;
1805   address last = code()->last_insn();
1806   if (last != NULL && nativeInstruction_at(last)->is_Membar() && prev == last) {
1807     NativeMembar *bar = NativeMembar_at(prev);
1808     // We are merging two memory barrier instructions.  On AArch64 we
1809     // can do this simply by ORing them together.
1810     bar->set_kind(bar->get_kind() | order_constraint);
1811     BLOCK_COMMENT("merged membar");
1812   } else {
1813     code()->set_last_insn(pc());
1814     dmb(Assembler::barrier(order_constraint));
1815   }
1816 }
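
// Illustrative merge (not in the original source): emitting
//   membar(LoadLoad); membar(StoreStore);
// back to back produces a single DMB whose kind is the OR of the two
// constraints, rather than two consecutive barrier instructions.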
1817 
1818 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
1819   if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
1820     merge_ldst(rt, adr, size_in_bytes, is_store);
1821     code()->clear_last_insn();
1822     return true;
1823   } else {
1824     assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
1825     const uint64_t mask = size_in_bytes - 1;
1826     if (adr.getMode() == Address::base_plus_offset &&
1827         (adr.offset() & mask) == 0) { // only supports base_plus_offset.
1828       code()->set_last_insn(pc());
1829     }
1830     return false;
1831   }
1832 }
1833 
1834 void MacroAssembler::ldr(Register Rx, const Address &adr) {
1835   // We always try to merge two adjacent loads into one ldp.
1836   if (!try_merge_ldst(Rx, adr, 8, false)) {
1837     Assembler::ldr(Rx, adr);
1838   }
1839 }
1840 
1841 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
1842   // We always try to merge two adjacent loads into one ldp.
1843   if (!try_merge_ldst(Rw, adr, 4, false)) {
1844     Assembler::ldrw(Rw, adr);
1845   }
1846 }
1847 
1848 void MacroAssembler::str(Register Rx, const Address &adr) {
1849   // We always try to merge two adjacent stores into one stp.
1850   if (!try_merge_ldst(Rx, adr, 8, true)) {
1851     Assembler::str(Rx, adr);
1852   }
1853 }
1854 
1855 void MacroAssembler::strw(Register Rw, const Address &adr) {
1856   // We always try to merge two adjacent stores into one stp.
1857   if (!try_merge_ldst(Rw, adr, 4, true)) {
1858     Assembler::strw(Rw, adr);
1859   }
1860 }
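
// Illustrative merge (hypothetical offsets): two adjacent stores such as
//   str(r0, Address(sp, 0)); str(r1, Address(sp, 8));
// may be fused by try_merge_ldst() into a single
//   stp x0, x1, [sp]
// when the second access is contiguous with and aligned to the first.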
1861 
1862 // MacroAssembler routines found actually to be needed
1863 
1864 void MacroAssembler::push(Register src)
1865 {
1866   str(src, Address(pre(esp, -1 * wordSize)));
1867 }
1868 
1869 void MacroAssembler::pop(Register dst)
1870 {
1871   ldr(dst, Address(post(esp, 1 * wordSize)));
1872 }
1873 
1874 // Note: load_unsigned_short used to be called load_unsigned_word.
1875 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1876   int off = offset();
1877   ldrh(dst, src);
1878   return off;
1879 }
1880 
1881 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
1882   int off = offset();
1883   ldrb(dst, src);
1884   return off;
1885 }
1886 
1887 int MacroAssembler::load_signed_short(Register dst, Address src) {
1888   int off = offset();
1889   ldrsh(dst, src);
1890   return off;
1891 }
1892 
1893 int MacroAssembler::load_signed_byte(Register dst, Address src) {
1894   int off = offset();
1895   ldrsb(dst, src);
1896   return off;
1897 }
1898 
1899 int MacroAssembler::load_signed_short32(Register dst, Address src) {
1900   int off = offset();
1901   ldrshw(dst, src);
1902   return off;
1903 }
1904 
1905 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
1906   int off = offset();
1907   ldrsbw(dst, src);
1908   return off;
1909 }
1910 
1911 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
1912   switch (size_in_bytes) {
1913   case  8:  ldr(dst, src); break;
1914   case  4:  ldrw(dst, src); break;
1915   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
1916   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
1917   default:  ShouldNotReachHere();
1918   }
1919 }
1920 
1921 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
1922   switch (size_in_bytes) {
1923   case  8:  str(src, dst); break;
1924   case  4:  strw(src, dst); break;
1925   case  2:  strh(src, dst); break;
1926   case  1:  strb(src, dst); break;
1927   default:  ShouldNotReachHere();
1928   }
1929 }
1930 
1931 void MacroAssembler::decrementw(Register reg, int value)
1932 {
1933   if (value < 0)  { incrementw(reg, -value);      return; }
1934   if (value == 0) {                               return; }
1935   if (value < (1 << 12)) { subw(reg, reg, value); return; }
1936   /* else */ {
1937     guarantee(reg != rscratch2, "invalid dst for register decrement");
1938     movw(rscratch2, (unsigned)value);
1939     subw(reg, reg, rscratch2);
1940   }
1941 }
1942 
1943 void MacroAssembler::decrement(Register reg, int value)
1944 {
1945   if (value < 0)  { increment(reg, -value);      return; }
1946   if (value == 0) {                              return; }
1947   if (value < (1 << 12)) { sub(reg, reg, value); return; }
1948   /* else */ {
1949     assert(reg != rscratch2, "invalid dst for register decrement");
1950     mov(rscratch2, (uint64_t)value);
1951     sub(reg, reg, rscratch2);
1952   }
1953 }
1954 
1955 void MacroAssembler::decrementw(Address dst, int value)
1956 {
1957   assert(!dst.uses(rscratch1), "invalid dst for address decrement");
1958   if (dst.getMode() == Address::literal) {
1959     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
1960     lea(rscratch2, dst);
1961     dst = Address(rscratch2);
1962   }
1963   ldrw(rscratch1, dst);
1964   decrementw(rscratch1, value);
1965   strw(rscratch1, dst);
1966 }
1967 
1968 void MacroAssembler::decrement(Address dst, int value)
1969 {
1970   assert(!dst.uses(rscratch1), "invalid address for decrement");
1971   if (dst.getMode() == Address::literal) {
1972     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
1973     lea(rscratch2, dst);
1974     dst = Address(rscratch2);
1975   }
1976   ldr(rscratch1, dst);
1977   decrement(rscratch1, value);
1978   str(rscratch1, dst);
1979 }
1980 
1981 void MacroAssembler::incrementw(Register reg, int value)
1982 {
1983   if (value < 0)  { decrementw(reg, -value);      return; }
1984   if (value == 0) {                               return; }
1985   if (value < (1 << 12)) { addw(reg, reg, value); return; }
1986   /* else */ {
1987     assert(reg != rscratch2, "invalid dst for register increment");
1988     movw(rscratch2, (unsigned)value);
1989     addw(reg, reg, rscratch2);
1990   }
1991 }
1992 
1993 void MacroAssembler::increment(Register reg, int value)
1994 {
1995   if (value < 0)  { decrement(reg, -value);      return; }
1996   if (value == 0) {                              return; }
1997   if (value < (1 << 12)) { add(reg, reg, value); return; }
1998   /* else */ {
1999     assert(reg != rscratch2, "invalid dst for register increment");
2000     movw(rscratch2, (unsigned)value);
2001     add(reg, reg, rscratch2);
2002   }
2003 }
2004 
2005 void MacroAssembler::incrementw(Address dst, int value)
2006 {
2007   assert(!dst.uses(rscratch1), "invalid dst for address increment");
2008   if (dst.getMode() == Address::literal) {
2009     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2010     lea(rscratch2, dst);
2011     dst = Address(rscratch2);
2012   }
2013   ldrw(rscratch1, dst);
2014   incrementw(rscratch1, value);
2015   strw(rscratch1, dst);
2016 }
2017 
2018 void MacroAssembler::increment(Address dst, int value)
2019 {
2020   assert(!dst.uses(rscratch1), "invalid dst for address increment");
2021   if (dst.getMode() == Address::literal) {
2022     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2023     lea(rscratch2, dst);
2024     dst = Address(rscratch2);
2025   }
2026   ldr(rscratch1, dst);
2027   increment(rscratch1, value);
2028   str(rscratch1, dst);
2029 }
2030 
2031 // Push lots of registers in the bit set supplied.  Don't push sp.
2032 // Return the number of words pushed
2033 int MacroAssembler::push(unsigned int bitset, Register stack) {
2034   int words_pushed = 0;
2035 
2036   // Scan bitset to accumulate register pairs
2037   unsigned char regs[32];
2038   int count = 0;
2039   for (int reg = 0; reg <= 30; reg++) {
2040     if (1 & bitset)
2041       regs[count++] = reg;
2042     bitset >>= 1;
2043   }
2044   regs[count++] = zr->encoding_nocheck();
2045   count &= ~1;  // Only push an even number of regs
2046 
2047   if (count) {
2048     stp(as_Register(regs[0]), as_Register(regs[1]),
2049        Address(pre(stack, -count * wordSize)));
2050     words_pushed += 2;
2051   }
2052   for (int i = 2; i < count; i += 2) {
2053     stp(as_Register(regs[i]), as_Register(regs[i+1]),
2054        Address(stack, i * wordSize));
2055     words_pushed += 2;
2056   }
2057 
2058   assert(words_pushed == count, "oops, pushed != count");
2059 
2060   return count;
2061 }
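
// Illustrative example (hypothetical register set): pushing {r4, r5, r6}
// pads the odd count with zr and emits
//   stp x4, x5, [sp, #-32]!
//   stp x6, xzr, [sp, #16]
// returning 4, the padded word count.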
2062 
2063 int MacroAssembler::pop(unsigned int bitset, Register stack) {
2064   int words_pushed = 0;
2065 
2066   // Scan bitset to accumulate register pairs
2067   unsigned char regs[32];
2068   int count = 0;
2069   for (int reg = 0; reg <= 30; reg++) {
2070     if (1 & bitset)
2071       regs[count++] = reg;
2072     bitset >>= 1;
2073   }
2074   regs[count++] = zr->encoding_nocheck();
2075   count &= ~1;
2076 
2077   for (int i = 2; i < count; i += 2) {
2078     ldp(as_Register(regs[i]), as_Register(regs[i+1]),
2079        Address(stack, i * wordSize));
2080     words_pushed += 2;
2081   }
2082   if (count) {
2083     ldp(as_Register(regs[0]), as_Register(regs[1]),
2084        Address(post(stack, count * wordSize)));
2085     words_pushed += 2;
2086   }
2087 
2088   assert(words_pushed == count, "oops, pushed != count");
2089 
2090   return count;
2091 }
2092 
2093 // Push lots of float/SIMD registers in the bit set supplied.
2094 // Return the number of dwords pushed
2095 int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
2096   int words_pushed = 0;
2097   bool use_sve = false;
2098   int sve_vector_size_in_bytes = 0;
2099 
2100 #ifdef COMPILER2
2101   use_sve = Matcher::supports_scalable_vector();
2102   sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2103 #endif
2104 
2105   // Scan bitset to accumulate register pairs
2106   unsigned char regs[32];
2107   int count = 0;
2108   for (int reg = 0; reg <= 31; reg++) {
2109     if (1 & bitset)
2110       regs[count++] = reg;
2111     bitset >>= 1;
2112   }
2113 
2114   if (count == 0) {
2115     return 0;
2116   }
2117 
2118   // SVE
2119   if (use_sve && sve_vector_size_in_bytes > 16) {
2120     sub(stack, stack, sve_vector_size_in_bytes * count);
2121     for (int i = 0; i < count; i++) {
2122       sve_str(as_FloatRegister(regs[i]), Address(stack, i));
2123     }
2124     return count * sve_vector_size_in_bytes / 8;
2125   }
2126 
2127   // NEON
2128   if (count == 1) {
2129     strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
2130     return 2;
2131   }
2132 
2133   bool odd = (count & 1) == 1;
2134   int push_slots = count + (odd ? 1 : 0);
2135 
2136   // Always pushing full 128 bit registers.
2137   stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
2138   words_pushed += 2;
2139 
2140   for (int i = 2; i + 1 < count; i += 2) {
2141     stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
2142     words_pushed += 2;
2143   }
2144 
2145   if (odd) {
2146     strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
2147     words_pushed++;
2148   }
2149 
2150   assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
2151   return count * 2;
2152 }
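
// Illustrative NEON case (hypothetical register set): pushing {v0, v1, v2}
// rounds the odd count up to 4 slots of 16 bytes and emits
//   stp q0, q1, [sp, #-64]!
//   str q2, [sp, #32]
// returning 6 dwords (3 registers * 16 bytes / 8).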
2153 
2154 // Return the number of dwords popped
2155 int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
2156   int words_pushed = 0;
2157   bool use_sve = false;
2158   int sve_vector_size_in_bytes = 0;
2159 
2160 #ifdef COMPILER2
2161   use_sve = Matcher::supports_scalable_vector();
2162   sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2163 #endif
2164   // Scan bitset to accumulate register pairs
2165   unsigned char regs[32];
2166   int count = 0;
2167   for (int reg = 0; reg <= 31; reg++) {
2168     if (1 & bitset)
2169       regs[count++] = reg;
2170     bitset >>= 1;
2171   }
2172 
2173   if (count == 0) {
2174     return 0;
2175   }
2176 
2177   // SVE
2178   if (use_sve && sve_vector_size_in_bytes > 16) {
2179     for (int i = count - 1; i >= 0; i--) {
2180       sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
2181     }
2182     add(stack, stack, sve_vector_size_in_bytes * count);
2183     return count * sve_vector_size_in_bytes / 8;
2184   }
2185 
2186   // NEON
2187   if (count == 1) {
2188     ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
2189     return 2;
2190   }
2191 
2192   bool odd = (count & 1) == 1;
2193   int push_slots = count + (odd ? 1 : 0);
2194 
2195   if (odd) {
2196     ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
2197     words_pushed++;
2198   }
2199 
2200   for (int i = 2; i + 1 < count; i += 2) {
2201     ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
2202     words_pushed += 2;
2203   }
2204 
2205   ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2)));
2206   words_pushed += 2;
2207 
2208   assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
2209 
2210   return count * 2;
2211 }
2212 
2213 // Return the number of dwords pushed
2214 int MacroAssembler::push_p(unsigned int bitset, Register stack) {
2215   bool use_sve = false;
2216   int sve_predicate_size_in_slots = 0;
2217 
2218 #ifdef COMPILER2
2219   use_sve = Matcher::supports_scalable_vector();
2220   if (use_sve) {
2221     sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
2222   }
2223 #endif
2224 
2225   if (!use_sve) {
2226     return 0;
2227   }
2228 
2229   unsigned char regs[PRegisterImpl::number_of_saved_registers];
2230   int count = 0;
2231   for (int reg = 0; reg < PRegisterImpl::number_of_saved_registers; reg++) {
2232     if (1 & bitset)
2233       regs[count++] = reg;
2234     bitset >>= 1;
2235   }
2236 
2237   if (count == 0) {
2238     return 0;
2239   }
2240 
2241   int total_push_bytes = align_up(sve_predicate_size_in_slots *
2242                                   VMRegImpl::stack_slot_size * count, 16);
2243   sub(stack, stack, total_push_bytes);
2244   for (int i = 0; i < count; i++) {
2245     sve_str(as_PRegister(regs[i]), Address(stack, i));
2246   }
2247   return total_push_bytes / 8;
2248 }
2249 
2250 // Return the number of dwords popped
2251 int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
2252   bool use_sve = false;
2253   int sve_predicate_size_in_slots = 0;
2254 
2255 #ifdef COMPILER2
2256   use_sve = Matcher::supports_scalable_vector();
2257   if (use_sve) {
2258     sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
2259   }
2260 #endif
2261 
2262   if (!use_sve) {
2263     return 0;
2264   }
2265 
2266   unsigned char regs[PRegisterImpl::number_of_saved_registers];
2267   int count = 0;
2268   for (int reg = 0; reg < PRegisterImpl::number_of_saved_registers; reg++) {
2269     if (1 & bitset)
2270       regs[count++] = reg;
2271     bitset >>= 1;
2272   }
2273 
2274   if (count == 0) {
2275     return 0;
2276   }
2277 
2278   int total_pop_bytes = align_up(sve_predicate_size_in_slots *
2279                                  VMRegImpl::stack_slot_size * count, 16);
2280   for (int i = count - 1; i >= 0; i--) {
2281     sve_ldr(as_PRegister(regs[i]), Address(stack, i));
2282   }
2283   add(stack, stack, total_pop_bytes);
2284   return total_pop_bytes / 8;
2285 }
2286 
2287 #ifdef ASSERT
2288 void MacroAssembler::verify_heapbase(const char* msg) {
2289 #if 0
2290   assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
2291   assert (Universe::heap() != NULL, "java heap should be initialized");
2292   if (!UseCompressedOops || Universe::ptr_base() == NULL) {
2293     // rheapbase is allocated as general register
2294     return;
2295   }
2296   if (CheckCompressedOops) {
2297     Label ok;
2298     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
2299     cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2300     br(Assembler::EQ, ok);
2301     stop(msg);
2302     bind(ok);
2303     pop(1 << rscratch1->encoding(), sp);
2304   }
2305 #endif
2306 }
2307 #endif
2308 
2309 void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
2310   Label done, not_weak;
2311   cbz(value, done);           // Use NULL as-is.
2312 
2313   STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
2314   tbz(value, 0, not_weak);    // Test for jweak tag.
2315 
2316   // Resolve jweak.
2317   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, value,
2318                  Address(value, -JNIHandles::weak_tag_value), tmp, thread);
2319   verify_oop(value);
2320   b(done);
2321 
2322   bind(not_weak);
2323   // Resolve (untagged) jobject.
2324   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
2325   verify_oop(value);
2326   bind(done);
2327 }
2328 
2329 void MacroAssembler::stop(const char* msg) {
2330   BLOCK_COMMENT(msg);
2331   dcps1(0xdeae);
2332   emit_int64((uintptr_t)msg);
2333 }
2334 
2335 void MacroAssembler::unimplemented(const char* what) {
2336   const char* buf = NULL;
2337   {
2338     ResourceMark rm;
2339     stringStream ss;
2340     ss.print("unimplemented: %s", what);
2341     buf = code_string(ss.as_string());
2342   }
2343   stop(buf);
2344 }
2345 
2346 // If a constant does not fit in an immediate field, generate some
2347 // number of MOV instructions and then perform the operation.
2348 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
2349                                            add_sub_imm_insn insn1,
2350                                            add_sub_reg_insn insn2) {
2351   assert(Rd != zr, "Rd = zr and not setting flags?");
2352   if (operand_valid_for_add_sub_immediate((int)imm)) {
2353     (this->*insn1)(Rd, Rn, imm);
2354   } else {
2355     if (uabs(imm) < (1 << 24)) {
2356        (this->*insn1)(Rd, Rn, imm & -(1 << 12));
2357        (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
2358     } else {
2359        assert_different_registers(Rd, Rn);
2360        mov(Rd, (uint64_t)imm);
2361        (this->*insn2)(Rd, Rn, Rd, LSL, 0);
2362     }
2363   }
2364 }
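
// Illustrative split (not in the original source): an immediate such as
// 0x123456 fits in 24 bits but not in one add/sub immediate, so it is
// emitted as two 12-bit chunks:
//   add Rd, Rn, #0x123000
//   add Rd, Rd, #0x456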
2365 
2366 // Separate version which sets the flags. Optimisations are more restricted
2367 // because we must set the flags correctly.
2368 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
2369                                            add_sub_imm_insn insn1,
2370                                            add_sub_reg_insn insn2) {
2371   if (operand_valid_for_add_sub_immediate((int)imm)) {
2372     (this->*insn1)(Rd, Rn, imm);
2373   } else {
2374     assert_different_registers(Rd, Rn);
2375     assert(Rd != zr, "overflow in immediate operand");
2376     mov(Rd, (uint64_t)imm);
2377     (this->*insn2)(Rd, Rn, Rd, LSL, 0);
2378   }
2379 }
2380 
2381 
2382 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
2383   if (increment.is_register()) {
2384     add(Rd, Rn, increment.as_register());
2385   } else {
2386     add(Rd, Rn, increment.as_constant());
2387   }
2388 }
2389 
2390 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
2391   if (increment.is_register()) {
2392     addw(Rd, Rn, increment.as_register());
2393   } else {
2394     addw(Rd, Rn, increment.as_constant());
2395   }
2396 }
2397 
2398 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
2399   if (decrement.is_register()) {
2400     sub(Rd, Rn, decrement.as_register());
2401   } else {
2402     sub(Rd, Rn, decrement.as_constant());
2403   }
2404 }
2405 
2406 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
2407   if (decrement.is_register()) {
2408     subw(Rd, Rn, decrement.as_register());
2409   } else {
2410     subw(Rd, Rn, decrement.as_constant());
2411   }
2412 }
2413 
2414 void MacroAssembler::reinit_heapbase()
2415 {
2416   if (UseCompressedOops) {
2417     if (Universe::is_fully_initialized()) {
2418       mov(rheapbase, CompressedOops::ptrs_base());
2419     } else {
2420       lea(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2421       ldr(rheapbase, Address(rheapbase));
2422     }
2423   }
2424 }
2425 
2426 // this simulates the behaviour of the x86 cmpxchg instruction using a
2427 // load linked/store conditional pair. we use the acquire/release
2428 // versions of these instructions so that we flush pending writes as
2429 // per Java semantics.
2430 
2431 // n.b. the x86 version assumes the old value to be compared against is
2432 // in rax and updates rax with the value located in memory if the
2433 // cmpxchg fails. we supply a register for the old value explicitly
2434 
2435 // the aarch64 load linked/store conditional instructions do not
2436 // accept an offset. so, unlike x86, we must provide a plain register
2437 // to identify the memory word to be compared/exchanged rather than a
2438 // register+offset Address.
2439 
2440 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
2441                                 Label &succeed, Label *fail) {
2442   // oldv holds comparison value
2443   // newv holds value to write in exchange
2444   // addr identifies memory word to compare against/update
2445   if (UseLSE) {
2446     mov(tmp, oldv);
2447     casal(Assembler::xword, oldv, newv, addr);
2448     cmp(tmp, oldv);
2449     br(Assembler::EQ, succeed);
2450     membar(AnyAny);
2451   } else {
2452     Label retry_load, nope;
2453     if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2454       prfm(Address(addr), PSTL1STRM);
2455     bind(retry_load);
2456     // flush and load exclusive from the memory location
2457     // and fail if it is not what we expect
2458     ldaxr(tmp, addr);
2459     cmp(tmp, oldv);
2460     br(Assembler::NE, nope);
2461     // if we store+flush with no intervening write tmp will be zero
2462     stlxr(tmp, newv, addr);
2463     cbzw(tmp, succeed);
2464     // retry so we only ever return after a load fails to compare;
2465     // this ensures we don't return a stale value after a failed write.
2466     b(retry_load);
2467     // if the memory word differs we return it in oldv and signal a fail
2468     bind(nope);
2469     membar(AnyAny);
2470     mov(oldv, tmp);
2471   }
2472   if (fail)
2473     b(*fail);
2474 }
2475 
2476 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
2477                                         Label &succeed, Label *fail) {
2478   assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
2479   cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
2480 }
2481 
2482 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
2483                                 Label &succeed, Label *fail) {
2484   // oldv holds comparison value
2485   // newv holds value to write in exchange
2486   // addr identifies memory word to compare against/update
2487   // tmp returns 0/1 for success/failure
2488   if (UseLSE) {
2489     mov(tmp, oldv);
2490     casal(Assembler::word, oldv, newv, addr);
2491     cmp(tmp, oldv);
2492     br(Assembler::EQ, succeed);
2493     membar(AnyAny);
2494   } else {
2495     Label retry_load, nope;
2496     if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2497       prfm(Address(addr), PSTL1STRM);
2498     bind(retry_load);
2499     // flush and load exclusive from the memory location
2500     // and fail if it is not what we expect
2501     ldaxrw(tmp, addr);
2502     cmp(tmp, oldv);
2503     br(Assembler::NE, nope);
2504     // if we store+flush with no intervening write tmp will be zero
2505     stlxrw(tmp, newv, addr);
2506     cbzw(tmp, succeed);
2507     // retry so we only ever return after a load fails to compare;
2508     // this ensures we don't return a stale value after a failed write.
2509     b(retry_load);
2510     // if the memory word differs we return it in oldv and signal a fail
2511     bind(nope);
2512     membar(AnyAny);
2513     mov(oldv, tmp);
2514   }
2515   if (fail)
2516     b(*fail);
2517 }
2518 
2519 // A generic CAS; success or failure is in the EQ flag.  A weak CAS
2520 // doesn't retry and may fail spuriously.  If the oldval is wanted,
2521 // pass a register for the result; otherwise pass noreg.
2522 
2523 // Clobbers rscratch1
2524 void MacroAssembler::cmpxchg(Register addr, Register expected,
2525                              Register new_val,
2526                              enum operand_size size,
2527                              bool acquire, bool release,
2528                              bool weak,
2529                              Register result) {
2530   if (result == noreg)  result = rscratch1;
2531   BLOCK_COMMENT("cmpxchg {");
2532   if (UseLSE) {
2533     mov(result, expected);
2534     lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
2535     compare_eq(result, expected, size);
2536   } else {
2537     Label retry_load, done;
2538     if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2539       prfm(Address(addr), PSTL1STRM);
2540     bind(retry_load);
2541     load_exclusive(result, addr, size, acquire);
2542     compare_eq(result, expected, size);
2543     br(Assembler::NE, done);
2544     store_exclusive(rscratch1, new_val, addr, size, release);
2545     if (weak) {
2546       cmpw(rscratch1, 0u);  // If the store fails, return NE to our caller.
2547     } else {
2548       cbnzw(rscratch1, retry_load);
2549     }
2550     bind(done);
2551   }
2552   BLOCK_COMMENT("} cmpxchg");
2553 }
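
// Illustrative use (hypothetical registers and label): a strong,
// sequentially consistent CAS whose outcome is only needed as a flag:
//   cmpxchg(addr_reg, expected_reg, new_reg, Assembler::xword,
//           /*acquire*/ true, /*release*/ true, /*weak*/ false, noreg);
//   br(Assembler::EQ, swapped);   // EQ <=> the exchange succeeded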
2554 
2555 // A generic comparison. Only compares for equality, clobbers rscratch1.
2556 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
2557   if (size == xword) {
2558     cmp(rm, rn);
2559   } else if (size == word) {
2560     cmpw(rm, rn);
2561   } else if (size == halfword) {
2562     eorw(rscratch1, rm, rn);
2563     ands(zr, rscratch1, 0xffff);
2564   } else if (size == byte) {
2565     eorw(rscratch1, rm, rn);
2566     ands(zr, rscratch1, 0xff);
2567   } else {
2568     ShouldNotReachHere();
2569   }
2570 }
2571 
2572 
2573 static bool different(Register a, RegisterOrConstant b, Register c) {
2574   if (b.is_constant())
2575     return a != c;
2576   else
2577     return a != b.as_register() && a != c && b.as_register() != c;
2578 }
2579 
2580 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
2581 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
2582   if (UseLSE) {                                                         \
2583     prev = prev->is_valid() ? prev : zr;                                \
2584     if (incr.is_register()) {                                           \
2585       AOP(sz, incr.as_register(), prev, addr);                          \
2586     } else {                                                            \
2587       mov(rscratch2, incr.as_constant());                               \
2588       AOP(sz, rscratch2, prev, addr);                                   \
2589     }                                                                   \
2590     return;                                                             \
2591   }                                                                     \
2592   Register result = rscratch2;                                          \
2593   if (prev->is_valid())                                                 \
2594     result = different(prev, incr, addr) ? prev : rscratch2;            \
2595                                                                         \
2596   Label retry_load;                                                     \
2597   if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))         \
2598     prfm(Address(addr), PSTL1STRM);                                     \
2599   bind(retry_load);                                                     \
2600   LDXR(result, addr);                                                   \
2601   OP(rscratch1, result, incr);                                          \
2602   STXR(rscratch2, rscratch1, addr);                                     \
2603   cbnzw(rscratch2, retry_load);                                         \
2604   if (prev->is_valid() && prev != result) {                             \
2605     IOP(prev, rscratch1, incr);                                         \
2606   }                                                                     \
2607 }
2608 
2609 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
2610 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
2611 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
2612 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
2613 
2614 #undef ATOMIC_OP
2615 
2616 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
2617 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
2618   if (UseLSE) {                                                         \
2619     prev = prev->is_valid() ? prev : zr;                                \
2620     AOP(sz, newv, prev, addr);                                          \
2621     return;                                                             \
2622   }                                                                     \
2623   Register result = rscratch2;                                          \
2624   if (prev->is_valid())                                                 \
2625     result = different(prev, newv, addr) ? prev : rscratch2;            \
2626                                                                         \
2627   Label retry_load;                                                     \
2628   if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))         \
2629     prfm(Address(addr), PSTL1STRM);                                     \
2630   bind(retry_load);                                                     \
2631   LDXR(result, addr);                                                   \
2632   STXR(rscratch1, newv, addr);                                          \
2633   cbnzw(rscratch1, retry_load);                                         \
2634   if (prev->is_valid() && prev != result)                               \
2635     mov(prev, result);                                                  \
2636 }
2637 
2638 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
2639 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
2640 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
2641 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
2642 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
2643 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
2644 
2645 #undef ATOMIC_XCHG
2646 
2647 #ifndef PRODUCT
2648 extern "C" void findpc(intptr_t x);
2649 #endif
2650 
2651 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
2652 {
2653   // In order to get locks to work, we need to fake an in_VM state
2654   if (ShowMessageBoxOnError ) {
2655     JavaThread* thread = JavaThread::current();
2656     JavaThreadState saved_state = thread->thread_state();
2657     thread->set_thread_state(_thread_in_vm);
2658 #ifndef PRODUCT
2659     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
2660       ttyLocker ttyl;
2661       BytecodeCounter::print();
2662     }
2663 #endif
2664     if (os::message_box(msg, "Execution stopped, print registers?")) {
2665       ttyLocker ttyl;
2666       tty->print_cr(" pc = 0x%016" PRIx64, pc);
2667 #ifndef PRODUCT
2668       tty->cr();
2669       findpc(pc);
2670       tty->cr();
2671 #endif
2672       tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
2673       tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
2674       tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
2675       tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
2676       tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
2677       tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
2678       tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
2679       tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
2680       tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
2681       tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
2682       tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
2683       tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
2684       tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
2685       tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
2686       tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
2687       tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
2688       tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
2689       tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
2690       tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
2691       tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
2692       tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
2693       tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
2694       tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
2695       tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
2696       tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
2697       tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
2698       tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
2699       tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
2700       tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
2701       tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
2702       tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
2703       BREAKPOINT;
2704     }
2705   }
2706   fatal("DEBUG MESSAGE: %s", msg);
2707 }
2708 
2709 RegSet MacroAssembler::call_clobbered_gp_registers() {
2710   RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
2711 #ifndef R18_RESERVED
2712   regs += r18_tls;
2713 #endif
2714   return regs;
2715 }
2716 
2717 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
2718   int step = 4 * wordSize;
2719   push(call_clobbered_gp_registers() - exclude, sp);
2720   sub(sp, sp, step);
2721   mov(rscratch1, -step);
2722   // Push v0-v7, v16-v31.
2723   for (int i = 31; i>= 4; i -= 4) {
2724     if (i <= v7->encoding() || i >= v16->encoding())
2725       st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
2726           as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
2727   }
2728   st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
2729       as_FloatRegister(3), T1D, Address(sp));
2730 }
2731 
2732 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
2733   for (int i = 0; i < 32; i += 4) {
2734     if (i <= v7->encoding() || i >= v16->encoding())
2735       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
2736           as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
2737   }
2738 
2739   reinitialize_ptrue();
2740 
2741   pop(call_clobbered_gp_registers() - exclude, sp);
2742 }
2743 
2744 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
2745                                     int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
2746   push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
2747   if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
2748     sub(sp, sp, sve_vector_size_in_bytes * FloatRegisterImpl::number_of_registers);
2749     for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
2750       sve_str(as_FloatRegister(i), Address(sp, i));
2751     }
2752   } else {
2753     int step = (save_vectors ? 8 : 4) * wordSize;
2754     mov(rscratch1, -step);
2755     sub(sp, sp, step);
2756     for (int i = 28; i >= 4; i -= 4) {
2757       st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
2758           as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
2759     }
2760     st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
2761   }
2762   if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
2763     sub(sp, sp, total_predicate_in_bytes);
2764     for (int i = 0; i < PRegisterImpl::number_of_saved_registers; i++) {
2765       sve_str(as_PRegister(i), Address(sp, i));
2766     }
2767   }
2768 }
2769 
2770 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
2771                                    int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
2772   if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
2773     for (int i = PRegisterImpl::number_of_saved_registers - 1; i >= 0; i--) {
2774       sve_ldr(as_PRegister(i), Address(sp, i));
2775     }
2776     add(sp, sp, total_predicate_in_bytes);
2777   }
2778   if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
2779     for (int i = FloatRegisterImpl::number_of_registers - 1; i >= 0; i--) {
2780       sve_ldr(as_FloatRegister(i), Address(sp, i));
2781     }
2782     add(sp, sp, sve_vector_size_in_bytes * FloatRegisterImpl::number_of_registers);
2783   } else {
2784     int step = (restore_vectors ? 8 : 4) * wordSize;
2785     for (int i = 0; i <= 28; i += 4)
2786       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
2787           as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
2788   }
2789 
2790   // We may use predicate registers and rely on ptrue with SVE,
2791   // regardless of whether wide vectors (> 8 bytes) are used or not.
2792   if (use_sve) {
2793     reinitialize_ptrue();
2794   }
2795 
2796   // integer registers except lr & sp
2797   pop(RegSet::range(r0, r17), sp);
2798 #ifdef R18_RESERVED
2799   ldp(zr, r19, Address(post(sp, 2 * wordSize)));
2800   pop(RegSet::range(r20, r29), sp);
2801 #else
2802   pop(RegSet::range(r18_tls, r29), sp);
2803 #endif
2804 }
2805 
2806 /**
2807  * Helpers for multiply_to_len().
2808  */
2809 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
2810                                      Register src1, Register src2) {
2811   adds(dest_lo, dest_lo, src1);
2812   adc(dest_hi, dest_hi, zr);
2813   adds(dest_lo, dest_lo, src2);
2814   adc(final_dest_hi, dest_hi, zr);
2815 }
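
// Illustrative walk-through (comment only): add2_with_carry computes the
// 128-bit sum  final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2,
// folding each addition's carry-out into the high word.  For example, with
// dest_hi:dest_lo = 0:0xffffffffffffffff, src1 = 1 and src2 = 2:
//   adds -> dest_lo = 0, C = 1
//   adc  -> dest_hi = 1
//   adds -> dest_lo = 2, C = 0
//   adc  -> final_dest_hi = 1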
2816 
2817 // Generate an address from (r + r1 extend offset).  "size" is the
2818 // size of the operand.  The result may be in rscratch2.
2819 Address MacroAssembler::offsetted_address(Register r, Register r1,
2820                                           Address::extend ext, int offset, int size) {
2821   if (offset || (ext.shift() % size != 0)) {
2822     lea(rscratch2, Address(r, r1, ext));
2823     return Address(rscratch2, offset);
2824   } else {
2825     return Address(r, r1, ext);
2826   }
2827 }
2828 
2829 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
2830 {
2831   assert(offset >= 0, "spill to negative address?");
2832   // Offset reachable ?
2833   //   Not aligned - 9 bits signed offset
2834   //   Aligned - 12 bits unsigned offset shifted
2835   Register base = sp;
2836   if ((offset & (size-1)) && offset >= (1<<8)) {
2837     add(tmp, base, offset & ((1<<12)-1));
2838     base = tmp;
2839     offset &= -1u<<12;
2840   }
2841 
2842   if (offset >= (1<<12) * size) {
2843     add(tmp, base, offset & (((1<<12)-1)<<12));
2844     base = tmp;
2845     offset &= ~(((1<<12)-1)<<12);
2846   }
2847 
2848   return Address(base, offset);
2849 }
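
// Worked example (illustration only): spill_address(8, 0x41238, tmp).
// The offset is 8-byte aligned, so the unaligned (9-bit signed) rewrite is
// skipped, but 0x41238 >= (1 << 12) * 8, so the middle chunk is peeled off
// first:
//   add tmp, sp, #0x41000
// and Address(tmp, 0x238) is returned; 0x238 / 8 = 71 fits the scaled
// 12-bit unsigned immediate form of ldr/str.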
2850 
2851 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
2852   assert(offset >= 0, "spill to negative address?");
2853 
2854   Register base = sp;
2855 
2856   // An immediate offset in the range 0 to 255 which is multiplied
2857   // by the current vector or predicate register size in bytes.
2858   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
2859     return Address(base, offset / sve_reg_size_in_bytes);
2860   }
2861 
2862   add(tmp, base, offset);
2863   return Address(tmp);
2864 }
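
// Illustration: with a 32-byte SVE vector, offsets 0, 32, 64, ..., 255*32
// use the scaled form Address(sp, offset / 32) directly; anything else
// (say offset 40, or offsets of 256 vectors and beyond) falls back to
// materializing sp + offset in tmp.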
2865 
2866 // Checks whether offset is aligned.
2867 // Returns true if it is, else false.
2868 bool MacroAssembler::merge_alignment_check(Register base,
2869                                            size_t size,
2870                                            int64_t cur_offset,
2871                                            int64_t prev_offset) const {
2872   if (AvoidUnalignedAccesses) {
2873     if (base == sp) {
2874       // Checks whether the low offset is aligned to a pair of registers.
2875       int64_t pair_mask = size * 2 - 1;
2876       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
2877       return (offset & pair_mask) == 0;
2878     } else { // If base is not sp, we can't guarantee the access is aligned.
2879       return false;
2880     }
2881   } else {
2882     int64_t mask = size - 1;
2883     // Load/store pair instruction only supports element size aligned offset.
2884     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
2885   }
2886 }
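
// Example (illustration only): under AvoidUnalignedAccesses, merging two
// 8-byte spills at sp+16 and sp+24 is allowed (the low offset is 16-byte
// aligned), while sp+8 and sp+16 is rejected because the resulting stp
// would straddle a 16-byte boundary.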
2887 
2888 // Checks whether current and previous loads/stores can be merged.
2889 // Returns true if it can be merged, else false.
2890 bool MacroAssembler::ldst_can_merge(Register rt,
2891                                     const Address &adr,
2892                                     size_t cur_size_in_bytes,
2893                                     bool is_store) const {
2894   address prev = pc() - NativeInstruction::instruction_size;
2895   address last = code()->last_insn();
2896 
2897   if (last == NULL || !nativeInstruction_at(last)->is_Imm_LdSt()) {
2898     return false;
2899   }
2900 
2901   if (adr.getMode() != Address::base_plus_offset || prev != last) {
2902     return false;
2903   }
2904 
2905   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
2906   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
2907 
2908   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32-bit merging.");
2909   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32-bit merging.");
2910 
2911   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
2912     return false;
2913   }
2914 
2915   int64_t max_offset = 63 * prev_size_in_bytes;
2916   int64_t min_offset = -64 * prev_size_in_bytes;
2917 
2918   assert(prev_ldst->is_not_pre_post_index(), "merging pre-indexed or post-indexed accesses is not supported.");
2919 
2920   // Only same base can be merged.
2921   if (adr.base() != prev_ldst->base()) {
2922     return false;
2923   }
2924 
2925   int64_t cur_offset = adr.offset();
2926   int64_t prev_offset = prev_ldst->offset();
2927   size_t diff = abs(cur_offset - prev_offset);
2928   if (diff != prev_size_in_bytes) {
2929     return false;
2930   }
2931 
2932   // The following cases cannot be merged:
2933   // ldr x2, [x2, #8]
2934   // ldr x3, [x2, #16]
2935   // or:
2936   // ldr x2, [x3, #8]
2937   // ldr x2, [x3, #16]
2938   // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
2939   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
2940     return false;
2941   }
2942 
2943   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
2944   // Offset range must be in ldp/stp instruction's range.
2945   if (low_offset > max_offset || low_offset < min_offset) {
2946     return false;
2947   }
2948 
2949   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
2950     return true;
2951   }
2952 
2953   return false;
2954 }
2955 
2956 // Merge current load/store with previous load/store into ldp/stp.
2957 void MacroAssembler::merge_ldst(Register rt,
2958                                 const Address &adr,
2959                                 size_t cur_size_in_bytes,
2960                                 bool is_store) {
2961 
2962   assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store), "cur and prev must be able to be merged.");
2963 
2964   Register rt_low, rt_high;
2965   address prev = pc() - NativeInstruction::instruction_size;
2966   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
2967 
2968   int64_t offset;
2969 
2970   if (adr.offset() < prev_ldst->offset()) {
2971     offset = adr.offset();
2972     rt_low = rt;
2973     rt_high = prev_ldst->target();
2974   } else {
2975     offset = prev_ldst->offset();
2976     rt_low = prev_ldst->target();
2977     rt_high = rt;
2978   }
2979 
2980   Address adr_p = Address(prev_ldst->base(), offset);
2981   // Overwrite previous generated binary.
2982   code_section()->set_end(prev);
2983 
2984   const size_t sz = prev_ldst->size_in_bytes();
2985   assert(sz == 8 || sz == 4, "only supports 64/32-bit merging.");
2986   if (!is_store) {
2987     BLOCK_COMMENT("merged ldr pair");
2988     if (sz == 8) {
2989       ldp(rt_low, rt_high, adr_p);
2990     } else {
2991       ldpw(rt_low, rt_high, adr_p);
2992     }
2993   } else {
2994     BLOCK_COMMENT("merged str pair");
2995     if (sz == 8) {
2996       stp(rt_low, rt_high, adr_p);
2997     } else {
2998       stpw(rt_low, rt_high, adr_p);
2999     }
3000   }
3001 }
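
// Illustrative effect of the peephole above (comment only): a sequence like
//   str x1, [sp, #16]
//   str x2, [sp, #24]
// is rewritten by rewinding the code section over the first str and
// emitting a single
//   stp x1, x2, [sp, #16]
// at the lower of the two offsets.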
3002 
3003 /**
3004  * Multiply 64 bit by 64 bit first loop.
3005  */
3006 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3007                                            Register y, Register y_idx, Register z,
3008                                            Register carry, Register product,
3009                                            Register idx, Register kdx) {
3010   //
3011   //  jlong carry, x[], y[], z[];
3012   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3013   //    huge_128 product = y[idx] * x[xstart] + carry;
3014   //    z[kdx] = (jlong)product;
3015   //    carry  = (jlong)(product >>> 64);
3016   //  }
3017   //  z[xstart] = carry;
3018   //
3019 
3020   Label L_first_loop, L_first_loop_exit;
3021   Label L_one_x, L_one_y, L_multiply;
3022 
3023   subsw(xstart, xstart, 1);
3024   br(Assembler::MI, L_one_x);
3025 
3026   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3027   ldr(x_xstart, Address(rscratch1));
3028   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3029 
3030   bind(L_first_loop);
3031   subsw(idx, idx, 1);
3032   br(Assembler::MI, L_first_loop_exit);
3033   subsw(idx, idx, 1);
3034   br(Assembler::MI, L_one_y);
3035   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3036   ldr(y_idx, Address(rscratch1));
3037   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3038   bind(L_multiply);
3039 
3040   // AArch64 has a multiply-accumulate instruction that we can't use
3041   // here because it has no way to process carries, so we have to use
3042   // separate add and adc instructions.  Bah.
3043   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3044   mul(product, x_xstart, y_idx);
3045   adds(product, product, carry);
3046   adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product
3047 
3048   subw(kdx, kdx, 2);
3049   ror(product, product, 32); // back to big-endian
3050   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
3051 
3052   b(L_first_loop);
3053 
3054   bind(L_one_y);
3055   ldrw(y_idx, Address(y,  0));
3056   b(L_multiply);
3057 
3058   bind(L_one_x);
3059   ldrw(x_xstart, Address(x,  0));
3060   b(L_first_loop);
3061 
3062   bind(L_first_loop_exit);
3063 }
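
// A note on the ror(..., 32) pairs above: BigInteger stores its magnitude
// most-significant int first, so two adjacent ints loaded as one 64-bit
// little-endian word arrive with their halves swapped; rotating by 32
// yields the natural jlong value, and the product is rotated back before
// the 64-bit store.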
3064 
3065 /**
3066  * Multiply 128 bit by 128 bit. Unrolled inner loop.
3067  *
3068  */
3069 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
3070                                              Register carry, Register carry2,
3071                                              Register idx, Register jdx,
3072                                              Register yz_idx1, Register yz_idx2,
3073                                              Register tmp, Register tmp3, Register tmp4,
3074                                              Register tmp6, Register product_hi) {
3075 
3076   //   jlong carry, x[], y[], z[];
3077   //   int kdx = ystart+1;
3078   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
3079   //     huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
3080   //     jlong carry2  = (jlong)(tmp3 >>> 64);
3081   //     huge_128 tmp4 = (y[idx]   * product_hi) + z[kdx+idx] + carry2;
3082   //     carry  = (jlong)(tmp4 >>> 64);
3083   //     z[kdx+idx+1] = (jlong)tmp3;
3084   //     z[kdx+idx] = (jlong)tmp4;
3085   //   }
3086   //   idx += 2;
3087   //   if (idx > 0) {
3088   //     yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
3089   //     z[kdx+idx] = (jlong)yz_idx1;
3090   //     carry  = (jlong)(yz_idx1 >>> 64);
3091   //   }
3092   //
3093 
3094   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
3095 
3096   lsrw(jdx, idx, 2);
3097 
3098   bind(L_third_loop);
3099 
3100   subsw(jdx, jdx, 1);
3101   br(Assembler::MI, L_third_loop_exit);
3102   subw(idx, idx, 4);
3103 
3104   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3105 
3106   ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
3107 
3108   lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3109 
3110   ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
3111   ror(yz_idx2, yz_idx2, 32);
3112 
3113   ldp(rscratch2, rscratch1, Address(tmp6, 0));
3114 
3115   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
3116   umulh(tmp4, product_hi, yz_idx1);
3117 
3118   ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
3119   ror(rscratch2, rscratch2, 32);
3120 
3121   mul(tmp, product_hi, yz_idx2);   //  yz_idx2 * product_hi -> carry2:tmp
3122   umulh(carry2, product_hi, yz_idx2);
3123 
3124   // propagate sum of both multiplications into carry:tmp4:tmp3
3125   adds(tmp3, tmp3, carry);
3126   adc(tmp4, tmp4, zr);
3127   adds(tmp3, tmp3, rscratch1);
3128   adcs(tmp4, tmp4, tmp);
3129   adc(carry, carry2, zr);
3130   adds(tmp4, tmp4, rscratch2);
3131   adc(carry, carry, zr);
3132 
3133   ror(tmp3, tmp3, 32); // convert little-endian to big-endian
3134   ror(tmp4, tmp4, 32);
3135   stp(tmp4, tmp3, Address(tmp6, 0));
3136 
3137   b(L_third_loop);
3138   bind (L_third_loop_exit);
3139 
3140   andw (idx, idx, 0x3);
3141   cbz(idx, L_post_third_loop_done);
3142 
3143   Label L_check_1;
3144   subsw(idx, idx, 2);
3145   br(Assembler::MI, L_check_1);
3146 
3147   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3148   ldr(yz_idx1, Address(rscratch1, 0));
3149   ror(yz_idx1, yz_idx1, 32);
3150   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
3151   umulh(tmp4, product_hi, yz_idx1);
3152   lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3153   ldr(yz_idx2, Address(rscratch1, 0));
3154   ror(yz_idx2, yz_idx2, 32);
3155 
3156   add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
3157 
3158   ror(tmp3, tmp3, 32);
3159   str(tmp3, Address(rscratch1, 0));
3160 
3161   bind (L_check_1);
3162 
3163   andw (idx, idx, 0x1);
3164   subsw(idx, idx, 1);
3165   br(Assembler::MI, L_post_third_loop_done);
3166   ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3167   mul(tmp3, tmp4, product_hi);  //  tmp4 * product_hi -> carry2:tmp3
3168   umulh(carry2, tmp4, product_hi);
3169   ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3170 
3171   add2_with_carry(carry2, tmp3, tmp4, carry);
3172 
3173   strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3174   extr(carry, carry2, tmp3, 32);
3175 
3176   bind(L_post_third_loop_done);
3177 }
3178 
3179 /**
3180  * Code for BigInteger::multiplyToLen() intrinsic.
3181  *
3182  * r0: x
3183  * r1: xlen
3184  * r2: y
3185  * r3: ylen
3186  * r4:  z
3187  * r5: zlen
3188  * r10: tmp1
3189  * r11: tmp2
3190  * r12: tmp3
3191  * r13: tmp4
3192  * r14: tmp5
3193  * r15: tmp6
3194  * r16: tmp7
3195  *
3196  */
3197 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
3198                                      Register z, Register zlen,
3199                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4,
3200                                      Register tmp5, Register tmp6, Register product_hi) {
3201 
3202   assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
3203 
3204   const Register idx = tmp1;
3205   const Register kdx = tmp2;
3206   const Register xstart = tmp3;
3207 
3208   const Register y_idx = tmp4;
3209   const Register carry = tmp5;
3210   const Register product  = xlen;
3211   const Register x_xstart = zlen;  // reuse register
3212 
3213   // First Loop.
3214   //
3215   //  final static long LONG_MASK = 0xffffffffL;
3216   //  int xstart = xlen - 1;
3217   //  int ystart = ylen - 1;
3218   //  long carry = 0;
3219   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3220   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
3221   //    z[kdx] = (int)product;
3222   //    carry = product >>> 32;
3223   //  }
3224   //  z[xstart] = (int)carry;
3225   //
3226 
3227   movw(idx, ylen);      // idx = ylen;
3228   movw(kdx, zlen);      // kdx = xlen+ylen;
3229   mov(carry, zr);       // carry = 0;
3230 
3231   Label L_done;
3232 
3233   movw(xstart, xlen);
3234   subsw(xstart, xstart, 1);
3235   br(Assembler::MI, L_done);
3236 
3237   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
3238 
3239   Label L_second_loop;
3240   cbzw(kdx, L_second_loop);
3241 
3242   Label L_carry;
3243   subw(kdx, kdx, 1);
3244   cbzw(kdx, L_carry);
3245 
3246   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3247   lsr(carry, carry, 32);
3248   subw(kdx, kdx, 1);
3249 
3250   bind(L_carry);
3251   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3252 
3253   // Second and third (nested) loops.
3254   //
3255   // for (int i = xstart-1; i >= 0; i--) { // Second loop
3256   //   carry = 0;
3257   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
3258   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
3259   //                    (z[k] & LONG_MASK) + carry;
3260   //     z[k] = (int)product;
3261   //     carry = product >>> 32;
3262   //   }
3263   //   z[i] = (int)carry;
3264   // }
3265   //
3266   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
3267 
3268   const Register jdx = tmp1;
3269 
3270   bind(L_second_loop);
3271   mov(carry, zr);                // carry = 0;
3272   movw(jdx, ylen);               // j = ystart+1
3273 
3274   subsw(xstart, xstart, 1);      // i = xstart-1;
3275   br(Assembler::MI, L_done);
3276 
3277   str(z, Address(pre(sp, -4 * wordSize)));
3278 
3279   Label L_last_x;
3280   lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
3281   subsw(xstart, xstart, 1);       // i = xstart-1;
3282   br(Assembler::MI, L_last_x);
3283 
3284   lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
3285   ldr(product_hi, Address(rscratch1));
3286   ror(product_hi, product_hi, 32);  // convert big-endian to little-endian
3287 
3288   Label L_third_loop_prologue;
3289   bind(L_third_loop_prologue);
3290 
3291   str(ylen, Address(sp, wordSize));
3292   stp(x, xstart, Address(sp, 2 * wordSize));
3293   multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
3294                           tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
3295   ldp(z, ylen, Address(post(sp, 2 * wordSize)));
3296   ldp(x, xlen, Address(post(sp, 2 * wordSize)));   // copy old xstart -> xlen
3297 
3298   addw(tmp3, xlen, 1);
3299   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
3300   subsw(tmp3, tmp3, 1);
3301   br(Assembler::MI, L_done);
3302 
3303   lsr(carry, carry, 32);
3304   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
3305   b(L_second_loop);
3306 
3307   // The following infrequently executed code is placed outside the loops.
3308   bind(L_last_x);
3309   ldrw(product_hi, Address(x,  0));
3310   b(L_third_loop_prologue);
3311 
3312   bind(L_done);
3313 }
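
// Tiny worked example (illustration only): xlen = ylen = 1, x = {2},
// y = {3}, zlen = 2.  multiply_64_x_64_loop takes the L_one_x/L_one_y
// paths, computes product = 2 * 3 = 6 with carry 0, and z ends up {0, 6}:
// high word in z[0], low word in z[1], matching BigInteger's
// most-significant-first int[] magnitude.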
3314 
3315 // Code for BigInteger::mulAdd intrinsic
3316 // out     = r0
3317 // in      = r1
3318 // offset  = r2  (already out.length-offset)
3319 // len     = r3
3320 // k       = r4
3321 //
3322 // pseudo code from java implementation:
3323 // carry = 0;
3324 // offset = out.length-offset - 1;
3325 // for (int j=len-1; j >= 0; j--) {
3326 //     product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
3327 //     out[offset--] = (int)product;
3328 //     carry = product >>> 32;
3329 // }
3330 // return (int)carry;
3331 void MacroAssembler::mul_add(Register out, Register in, Register offset,
3332       Register len, Register k) {
3333     Label LOOP, END;
3334     // pre-loop
3335     cmp(len, zr); // cmp, not cbz/cbnz: reuse the condition twice => fewer branches
3336     csel(out, zr, out, Assembler::EQ);
3337     br(Assembler::EQ, END);
3338     add(in, in, len, LSL, 2); // in[j+1] address
3339     add(offset, out, offset, LSL, 2); // out[offset + 1] address
3340     mov(out, zr); // used to keep carry now
3341     BIND(LOOP);
3342     ldrw(rscratch1, Address(pre(in, -4)));
3343     madd(rscratch1, rscratch1, k, out);
3344     ldrw(rscratch2, Address(pre(offset, -4)));
3345     add(rscratch1, rscratch1, rscratch2);
3346     strw(rscratch1, Address(offset));
3347     lsr(out, rscratch1, 32);
3348     subs(len, len, 1);
3349     br(Assembler::NE, LOOP);
3350     BIND(END);
3351 }
3352 
3353 /**
3354  * Emits code to update CRC-32 with a byte value according to constants in table
3355  *
3356  * @param [in,out]crc   Register containing the crc.
3357  * @param [in]val       Register containing the byte to fold into the CRC.
3358  * @param [in]table     Register containing the table of crc constants.
3359  *
3360  * uint32_t crc;
3361  * val = crc_table[(val ^ crc) & 0xFF];
3362  * crc = val ^ (crc >> 8);
3363  *
3364  */
3365 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
3366   eor(val, val, crc);
3367   andr(val, val, 0xff);
3368   ldrw(val, Address(table, val, Address::lsl(2)));
3369   eor(crc, val, crc, Assembler::LSR, 8);
3370 }
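
// Reference model (illustrative C++; assumes a standard 256-entry
// reflected CRC-32 table and is not part of the generated code):
//
//   uint32_t crc32_byte(uint32_t crc, uint8_t b, const uint32_t* table) {
//     return table[(crc ^ b) & 0xff] ^ (crc >> 8);
//   }
//
// The instructions above perform exactly this update, with the table
// lookup done as a scaled register-offset ldrw.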
3371 
3372 /**
3373  * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
3374  *
3375  * @param [in,out]crc   Register containing the crc.
3376  * @param [in]v         Register containing the 32-bit to fold into the CRC.
3377  * @param [in]table0    Register containing table 0 of crc constants.
3378  * @param [in]table1    Register containing table 1 of crc constants.
3379  * @param [in]table2    Register containing table 2 of crc constants.
3380  * @param [in]table3    Register containing table 3 of crc constants.
3381  *
3382  * uint32_t crc;
3383  *   v = crc ^ v
3384  *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
3385  *
3386  */
3387 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
3388         Register table0, Register table1, Register table2, Register table3,
3389         bool upper) {
3390   eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
3391   uxtb(tmp, v);
3392   ldrw(crc, Address(table3, tmp, Address::lsl(2)));
3393   ubfx(tmp, v, 8, 8);
3394   ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
3395   eor(crc, crc, tmp);
3396   ubfx(tmp, v, 16, 8);
3397   ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
3398   eor(crc, crc, tmp);
3399   ubfx(tmp, v, 24, 8);
3400   ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
3401   eor(crc, crc, tmp);
3402 }
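
// This is the "slicing-by-4" formulation: four table lookups advance the
// CRC over four input bytes at once.  Illustrative C++ model (assuming
// table0..table3 are the usual slicing tables):
//
//   v ^= crc;
//   crc = table3[v & 0xff] ^ table2[(v >> 8) & 0xff]
//       ^ table1[(v >> 16) & 0xff] ^ table0[(v >> 24) & 0xff];
//
// With upper == true the same update is applied to the high 32 bits of v.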
3403 
3404 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
3405         Register len, Register tmp0, Register tmp1, Register tmp2,
3406         Register tmp3) {
3407     Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
3408     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
3409 
3410     mvnw(crc, crc);
3411 
3412     subs(len, len, 128);
3413     br(Assembler::GE, CRC_by64_pre);
3414   BIND(CRC_less64);
3415     adds(len, len, 128-32);
3416     br(Assembler::GE, CRC_by32_loop);
3417   BIND(CRC_less32);
3418     adds(len, len, 32-4);
3419     br(Assembler::GE, CRC_by4_loop);
3420     adds(len, len, 4);
3421     br(Assembler::GT, CRC_by1_loop);
3422     b(L_exit);
3423 
3424   BIND(CRC_by32_loop);
3425     ldp(tmp0, tmp1, Address(post(buf, 16)));
3426     subs(len, len, 32);
3427     crc32x(crc, crc, tmp0);
3428     ldr(tmp2, Address(post(buf, 8)));
3429     crc32x(crc, crc, tmp1);
3430     ldr(tmp3, Address(post(buf, 8)));
3431     crc32x(crc, crc, tmp2);
3432     crc32x(crc, crc, tmp3);
3433     br(Assembler::GE, CRC_by32_loop);
3434     cmn(len, 32);
3435     br(Assembler::NE, CRC_less32);
3436     b(L_exit);
3437 
3438   BIND(CRC_by4_loop);
3439     ldrw(tmp0, Address(post(buf, 4)));
3440     subs(len, len, 4);
3441     crc32w(crc, crc, tmp0);
3442     br(Assembler::GE, CRC_by4_loop);
3443     adds(len, len, 4);
3444     br(Assembler::LE, L_exit);
3445   BIND(CRC_by1_loop);
3446     ldrb(tmp0, Address(post(buf, 1)));
3447     subs(len, len, 1);
3448     crc32b(crc, crc, tmp0);
3449     br(Assembler::GT, CRC_by1_loop);
3450     b(L_exit);
3451 
3452   BIND(CRC_by64_pre);
3453     sub(buf, buf, 8);
3454     ldp(tmp0, tmp1, Address(buf, 8));
3455     crc32x(crc, crc, tmp0);
3456     ldr(tmp2, Address(buf, 24));
3457     crc32x(crc, crc, tmp1);
3458     ldr(tmp3, Address(buf, 32));
3459     crc32x(crc, crc, tmp2);
3460     ldr(tmp0, Address(buf, 40));
3461     crc32x(crc, crc, tmp3);
3462     ldr(tmp1, Address(buf, 48));
3463     crc32x(crc, crc, tmp0);
3464     ldr(tmp2, Address(buf, 56));
3465     crc32x(crc, crc, tmp1);
3466     ldr(tmp3, Address(pre(buf, 64)));
3467 
3468     b(CRC_by64_loop);
3469 
3470     align(CodeEntryAlignment);
3471   BIND(CRC_by64_loop);
3472     subs(len, len, 64);
3473     crc32x(crc, crc, tmp2);
3474     ldr(tmp0, Address(buf, 8));
3475     crc32x(crc, crc, tmp3);
3476     ldr(tmp1, Address(buf, 16));
3477     crc32x(crc, crc, tmp0);
3478     ldr(tmp2, Address(buf, 24));
3479     crc32x(crc, crc, tmp1);
3480     ldr(tmp3, Address(buf, 32));
3481     crc32x(crc, crc, tmp2);
3482     ldr(tmp0, Address(buf, 40));
3483     crc32x(crc, crc, tmp3);
3484     ldr(tmp1, Address(buf, 48));
3485     crc32x(crc, crc, tmp0);
3486     ldr(tmp2, Address(buf, 56));
3487     crc32x(crc, crc, tmp1);
3488     ldr(tmp3, Address(pre(buf, 64)));
3489     br(Assembler::GE, CRC_by64_loop);
3490 
3491     // post-loop
3492     crc32x(crc, crc, tmp2);
3493     crc32x(crc, crc, tmp3);
3494 
3495     sub(len, len, 64);
3496     add(buf, buf, 8);
3497     cmn(len, 128);
3498     br(Assembler::NE, CRC_less64);
3499   BIND(L_exit);
3500     mvnw(crc, crc);
3501 }
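
// A note on the structure above: the by-64 loop keeps eight crc32x
// instructions in flight, each interleaved with a load for the next
// iteration, so the CRC unit is kept busy; the by-32/by-4/by-1 loops
// handle the tail.  The mvnw at entry and exit implement the standard
// CRC-32 pre- and post-inversion (crc = ~crc).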
3502 
3503 /**
3504  * @param crc   register containing existing CRC (32-bit)
3505  * @param buf   register pointing to input byte buffer (byte*)
3506  * @param len   register containing number of bytes
3507  * @param table0..table3  registers that will contain the CRC table addresses
3508  * @param tmp   scratch register
3509  */
3510 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
3511         Register table0, Register table1, Register table2, Register table3,
3512         Register tmp, Register tmp2, Register tmp3) {
3513   Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
3514   uint64_t offset;
3515 
3516   if (UseCRC32) {
3517       kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
3518       return;
3519   }
3520 
3521     mvnw(crc, crc);
3522 
3523     adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
3524     if (offset) add(table0, table0, offset);
3525     add(table1, table0, 1*256*sizeof(juint));
3526     add(table2, table0, 2*256*sizeof(juint));
3527     add(table3, table0, 3*256*sizeof(juint));
3528 
3529   if (UseNeon) {
3530       cmp(len, (u1)64);
3531       br(Assembler::LT, L_by16);
3532       eor(v16, T16B, v16, v16);
3533 
3534     Label L_fold;
3535 
3536       add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
3537 
3538       ld1(v0, v1, T2D, post(buf, 32));
3539       ld1r(v4, T2D, post(tmp, 8));
3540       ld1r(v5, T2D, post(tmp, 8));
3541       ld1r(v6, T2D, post(tmp, 8));
3542       ld1r(v7, T2D, post(tmp, 8));
3543       mov(v16, S, 0, crc);
3544 
3545       eor(v0, T16B, v0, v16);
3546       sub(len, len, 64);
3547 
3548     BIND(L_fold);
3549       pmull(v22, T8H, v0, v5, T8B);
3550       pmull(v20, T8H, v0, v7, T8B);
3551       pmull(v23, T8H, v0, v4, T8B);
3552       pmull(v21, T8H, v0, v6, T8B);
3553 
3554       pmull2(v18, T8H, v0, v5, T16B);
3555       pmull2(v16, T8H, v0, v7, T16B);
3556       pmull2(v19, T8H, v0, v4, T16B);
3557       pmull2(v17, T8H, v0, v6, T16B);
3558 
3559       uzp1(v24, T8H, v20, v22);
3560       uzp2(v25, T8H, v20, v22);
3561       eor(v20, T16B, v24, v25);
3562 
3563       uzp1(v26, T8H, v16, v18);
3564       uzp2(v27, T8H, v16, v18);
3565       eor(v16, T16B, v26, v27);
3566 
3567       ushll2(v22, T4S, v20, T8H, 8);
3568       ushll(v20, T4S, v20, T4H, 8);
3569 
3570       ushll2(v18, T4S, v16, T8H, 8);
3571       ushll(v16, T4S, v16, T4H, 8);
3572 
3573       eor(v22, T16B, v23, v22);
3574       eor(v18, T16B, v19, v18);
3575       eor(v20, T16B, v21, v20);
3576       eor(v16, T16B, v17, v16);
3577 
3578       uzp1(v17, T2D, v16, v20);
3579       uzp2(v21, T2D, v16, v20);
3580       eor(v17, T16B, v17, v21);
3581 
3582       ushll2(v20, T2D, v17, T4S, 16);
3583       ushll(v16, T2D, v17, T2S, 16);
3584 
3585       eor(v20, T16B, v20, v22);
3586       eor(v16, T16B, v16, v18);
3587 
3588       uzp1(v17, T2D, v20, v16);
3589       uzp2(v21, T2D, v20, v16);
3590       eor(v28, T16B, v17, v21);
3591 
3592       pmull(v22, T8H, v1, v5, T8B);
3593       pmull(v20, T8H, v1, v7, T8B);
3594       pmull(v23, T8H, v1, v4, T8B);
3595       pmull(v21, T8H, v1, v6, T8B);
3596 
3597       pmull2(v18, T8H, v1, v5, T16B);
3598       pmull2(v16, T8H, v1, v7, T16B);
3599       pmull2(v19, T8H, v1, v4, T16B);
3600       pmull2(v17, T8H, v1, v6, T16B);
3601 
3602       ld1(v0, v1, T2D, post(buf, 32));
3603 
3604       uzp1(v24, T8H, v20, v22);
3605       uzp2(v25, T8H, v20, v22);
3606       eor(v20, T16B, v24, v25);
3607 
3608       uzp1(v26, T8H, v16, v18);
3609       uzp2(v27, T8H, v16, v18);
3610       eor(v16, T16B, v26, v27);
3611 
3612       ushll2(v22, T4S, v20, T8H, 8);
3613       ushll(v20, T4S, v20, T4H, 8);
3614 
3615       ushll2(v18, T4S, v16, T8H, 8);
3616       ushll(v16, T4S, v16, T4H, 8);
3617 
3618       eor(v22, T16B, v23, v22);
3619       eor(v18, T16B, v19, v18);
3620       eor(v20, T16B, v21, v20);
3621       eor(v16, T16B, v17, v16);
3622 
3623       uzp1(v17, T2D, v16, v20);
3624       uzp2(v21, T2D, v16, v20);
3625       eor(v16, T16B, v17, v21);
3626 
3627       ushll2(v20, T2D, v16, T4S, 16);
3628       ushll(v16, T2D, v16, T2S, 16);
3629 
3630       eor(v20, T16B, v22, v20);
3631       eor(v16, T16B, v16, v18);
3632 
3633       uzp1(v17, T2D, v20, v16);
3634       uzp2(v21, T2D, v20, v16);
3635       eor(v20, T16B, v17, v21);
3636 
3637       shl(v16, T2D, v28, 1);
3638       shl(v17, T2D, v20, 1);
3639 
3640       eor(v0, T16B, v0, v16);
3641       eor(v1, T16B, v1, v17);
3642 
3643       subs(len, len, 32);
3644       br(Assembler::GE, L_fold);
3645 
3646       mov(crc, 0);
3647       mov(tmp, v0, D, 0);
3648       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3649       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3650       mov(tmp, v0, D, 1);
3651       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3652       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3653       mov(tmp, v1, D, 0);
3654       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3655       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3656       mov(tmp, v1, D, 1);
3657       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3658       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3659 
3660       add(len, len, 32);
3661   }
3662 
3663   BIND(L_by16);
3664     subs(len, len, 16);
3665     br(Assembler::GE, L_by16_loop);
3666     adds(len, len, 16-4);
3667     br(Assembler::GE, L_by4_loop);
3668     adds(len, len, 4);
3669     br(Assembler::GT, L_by1_loop);
3670     b(L_exit);
3671 
3672   BIND(L_by4_loop);
3673     ldrw(tmp, Address(post(buf, 4)));
3674     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
3675     subs(len, len, 4);
3676     br(Assembler::GE, L_by4_loop);
3677     adds(len, len, 4);
3678     br(Assembler::LE, L_exit);
3679   BIND(L_by1_loop);
3680     subs(len, len, 1);
3681     ldrb(tmp, Address(post(buf, 1)));
3682     update_byte_crc32(crc, tmp, table0);
3683     br(Assembler::GT, L_by1_loop);
3684     b(L_exit);
3685 
3686     align(CodeEntryAlignment);
3687   BIND(L_by16_loop);
3688     subs(len, len, 16);
3689     ldp(tmp, tmp3, Address(post(buf, 16)));
3690     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3691     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3692     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
3693     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
3694     br(Assembler::GE, L_by16_loop);
3695     adds(len, len, 16-4);
3696     br(Assembler::GE, L_by4_loop);
3697     adds(len, len, 4);
3698     br(Assembler::GT, L_by1_loop);
3699   BIND(L_exit);
3700     mvnw(crc, crc);
3701 }
3702 
3703 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
3704         Register len, Register tmp0, Register tmp1, Register tmp2,
3705         Register tmp3) {
3706     Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
3707     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
3708 
3709     subs(len, len, 128);
3710     br(Assembler::GE, CRC_by64_pre);
3711   BIND(CRC_less64);
3712     adds(len, len, 128-32);
3713     br(Assembler::GE, CRC_by32_loop);
3714   BIND(CRC_less32);
3715     adds(len, len, 32-4);
3716     br(Assembler::GE, CRC_by4_loop);
3717     adds(len, len, 4);
3718     br(Assembler::GT, CRC_by1_loop);
3719     b(L_exit);
3720 
3721   BIND(CRC_by32_loop);
3722     ldp(tmp0, tmp1, Address(post(buf, 16)));
3723     subs(len, len, 32);
3724     crc32cx(crc, crc, tmp0);
3725     ldr(tmp2, Address(post(buf, 8)));
3726     crc32cx(crc, crc, tmp1);
3727     ldr(tmp3, Address(post(buf, 8)));
3728     crc32cx(crc, crc, tmp2);
3729     crc32cx(crc, crc, tmp3);
3730     br(Assembler::GE, CRC_by32_loop);
3731     cmn(len, 32);
3732     br(Assembler::NE, CRC_less32);
3733     b(L_exit);
3734 
3735   BIND(CRC_by4_loop);
3736     ldrw(tmp0, Address(post(buf, 4)));
3737     subs(len, len, 4);
3738     crc32cw(crc, crc, tmp0);
3739     br(Assembler::GE, CRC_by4_loop);
3740     adds(len, len, 4);
3741     br(Assembler::LE, L_exit);
3742   BIND(CRC_by1_loop);
3743     ldrb(tmp0, Address(post(buf, 1)));
3744     subs(len, len, 1);
3745     crc32cb(crc, crc, tmp0);
3746     br(Assembler::GT, CRC_by1_loop);
3747     b(L_exit);
3748 
3749   BIND(CRC_by64_pre);
3750     sub(buf, buf, 8);
3751     ldp(tmp0, tmp1, Address(buf, 8));
3752     crc32cx(crc, crc, tmp0);
3753     ldr(tmp2, Address(buf, 24));
3754     crc32cx(crc, crc, tmp1);
3755     ldr(tmp3, Address(buf, 32));
3756     crc32cx(crc, crc, tmp2);
3757     ldr(tmp0, Address(buf, 40));
3758     crc32cx(crc, crc, tmp3);
3759     ldr(tmp1, Address(buf, 48));
3760     crc32cx(crc, crc, tmp0);
3761     ldr(tmp2, Address(buf, 56));
3762     crc32cx(crc, crc, tmp1);
3763     ldr(tmp3, Address(pre(buf, 64)));
3764 
3765     b(CRC_by64_loop);
3766 
3767     align(CodeEntryAlignment);
3768   BIND(CRC_by64_loop);
3769     subs(len, len, 64);
3770     crc32cx(crc, crc, tmp2);
3771     ldr(tmp0, Address(buf, 8));
3772     crc32cx(crc, crc, tmp3);
3773     ldr(tmp1, Address(buf, 16));
3774     crc32cx(crc, crc, tmp0);
3775     ldr(tmp2, Address(buf, 24));
3776     crc32cx(crc, crc, tmp1);
3777     ldr(tmp3, Address(buf, 32));
3778     crc32cx(crc, crc, tmp2);
3779     ldr(tmp0, Address(buf, 40));
3780     crc32cx(crc, crc, tmp3);
3781     ldr(tmp1, Address(buf, 48));
3782     crc32cx(crc, crc, tmp0);
3783     ldr(tmp2, Address(buf, 56));
3784     crc32cx(crc, crc, tmp1);
3785     ldr(tmp3, Address(pre(buf, 64)));
3786     br(Assembler::GE, CRC_by64_loop);
3787 
3788     // post-loop
3789     crc32cx(crc, crc, tmp2);
3790     crc32cx(crc, crc, tmp3);
3791 
3792     sub(len, len, 64);
3793     add(buf, buf, 8);
3794     cmn(len, 128);
3795     br(Assembler::NE, CRC_less64);
3796   BIND(L_exit);
3797 }
3798 
3799 /**
3800  * @param crc   register containing existing CRC (32-bit)
3801  * @param buf   register pointing to input byte buffer (byte*)
3802  * @param len   register containing number of bytes
3803  * @param table0..table3  registers passed through as scratch (hardware CRC32C path)
3804  * @param tmp   scratch register
3805  */
3806 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
3807         Register table0, Register table1, Register table2, Register table3,
3808         Register tmp, Register tmp2, Register tmp3) {
3809   kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
3810 }
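
// Unlike kernel_crc32 above, the CRC-32C flavour has no table-driven
// fallback on this port, and no mvnw pre/post inversion is emitted here;
// the bit inversion, if required, is expected to happen outside this
// helper.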
3811 
3812 
3813 SkipIfEqual::SkipIfEqual(
3814     MacroAssembler* masm, const bool* flag_addr, bool value) {
3815   _masm = masm;
3816   uint64_t offset;
3817   _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
3818   _masm->ldrb(rscratch1, Address(rscratch1, offset));
3819   _masm->cbzw(rscratch1, _label);
3820 }
3821 
3822 SkipIfEqual::~SkipIfEqual() {
3823   _masm->bind(_label);
3824 }
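
// Usage sketch (illustration; SomeDiagnosticFlag is a hypothetical
// develop/product flag):
//
//   {
//     SkipIfEqual skip(masm, &SomeDiagnosticFlag, true);
//     // ... code emitted here runs only when the flag byte is non-zero
//   }  // ~SkipIfEqual binds the skip-target label here
//
// Note that, as written, this implementation branches over the scope
// whenever the flag byte is zero, independent of the 'value' argument.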
3825 
3826 void MacroAssembler::addptr(const Address &dst, int32_t src) {
3827   Address adr;
3828   switch(dst.getMode()) {
3829   case Address::base_plus_offset:
3830     // This is the expected mode, although we allow all the other
3831     // forms below.
3832     adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
3833     break;
3834   default:
3835     lea(rscratch2, dst);
3836     adr = Address(rscratch2);
3837     break;
3838   }
3839   ldr(rscratch1, adr);
3840   add(rscratch1, rscratch1, src);
3841   str(rscratch1, adr);
3842 }
3843 
3844 void MacroAssembler::cmpptr(Register src1, Address src2) {
3845   uint64_t offset;
3846   adrp(rscratch1, src2, offset);
3847   ldr(rscratch1, Address(rscratch1, offset));
3848   cmp(src1, rscratch1);
3849 }
3850 
3851 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3852   cmp(obj1, obj2);
3853 }
3854 
3855 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
3856   load_method_holder(rresult, rmethod);
3857   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
3858 }
3859 
3860 void MacroAssembler::load_method_holder(Register holder, Register method) {
3861   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
3862   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
3863   ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
3864 }
3865 
3866 void MacroAssembler::load_metadata(Register dst, Register src) {
3867   if (UseCompressedClassPointers) {
3868     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3869   } else {
3870     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3871   }
3872 }
3873 
3874 void MacroAssembler::load_klass(Register dst, Register src) {
3875   if (UseCompressedClassPointers) {
3876     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3877     decode_klass_not_null(dst);
3878   } else {
3879     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3880   }
3881 }
3882 
3883 // ((OopHandle)result).resolve();
3884 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3885   // OopHandle::resolve is an indirection.
3886   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3887 }
3888 
3889 // ((WeakHandle)result).resolve();
3890 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
3891   assert_different_registers(rresult, rtmp);
3892   Label resolved;
3893 
3894   // A null weak handle resolves to null.
3895   cbz(rresult, resolved);
3896 
3897   // Only 64-bit platforms support GCs that require a tmp register.
3898   // Only IN_HEAP loads require a thread_tmp register.
3899   // WeakHandle::resolve is an indirection like jweak.
3900   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3901                  rresult, Address(rresult), rtmp, /*tmp_thread*/noreg);
3902   bind(resolved);
3903 }
3904 
3905 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3906   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3907   ldr(dst, Address(rmethod, Method::const_offset()));
3908   ldr(dst, Address(dst, ConstMethod::constants_offset()));
3909   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
3910   ldr(dst, Address(dst, mirror_offset));
3911   resolve_oop_handle(dst, tmp);
3912 }
3913 
3914 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3915   if (UseCompressedClassPointers) {
3916     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3917     if (CompressedKlassPointers::base() == NULL) {
3918       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3919       return;
3920     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3921                && CompressedKlassPointers::shift() == 0) {
3922       // Only the bottom 32 bits matter
3923       cmpw(trial_klass, tmp);
3924       return;
3925     }
3926     decode_klass_not_null(tmp);
3927   } else {
3928     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3929   }
3930   cmp(trial_klass, tmp);
3931 }
3932 
3933 void MacroAssembler::load_prototype_header(Register dst, Register src) {
3934   load_klass(dst, src);
3935   ldr(dst, Address(dst, Klass::prototype_header_offset()));
3936 }
3937 
3938 void MacroAssembler::store_klass(Register dst, Register src) {
3939   // FIXME: Should this be a store release?  Concurrent GCs assume
3940   // the klass length is valid if the klass field is not null.
3941   if (UseCompressedClassPointers) {
3942     encode_klass_not_null(src);
3943     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3944   } else {
3945     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3946   }
3947 }
3948 
3949 void MacroAssembler::store_klass_gap(Register dst, Register src) {
3950   if (UseCompressedClassPointers) {
3951     // Store to klass gap in destination
3952     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
3953   }
3954 }
3955 
3956 // Algorithm must match CompressedOops::encode.
3957 void MacroAssembler::encode_heap_oop(Register d, Register s) {
3958 #ifdef ASSERT
3959   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
3960 #endif
3961   verify_oop(s, "broken oop in encode_heap_oop");
3962   if (CompressedOops::base() == NULL) {
3963     if (CompressedOops::shift() != 0) {
3964       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3965       lsr(d, s, LogMinObjAlignmentInBytes);
3966     } else {
3967       mov(d, s);
3968     }
3969   } else {
3970     subs(d, s, rheapbase);
3971     csel(d, d, zr, Assembler::HS);
3972     lsr(d, d, LogMinObjAlignmentInBytes);
3973 
3974     /*  Old algorithm: is this any worse?
3975     Label nonnull;
3976     cbnz(r, nonnull);
3977     sub(r, r, rheapbase);
3978     bind(nonnull);
3979     lsr(r, r, LogMinObjAlignmentInBytes);
3980     */
3981   }
3982 }
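
// Worked example (illustration only): with CompressedOops::base() ==
// 0x800000000 and shift == 3, encoding s == 0x800091a28 computes
// (s - base) >> 3 == 0x12345.  The csel keeps the result zero when
// s == NULL: the subs borrows (s < base), so HS is false and zr is
// selected before the shift.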
3983 
3984 void MacroAssembler::encode_heap_oop_not_null(Register r) {
3985 #ifdef ASSERT
3986   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
3987   if (CheckCompressedOops) {
3988     Label ok;
3989     cbnz(r, ok);
3990     stop("null oop passed to encode_heap_oop_not_null");
3991     bind(ok);
3992   }
3993 #endif
3994   verify_oop(r, "broken oop in encode_heap_oop_not_null");
3995   if (CompressedOops::base() != NULL) {
3996     sub(r, r, rheapbase);
3997   }
3998   if (CompressedOops::shift() != 0) {
3999     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4000     lsr(r, r, LogMinObjAlignmentInBytes);
4001   }
4002 }
4003 
4004 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
4005 #ifdef ASSERT
4006   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
4007   if (CheckCompressedOops) {
4008     Label ok;
4009     cbnz(src, ok);
4010     stop("null oop passed to encode_heap_oop_not_null2");
4011     bind(ok);
4012   }
4013 #endif
4014   verify_oop(src, "broken oop in encode_heap_oop_not_null2");
4015 
4016   Register data = src;
4017   if (CompressedOops::base() != NULL) {
4018     sub(dst, src, rheapbase);
4019     data = dst;
4020   }
4021   if (CompressedOops::shift() != 0) {
4022     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4023     lsr(dst, data, LogMinObjAlignmentInBytes);
4024     data = dst;
4025   }
4026   if (data == src)
4027     mov(dst, src);
4028 }
4029 
4030 void  MacroAssembler::decode_heap_oop(Register d, Register s) {
4031 #ifdef ASSERT
4032   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
4033 #endif
4034   if (CompressedOops::base() == NULL) {
4035     if (CompressedOops::shift() != 0 || d != s) {
4036       lsl(d, s, CompressedOops::shift());
4037     }
4038   } else {
4039     Label done;
4040     if (d != s)
4041       mov(d, s);
4042     cbz(s, done);
4043     add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
4044     bind(done);
4045   }
4046   verify_oop(d, "broken oop in decode_heap_oop");
4047 }
4048 
4049 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
4050   assert (UseCompressedOops, "should only be used for compressed headers");
4051   assert (Universe::heap() != NULL, "java heap should be initialized");
4052   // Cannot assert, unverified entry point counts instructions (see .ad file)
4053   // vtableStubs also counts instructions in pd_code_size_limit.
4054   // Also do not verify_oop as this is called by verify_oop.
4055   if (CompressedOops::shift() != 0) {
4056     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4057     if (CompressedOops::base() != NULL) {
4058       add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
4059     } else {
4060       add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
4061     }
4062   } else {
4063     assert (CompressedOops::base() == NULL, "sanity");
4064   }
4065 }
4066 
4067 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
4068   assert (UseCompressedOops, "should only be used for compressed headers");
4069   assert (Universe::heap() != NULL, "java heap should be initialized");
4070   // Cannot assert, unverified entry point counts instructions (see .ad file)
4071   // vtableStubs also counts instructions in pd_code_size_limit.
4072   // Also do not verify_oop as this is called by verify_oop.
4073   if (CompressedOops::shift() != 0) {
4074     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4075     if (CompressedOops::base() != NULL) {
4076       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4077     } else {
4078       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4079     }
4080   } else {
4081     assert (CompressedOops::base() == NULL, "sanity");
4082     if (dst != src) {
4083       mov(dst, src);
4084     }
4085   }
4086 }
4087 
4088 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
4089 
4090 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
4091   assert(UseCompressedClassPointers, "not using compressed class pointers");
4092   assert(Metaspace::initialized(), "metaspace not initialized yet");
4093 
4094   if (_klass_decode_mode != KlassDecodeNone) {
4095     return _klass_decode_mode;
4096   }
4097 
4098   assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
4099          || 0 == CompressedKlassPointers::shift(), "decode alg wrong");
4100 
4101   if (CompressedKlassPointers::base() == NULL) {
4102     return (_klass_decode_mode = KlassDecodeZero);
4103   }
4104 
4105   if (operand_valid_for_logical_immediate(
4106         /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
4107     const uint64_t range_mask =
4108       (1ULL << log2i(CompressedKlassPointers::range())) - 1;
4109     if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
4110       return (_klass_decode_mode = KlassDecodeXor);
4111     }
4112   }
4113 
4114   const uint64_t shifted_base =
4115     (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4116   guarantee((shifted_base & 0xffff0000ffffffff) == 0,
4117             "compressed class base bad alignment");
4118 
4119   return (_klass_decode_mode = KlassDecodeMovk);
4120 }
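
// Illustrative summary of the three modes (bases are hypothetical):
//  - KlassDecodeZero: base == NULL; decode is a bare shift (or a plain
//    move when shift == 0).
//  - KlassDecodeXor:  base is a valid logical immediate whose set bits lie
//    entirely above the compressed-klass range, e.g. 0x800000000; decode
//    is lsl + eor, which acts as an OR because the bit ranges are disjoint.
//  - KlassDecodeMovk: base >> shift occupies only bits [32, 48), so decode
//    can patch it in with a single movk at position 32.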
4121 
4122 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
4123   switch (klass_decode_mode()) {
4124   case KlassDecodeZero:
4125     if (CompressedKlassPointers::shift() != 0) {
4126       lsr(dst, src, LogKlassAlignmentInBytes);
4127     } else {
4128       if (dst != src) mov(dst, src);
4129     }
4130     break;
4131 
4132   case KlassDecodeXor:
4133     if (CompressedKlassPointers::shift() != 0) {
4134       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4135       lsr(dst, dst, LogKlassAlignmentInBytes);
4136     } else {
4137       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4138     }
4139     break;
4140 
4141   case KlassDecodeMovk:
4142     if (CompressedKlassPointers::shift() != 0) {
4143       ubfx(dst, src, LogKlassAlignmentInBytes, 32);
4144     } else {
4145       movw(dst, src);
4146     }
4147     break;
4148 
4149   case KlassDecodeNone:
4150     ShouldNotReachHere();
4151     break;
4152   }
4153 }
4154 
4155 void MacroAssembler::encode_klass_not_null(Register r) {
4156   encode_klass_not_null(r, r);
4157 }
4158 
4159 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4160   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4161 
4162   switch (klass_decode_mode()) {
4163   case KlassDecodeZero:
4164     if (CompressedKlassPointers::shift() != 0) {
4165       lsl(dst, src, LogKlassAlignmentInBytes);
4166     } else {
4167       if (dst != src) mov(dst, src);
4168     }
4169     break;
4170 
4171   case KlassDecodeXor:
4172     if (CompressedKlassPointers::shift() != 0) {
4173       lsl(dst, src, LogKlassAlignmentInBytes);
4174       eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
4175     } else {
4176       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4177     }
4178     break;
4179 
4180   case KlassDecodeMovk: {
4181     const uint64_t shifted_base =
4182       (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4183 
4184     if (dst != src) movw(dst, src);
4185     movk(dst, shifted_base >> 32, 32);
4186 
4187     if (CompressedKlassPointers::shift() != 0) {
4188       lsl(dst, dst, LogKlassAlignmentInBytes);
4189     }
4190 
4191     break;
4192   }
4193 
4194   case KlassDecodeNone:
4195     ShouldNotReachHere();
4196     break;
4197   }
4198 }
4199 
4200 void  MacroAssembler::decode_klass_not_null(Register r) {
4201   decode_klass_not_null(r, r);
4202 }
4203 
4204 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
4205 #ifdef ASSERT
4206   {
4207     ThreadInVMfromUnknown tiv;
4208     assert (UseCompressedOops, "should only be used for compressed oops");
4209     assert (Universe::heap() != NULL, "java heap should be initialized");
4210     assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
4211     assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
4212   }
4213 #endif
4214   int oop_index = oop_recorder()->find_index(obj);
4215   InstructionMark im(this);
4216   RelocationHolder rspec = oop_Relocation::spec(oop_index);
4217   code_section()->relocate(inst_mark(), rspec);
4218   movz(dst, 0xDEAD, 16);
4219   movk(dst, 0xBEEF);
4220 }
4221 
4222 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
4223   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4224   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
4225   int index = oop_recorder()->find_index(k);
4226   assert(! Universe::heap()->is_in(k), "should not be an oop");
4227 
4228   InstructionMark im(this);
4229   RelocationHolder rspec = metadata_Relocation::spec(index);
4230   code_section()->relocate(inst_mark(), rspec);
4231   narrowKlass nk = CompressedKlassPointers::encode(k);
4232   movz(dst, (nk >> 16), 16);
4233   movk(dst, nk & 0xffff);
4234 }
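
// Note (illustration): set_narrow_oop above deliberately emits the
// 0xDEADBEEF placeholder because the actual narrow oop is patched in later
// through the oop relocation, whereas the narrow klass is a compile-time
// constant and can be materialized directly from nk.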
4235 
4236 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4237                                     Register dst, Address src,
4238                                     Register tmp1, Register thread_tmp) {
4239   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4240   decorators = AccessInternal::decorator_fixup(decorators);
4241   bool as_raw = (decorators & AS_RAW) != 0;
4242   if (as_raw) {
4243     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4244   } else {
4245     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4246   }
4247 }
4248 
4249 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4250                                      Address dst, Register src,
4251                                      Register tmp1, Register thread_tmp, Register tmp3) {
4252 
4253   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4254   decorators = AccessInternal::decorator_fixup(decorators);
4255   bool as_raw = (decorators & AS_RAW) != 0;
4256   if (as_raw) {
4257     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4258   } else {
4259     bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4260   }
4261 }
4262 
4263 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst,
4264                                        Register inline_klass) {
4265   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4266   bs->value_copy(this, decorators, src, dst, inline_klass);
4267 }
4268 
4269 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) {
4270   ldr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
4271   ldrw(offset, Address(offset, InlineKlass::first_field_offset_offset()));
4272 }
4273 
4274 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) {
4275   // ((address) (void*) o) + vk->first_field_offset();
4276   Register offset = (data == oop) ? rscratch1 : data;
4277   first_field_offset(inline_klass, offset);
4278   if (data == oop) {
4279     add(data, data, offset);
4280   } else {
4281     lea(data, Address(oop, offset));
4282   }
4283 }
4284 
4285 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
4286                                                 Register index, Register data) {
4287   assert_different_registers(array, array_klass, index);
4288   assert_different_registers(rscratch1, array, index);
4289 
4290   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
4291   ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset()));
4292 
4293   // Klass::layout_helper_log2_element_size(lh)
4294   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
4295   lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift);
4296   andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask);
4297   lslv(index, index, rscratch1);
4298 
4299   add(data, array, index);
4300   add(data, data, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT));
4301 }
4302 
4303 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4304                                    Register thread_tmp, DecoratorSet decorators) {
4305   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4306 }
4307 
4308 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4309                                             Register thread_tmp, DecoratorSet decorators) {
4310   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
4311 }
4312 
4313 void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
4314                                     Register thread_tmp, Register tmp3, DecoratorSet decorators) {
4315   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp, tmp3);
4316 }
4317 
4318 // Used for storing NULLs.
4319 void MacroAssembler::store_heap_oop_null(Address dst) {
4320   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
4321 }
4322 
4323 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
4324   assert(oop_recorder() != NULL, "this assembler needs a Recorder");
4325   int index = oop_recorder()->allocate_metadata_index(obj);
4326   RelocationHolder rspec = metadata_Relocation::spec(index);
4327   return Address((address)obj, rspec);
4328 }
4329 
4330 // Move an oop into a register.  immediate is true if we want immediate
4331 // instructions and nmethod entry barriers are not enabled, i.e. we are
4332 // not going to patch this instruction while the code is being executed
4333 // by another thread.
4334 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
4335   int oop_index;
4336   if (obj == NULL) {
4337     oop_index = oop_recorder()->allocate_oop_index(obj);
4338   } else {
4339 #ifdef ASSERT
4340     {
4341       ThreadInVMfromUnknown tiv;
4342       assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
4343     }
4344 #endif
4345     oop_index = oop_recorder()->find_index(obj);
4346   }
4347   RelocationHolder rspec = oop_Relocation::spec(oop_index);
4348 
4349   // nmethod entry barriers necessitate using the constant pool. They have to be
4350   // ordered with respect to oop accesses.
4351   // Using immediate literals would necessitate ISBs.
4352   if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate) {
4353     address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
4354     ldr_constant(dst, Address(dummy, rspec));
4355   } else {
4356     mov(dst, Address((address)obj, rspec));
4357   }
4358 }
4359 
4360 // Move a metadata address into a register.
4361 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
4362   int oop_index;
4363   if (obj == NULL) {
4364     oop_index = oop_recorder()->allocate_metadata_index(obj);
4365   } else {
4366     oop_index = oop_recorder()->find_index(obj);
4367   }
4368   RelocationHolder rspec = metadata_Relocation::spec(oop_index);
4369   mov(dst, Address((address)obj, rspec));
4370 }
4371 
4372 Address MacroAssembler::constant_oop_address(jobject obj) {
4373 #ifdef ASSERT
4374   {
4375     ThreadInVMfromUnknown tiv;
4376     assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
4377     assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
4378   }
4379 #endif
4380   int oop_index = oop_recorder()->find_index(obj);
4381   return Address((address)obj, oop_Relocation::spec(oop_index));
4382 }
4383 
4384 // Object / value buffer allocation...
4385 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
4386                                        Register t1, Register t2,
4387                                        bool clear_fields, Label& alloc_failed)
4388 {
4389   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
4390   Register layout_size = t1;
4391   assert(new_obj == r0, "needs to be r0, according to barrier asm eden_allocate");
4392   assert_different_registers(klass, new_obj, t1, t2);
4393 
4394   // get instance_size in InstanceKlass (scaled to a count of bytes)
4395   ldrw(layout_size, Address(klass, Klass::layout_helper_offset()));
4396   // test to see if it has a finalizer or is malformed in some way
4397   tst(layout_size, Klass::_lh_instance_slow_path_bit);
4398   br(Assembler::NE, slow_case_no_pop);
4399 
4400   // Allocate the instance:
4401   //  If TLAB is enabled:
4402   //    Try to allocate in the TLAB.
4403   //    If fails, go to the slow path.
4404   //  Else If inline contiguous allocations are enabled:
4405   //    Try to allocate in eden.
4406   //    If fails due to heap end, go to slow path.
4407   //
4408   //  If TLAB is enabled OR inline contiguous is enabled:
4409   //    Initialize the allocation.
4410   //    Exit.
4411   //
4412   //  Go to slow path.
4413   const bool allow_shared_alloc =
4414     Universe::heap()->supports_inline_contig_alloc();
4415 
4416   push(klass);
4417 
4418   if (UseTLAB) {
4419     tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
4420     if (ZeroTLAB || (!clear_fields)) {
4421       // the fields have already been cleared
4422       b(initialize_header);
4423     } else {
4424       // initialize both the header and fields
4425       b(initialize_object);
4426     }
4427   } else {
4428     // Allocation in the shared Eden, if allowed.
4429     //
4430     eden_allocate(new_obj, layout_size, 0, t2, slow_case);
4431   }
4432 
4433   // If UseTLAB or allow_shared_alloc is true, the object was created above and
4434   // needs to be initialized. Otherwise, skip and go to the slow path.
4435   if (UseTLAB || allow_shared_alloc) {
4436     if (clear_fields) {
4437       // The object is initialized before the header.  If the object size is
4438       // zero, go directly to the header initialization.
4439       bind(initialize_object);
4440       subs(layout_size, layout_size, sizeof(oopDesc));
4441       br(Assembler::EQ, initialize_header);
4442 
4443       // Initialize topmost object field, divide size by 8, check if odd and
4444       // test if zero.
4445 
4446   #ifdef ASSERT
4447       // make sure instance_size was a multiple of 8
4448       Label L;
4449       tst(layout_size, 7);
4450       br(Assembler::EQ, L);
4451       stop("object size is not multiple of 8 - adjust this code");
4452       bind(L);
4453       // must be > 0, no extra check needed here
4454   #endif
4455 
4456       lsr(layout_size, layout_size, LogBytesPerLong);
4457 
4458       // initialize remaining object fields: instance_size was a multiple of 8
4459       {
4460         Label loop;
4462 
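             // Zero the fields from the top of the object down: each
             // iteration stores zr at new_obj + header + (layout_size - 1) * 8
             // and decrements layout_size until no field words remain.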
4463         bind(loop);
4464         add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong);
4465         str(zr, Address(rscratch1, sizeof(oopDesc) - 1*oopSize));
4466         subs(layout_size, layout_size, 1);
4467         br(Assembler::NE, loop);
4468       }
4469     } // clear_fields
4470 
4471     // initialize object header only.
4472     bind(initialize_header);
4473     pop(klass);
4474     Register mark_word = t2;
4475     ldr(mark_word, Address(klass, Klass::prototype_header_offset()));
4476     str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes ()));
4477     store_klass_gap(new_obj, zr);  // zero klass gap for compressed oops
4478     mov(t2, klass);         // preserve klass
4479     store_klass(new_obj, t2);  // src klass reg is potentially compressed
4480 
4481     b(done);
4482   }
4483 
4484   bind(slow_case);
4485   pop(klass);
4486   bind(slow_case_no_pop);
4487   b(alloc_failed);
4488 
4489   bind(done);
4490 }
4491 
4492 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4493 void MacroAssembler::tlab_allocate(Register obj,
4494                                    Register var_size_in_bytes,
4495                                    int con_size_in_bytes,
4496                                    Register t1,
4497                                    Register t2,
4498                                    Label& slow_case) {
4499   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4500   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
4501 }
4502 
4503 // Defines obj, preserves var_size_in_bytes
4504 void MacroAssembler::eden_allocate(Register obj,
4505                                    Register var_size_in_bytes,
4506                                    int con_size_in_bytes,
4507                                    Register t1,
4508                                    Label& slow_case) {
4509   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4510   bs->eden_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
4511 }
4512 
4513 void MacroAssembler::verify_tlab() {
4514 #ifdef ASSERT
4515   if (UseTLAB && VerifyOops) {
4516     Label next, ok;
4517 
4518     stp(rscratch2, rscratch1, Address(pre(sp, -16)));
4519 
4520     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4521     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
4522     cmp(rscratch2, rscratch1);
4523     br(Assembler::HS, next);
4524     STOP("assert(top >= start)");
4525     should_not_reach_here();
4526 
4527     bind(next);
4528     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
4529     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4530     cmp(rscratch2, rscratch1);
4531     br(Assembler::HS, ok);
4532     STOP("assert(top <= end)");
4533     should_not_reach_here();
4534 
4535     bind(ok);
4536     ldp(rscratch2, rscratch1, Address(post(sp, 16)));
4537   }
4538 #endif
4539 }
4540 
4541 void MacroAssembler::get_inline_type_field_klass(Register klass, Register index, Register inline_klass) {
4542   ldr(inline_klass, Address(klass, InstanceKlass::inline_type_field_klasses_offset()));
4543 #ifdef ASSERT
4544   {
4545     Label done;
4546     cbnz(inline_klass, done);
4547     stop("get_inline_type_field_klass contains no inline klass");
4548     bind(done);
4549   }
4550 #endif
4551   ldr(inline_klass, Address(inline_klass, index, Address::lsl(3)));
4552 }
4553 
4554 // Writes to successive stack pages until the given offset is reached, to
4555 // check for stack overflow and to touch the shadow pages.  Clobbers tmp.
4556 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
4557   assert_different_registers(tmp, size, rscratch1);
4558   mov(tmp, sp);
4559   // Bang stack for total size given plus shadow page size.
4560   // Bang one page at a time because large size can bang beyond yellow and
4561   // red zones.
4562   Label loop;
4563   mov(rscratch1, os::vm_page_size());
4564   bind(loop);
4565   lea(tmp, Address(tmp, -os::vm_page_size()));
4566   subsw(size, size, rscratch1);
4567   str(size, Address(tmp));
4568   br(Assembler::GT, loop);
4569 
4570   // Bang down shadow pages too.
4571   // At this point, (tmp-0) is the last address touched, so don't
4572   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
4573   // was post-decremented.)  Skip this address by starting at i=1, and
4574   // touch a few more pages below.  N.B.  It is important to touch all
4575   // the way down to and including i=StackShadowPages.
4576   for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
4577     // this could be any sized move, but since it can serve as a debugging
4578     // crumb the bigger the better.
4579     lea(tmp, Address(tmp, -os::vm_page_size()));
4580     str(size, Address(tmp));
4581   }
4582 }
4583 
4584 // Move the address of the polling page into dest.
4585 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
4586   ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
4587 }
4588 
4589 // Read the polling page.  The address of the polling page must
4590 // already be in r.
4591 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
4592   address mark;
4593   {
4594     InstructionMark im(this);
4595     code_section()->relocate(inst_mark(), rtype);
4596     ldrw(zr, Address(r, 0));
4597     mark = inst_mark();
4598   }
4599   verify_cross_modify_fence_not_required();
4600   return mark;
4601 }
4602 
4603 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
4604   relocInfo::relocType rtype = dest.rspec().reloc()->type();
4605   uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
4606   uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
4607   uint64_t dest_page = (uint64_t)dest.target() >> 12;
4608   int64_t offset_low = dest_page - low_page;
4609   int64_t offset_high = dest_page - high_page;
4610 
4611   assert(is_valid_AArch64_address(dest.target()), "bad address");
4612   assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
4613 
4614   InstructionMark im(this);
4615   code_section()->relocate(inst_mark(), dest.rspec());
4616   // 8143067: Ensure that the adrp can reach the dest from anywhere within
4617   // the code cache so that if it is relocated we know it will still reach
4618   if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
4619     _adrp(reg1, dest.target());
4620   } else {
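         // The target may not be reachable by adrp from everywhere in the
         // code cache. Materialize a nearby address that has the target's
         // low 32 bits and the current pc's high bits, reach it with adrp,
         // then overwrite bits 32..47 with movk.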
4621     uint64_t target = (uint64_t)dest.target();
4622     uint64_t adrp_target
4623       = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
4624 
4625     _adrp(reg1, (address)adrp_target);
4626     movk(reg1, target >> 32, 32);
4627   }
4628   byte_offset = (uint64_t)dest.target() & 0xfff;
4629 }
4630 
4631 void MacroAssembler::load_byte_map_base(Register reg) {
4632   CardTable::CardValue* byte_map_base =
4633     ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
4634 
4635   // Strictly speaking the byte_map_base isn't an address at all, and it might
4636   // even be negative. It is thus materialised as a constant.
4637   mov(reg, (uint64_t)byte_map_base);
4638 }
4639 
4640 void MacroAssembler::build_frame(int framesize) {
4641   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
4642   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
4643   protect_return_address();
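       // Small frames: drop sp first and save FP/LR at the top with one
       // stp, which requires the offset to be within the (1 << 9)-byte
       // reach of stp's scaled immediate. Larger frames save FP/LR first,
       // then drop sp, using a scratch register once the adjustment
       // exceeds the 12-bit immediate range of sub.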
4644   if (framesize < ((1 << 9) + 2 * wordSize)) {
4645     sub(sp, sp, framesize);
4646     stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
4647     if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
4648   } else {
4649     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
4650     if (PreserveFramePointer) mov(rfp, sp);
4651     if (framesize < ((1 << 12) + 2 * wordSize))
4652       sub(sp, sp, framesize - 2 * wordSize);
4653     else {
4654       mov(rscratch1, framesize - 2 * wordSize);
4655       sub(sp, sp, rscratch1);
4656     }
4657   }
4658   verify_cross_modify_fence_not_required();
4659 }
4660 
4661 void MacroAssembler::remove_frame(int framesize) {
4662   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
4663   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
4664   if (framesize < ((1 << 9) + 2 * wordSize)) {
4665     ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
4666     add(sp, sp, framesize);
4667   } else {
4668     if (framesize < ((1 << 12) + 2 * wordSize))
4669       add(sp, sp, framesize - 2 * wordSize);
4670     else {
4671       mov(rscratch1, framesize - 2 * wordSize);
4672       add(sp, sp, rscratch1);
4673     }
4674     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
4675   }
4676   authenticate_return_address();
4677 }
4678 
4679 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
4680   if (needs_stack_repair) {
4681     // Remove the extension of the caller's frame used for inline type unpacking
4682     //
4683     // Right now the stack looks like this:
4684     //
4685     // | Arguments from caller     |
4686     // |---------------------------|  <-- caller's SP
4687     // | Saved LR #1               |
4688     // | Saved FP #1               |
4689     // |---------------------------|
4690     // | Extension space for       |
4691     // |   inline arg (un)packing  |
4692     // |---------------------------|  <-- start of this method's frame
4693     // | Saved LR #2               |
4694     // | Saved FP #2               |
4695     // |---------------------------|  <-- FP
4696     // | sp_inc                    |
4697     // | method locals             |
4698     // |---------------------------|  <-- SP
4699     //
4700     // There are two copies of FP and LR on the stack. They will be identical
4701     // unless the caller has been deoptimized, in which case LR #1 will be patched
4702     // to point at the deopt blob, and LR #2 will still point into the old method.
4703     //
4704     // The sp_inc stack slot holds the total size of the frame including the
4705     // extension space minus two words for the saved FP and LR.
4706 
4707     int sp_inc_offset = initial_framesize - 3 * wordSize;  // Immediately below saved LR and FP
4708 
4709     ldr(rscratch1, Address(sp, sp_inc_offset));
4710     add(sp, sp, rscratch1);
4711     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
4712   } else {
4713     remove_frame(initial_framesize);
4714   }
4715 }
4716 
4717 void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
4718   int real_frame_size = frame_size + sp_inc;
4719   assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
4720   assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
4721   assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
4722 
4723   int sp_inc_offset = frame_size - 3 * wordSize;  // Immediately below saved LR and FP
4724 
4725   // Subtract two words for the saved FP and LR as these will be popped
4726   // separately. See remove_frame above.
4727   mov(rscratch1, real_frame_size - 2*wordSize);
4728   str(rscratch1, Address(sp, sp_inc_offset));
4729 }
4730 
4731 // This method counts leading positive bytes (highest bit not set) in the provided byte array
4732 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
4733     // Simple and most common case of aligned small array which is not at the
4734     // end of memory page is placed here. All other cases are in stub.
4735     Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
4736     const uint64_t UPPER_BIT_MASK=0x8080808080808080;
4737     assert_different_registers(ary1, len, result);
4738 
4739     mov(result, len);
4740     cmpw(len, 0);
4741     br(LE, DONE);
4742     cmpw(len, 4 * wordSize);
4743     br(GE, STUB_LONG); // if size >= 32, go to stub
4744 
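         // Shift the in-page offset of ary1 into the top bits; adding the
         // equally-shifted 32-byte read size then sets the carry flag
         // exactly when the read could cross into the next page.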
4745     int shift = 64 - exact_log2(os::vm_page_size());
4746     lsl(rscratch1, ary1, shift);
4747     mov(rscratch2, (size_t)(4 * wordSize) << shift);
4748     adds(rscratch2, rscratch1, rscratch2);  // At end of page?
4749     br(CS, STUB); // at the end of page then go to stub
4750     subs(len, len, wordSize);
4751     br(LT, END);
4752 
4753   BIND(LOOP);
4754     ldr(rscratch1, Address(post(ary1, wordSize)));
4755     tst(rscratch1, UPPER_BIT_MASK);
4756     br(NE, SET_RESULT);
4757     subs(len, len, wordSize);
4758     br(GE, LOOP);
4759     cmpw(len, -wordSize);
4760     br(EQ, DONE);
4761 
4762   BIND(END);
4763     ldr(rscratch1, Address(ary1));
4764     sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
4765     lslv(rscratch1, rscratch1, rscratch2);
4766     tst(rscratch1, UPPER_BIT_MASK);
4767     br(NE, SET_RESULT);
4768     b(DONE);
4769 
4770   BIND(STUB);
4771     RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
4772     assert(count_pos.target() != NULL, "count_positives stub has not been generated");
4773     address tpc1 = trampoline_call(count_pos);
4774     if (tpc1 == NULL) {
4775       DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
4776       postcond(pc() == badAddress);
4777       return NULL;
4778     }
4779     b(DONE);
4780 
4781   BIND(STUB_LONG);
4782     RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
4783     assert(count_pos_long.target() != NULL, "count_positives_long stub has not been generated");
4784     address tpc2 = trampoline_call(count_pos_long);
4785     if (tpc2 == NULL) {
4786       DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
4787       postcond(pc() == badAddress);
4788       return NULL;
4789     }
4790     b(DONE);
4791 
4792   BIND(SET_RESULT);
4793 
4794     add(len, len, wordSize);
4795     sub(result, result, len);
4796 
4797   BIND(DONE);
4798   postcond(pc() != badAddress);
4799   return pc();
4800 }
4801 
4802 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
4803                                       Register tmp4, Register tmp5, Register result,
4804                                       Register cnt1, int elem_size) {
4805   Label DONE, SAME;
4806   Register tmp1 = rscratch1;
4807   Register tmp2 = rscratch2;
4808   Register cnt2 = tmp2;  // cnt2 only used in array length compare
4809   int elem_per_word = wordSize/elem_size;
4810   int log_elem_size = exact_log2(elem_size);
4811   int length_offset = arrayOopDesc::length_offset_in_bytes();
4812   int base_offset
4813     = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
4814   int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
4815 
4816   assert(elem_size == 1 || elem_size == 2, "must be char or byte");
4817   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
4818 
4819 #ifndef PRODUCT
4820   {
4821     const char kind = (elem_size == 2) ? 'U' : 'L';
4822     char comment[64];
4823     snprintf(comment, sizeof comment, "array_equals%c{", kind);
4824     BLOCK_COMMENT(comment);
4825   }
4826 #endif
4827 
4828   // if (a1 == a2)
4829   //     return true;
4830   cmpoop(a1, a2); // May have read barriers for a1 and a2.
4831   br(EQ, SAME);
4832 
4833   if (UseSimpleArrayEquals) {
4834     Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
4835     // if (a1 == null || a2 == null)
4836     //     return false;
4837     // (a1 & a2) == 0 means that at least one pointer is null or (very
4838     // rarely) that two non-null pointers share no common set bits, so
4839     // we can save one branch in most cases
4840     tst(a1, a2);
4841     mov(result, false);
4842     br(EQ, A_MIGHT_BE_NULL);
4843     // if (a1.length != a2.length)
4844     //      return false;
4845     bind(A_IS_NOT_NULL);
4846     ldrw(cnt1, Address(a1, length_offset));
4847     ldrw(cnt2, Address(a2, length_offset));
4848     eorw(tmp5, cnt1, cnt2);
4849     cbnzw(tmp5, DONE);
4850     lea(a1, Address(a1, base_offset));
4851     lea(a2, Address(a2, base_offset));
4852     // Check for short strings, i.e. smaller than wordSize.
4853     subs(cnt1, cnt1, elem_per_word);
4854     br(Assembler::LT, SHORT);
4855     // Main 8 byte comparison loop.
4856     bind(NEXT_WORD); {
4857       ldr(tmp1, Address(post(a1, wordSize)));
4858       ldr(tmp2, Address(post(a2, wordSize)));
4859       subs(cnt1, cnt1, elem_per_word);
4860       eor(tmp5, tmp1, tmp2);
4861       cbnz(tmp5, DONE);
4862     } br(GT, NEXT_WORD);
4863     // Last longword.  In the case where length == 4 we compare the
4864     // same longword twice, but that's still faster than another
4865     // conditional branch.
4866     // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
4867     // length == 4.
4868     if (log_elem_size > 0)
4869       lsl(cnt1, cnt1, log_elem_size);
4870     ldr(tmp3, Address(a1, cnt1));
4871     ldr(tmp4, Address(a2, cnt1));
4872     eor(tmp5, tmp3, tmp4);
4873     cbnz(tmp5, DONE);
4874     b(SAME);
4875     bind(A_MIGHT_BE_NULL);
4876     // in case both a1 and a2 are not-null, proceed with loads
4877     cbz(a1, DONE);
4878     cbz(a2, DONE);
4879     b(A_IS_NOT_NULL);
4880     bind(SHORT);
4881 
4882     tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
4883     {
4884       ldrw(tmp1, Address(post(a1, 4)));
4885       ldrw(tmp2, Address(post(a2, 4)));
4886       eorw(tmp5, tmp1, tmp2);
4887       cbnzw(tmp5, DONE);
4888     }
4889     bind(TAIL03);
4890     tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
4891     {
4892       ldrh(tmp3, Address(post(a1, 2)));
4893       ldrh(tmp4, Address(post(a2, 2)));
4894       eorw(tmp5, tmp3, tmp4);
4895       cbnzw(tmp5, DONE);
4896     }
4897     bind(TAIL01);
4898     if (elem_size == 1) { // Only needed when comparing byte arrays.
4899       tbz(cnt1, 0, SAME); // 0-1 bytes left.
4900       {
4901         ldrb(tmp1, a1);
4902         ldrb(tmp2, a2);
4903         eorw(tmp5, tmp1, tmp2);
4904         cbnzw(tmp5, DONE);
4905       }
4906     }
4907   } else {
4908     Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
4909         CSET_EQ, LAST_CHECK;
4910     mov(result, false);
4911     cbz(a1, DONE);
4912     ldrw(cnt1, Address(a1, length_offset));
4913     cbz(a2, DONE);
4914     ldrw(cnt2, Address(a2, length_offset));
4915     // on most CPUs a2 is still (surprisingly) "locked" by the ldrw, so it
4916     // is faster to take another branch before comparing a1 and a2
4917     cmp(cnt1, (u1)elem_per_word);
4918     br(LE, SHORT); // short or same
4919     ldr(tmp3, Address(pre(a1, base_offset)));
4920     subs(zr, cnt1, stubBytesThreshold);
4921     br(GE, STUB);
4922     ldr(tmp4, Address(pre(a2, base_offset)));
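         // tmp5 = -(array length in bits). lslv takes shift counts mod 64,
         // so this later shifts out any bits beyond the arrays' logical
         // length when the final, possibly partial, word is compared.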
4923     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
4924     cmp(cnt2, cnt1);
4925     br(NE, DONE);
4926 
4927     // Main 16 byte comparison loop with 2 exits
4928     bind(NEXT_DWORD); {
4929       ldr(tmp1, Address(pre(a1, wordSize)));
4930       ldr(tmp2, Address(pre(a2, wordSize)));
4931       subs(cnt1, cnt1, 2 * elem_per_word);
4932       br(LE, TAIL);
4933       eor(tmp4, tmp3, tmp4);
4934       cbnz(tmp4, DONE);
4935       ldr(tmp3, Address(pre(a1, wordSize)));
4936       ldr(tmp4, Address(pre(a2, wordSize)));
4937       cmp(cnt1, (u1)elem_per_word);
4938       br(LE, TAIL2);
4939       cmp(tmp1, tmp2);
4940     } br(EQ, NEXT_DWORD);
4941     b(DONE);
4942 
4943     bind(TAIL);
4944     eor(tmp4, tmp3, tmp4);
4945     eor(tmp2, tmp1, tmp2);
4946     lslv(tmp2, tmp2, tmp5);
4947     orr(tmp5, tmp4, tmp2);
4948     cmp(tmp5, zr);
4949     b(CSET_EQ);
4950 
4951     bind(TAIL2);
4952     eor(tmp2, tmp1, tmp2);
4953     cbnz(tmp2, DONE);
4954     b(LAST_CHECK);
4955 
4956     bind(STUB);
4957     ldr(tmp4, Address(pre(a2, base_offset)));
4958     cmp(cnt2, cnt1);
4959     br(NE, DONE);
4960     if (elem_size == 2) { // convert to byte counter
4961       lsl(cnt1, cnt1, 1);
4962     }
4963     eor(tmp5, tmp3, tmp4);
4964     cbnz(tmp5, DONE);
4965     RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
4966     assert(stub.target() != NULL, "array_equals_long stub has not been generated");
4967     address tpc = trampoline_call(stub);
4968     if (tpc == NULL) {
4969       DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
4970       postcond(pc() == badAddress);
4971       return NULL;
4972     }
4973     b(DONE);
4974 
4975     // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
4976     // so, if a2 == null => return false(0), else return true, so we can return a2
4977     mov(result, a2);
4978     b(DONE);
4979     bind(SHORT);
4980     cmp(cnt2, cnt1);
4981     br(NE, DONE);
4982     cbz(cnt1, SAME);
4983     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
4984     ldr(tmp3, Address(a1, base_offset));
4985     ldr(tmp4, Address(a2, base_offset));
4986     bind(LAST_CHECK);
4987     eor(tmp4, tmp3, tmp4);
4988     lslv(tmp5, tmp4, tmp5);
4989     cmp(tmp5, zr);
4990     bind(CSET_EQ);
4991     cset(result, EQ);
4992     b(DONE);
4993   }
4994 
4995   bind(SAME);
4996   mov(result, true);
4997   // That's it.
4998   bind(DONE);
4999 
5000   BLOCK_COMMENT("} array_equals");
5001   postcond(pc() != badAddress);
5002   return pc();
5003 }
5004 
5005 // Compare Strings
5006 
5007 // For Strings we're passed the address of the first characters in a1
5008 // and a2 and the length in cnt1.
5009 // elem_size is the element size in bytes: either 1 or 2.
5010 // There are two implementations.  For arrays >= 8 bytes, all
5011 // comparisons (including the final one, which may overlap) are
5012 // performed 8 bytes at a time.  For strings < 8 bytes, we compare a
5013 // word, then a halfword, and then a byte.
5014 
5015 void MacroAssembler::string_equals(Register a1, Register a2,
5016                                    Register result, Register cnt1, int elem_size)
5017 {
5018   Label SAME, DONE, SHORT, NEXT_WORD;
5019   Register tmp1 = rscratch1;
5020   Register tmp2 = rscratch2;
5021   Register cnt2 = tmp2;  // cnt2 only used in array length compare
5022 
5023   assert(elem_size == 1 || elem_size == 2, "must be 1 or 2 bytes");
5024   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
5025 
5026 #ifndef PRODUCT
5027   {
5028     const char kind = (elem_size == 2) ? 'U' : 'L';
5029     char comment[64];
5030     snprintf(comment, sizeof comment, "{string_equals%c", kind);
5031     BLOCK_COMMENT(comment);
5032   }
5033 #endif
5034 
5035   mov(result, false);
5036 
5037   // Check for short strings, i.e. smaller than wordSize.
5038   subs(cnt1, cnt1, wordSize);
5039   br(Assembler::LT, SHORT);
5040   // Main 8 byte comparison loop.
5041   bind(NEXT_WORD); {
5042     ldr(tmp1, Address(post(a1, wordSize)));
5043     ldr(tmp2, Address(post(a2, wordSize)));
5044     subs(cnt1, cnt1, wordSize);
5045     eor(tmp1, tmp1, tmp2);
5046     cbnz(tmp1, DONE);
5047   } br(GT, NEXT_WORD);
5048   // Last longword.  In the case where length == 4 we compare the
5049   // same longword twice, but that's still faster than another
5050   // conditional branch.
5051   // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
5052   // length == 4.
5053   ldr(tmp1, Address(a1, cnt1));
5054   ldr(tmp2, Address(a2, cnt1));
5055   eor(tmp2, tmp1, tmp2);
5056   cbnz(tmp2, DONE);
5057   b(SAME);
5058 
5059   bind(SHORT);
5060   Label TAIL03, TAIL01;
5061 
5062   tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
5063   {
5064     ldrw(tmp1, Address(post(a1, 4)));
5065     ldrw(tmp2, Address(post(a2, 4)));
5066     eorw(tmp1, tmp1, tmp2);
5067     cbnzw(tmp1, DONE);
5068   }
5069   bind(TAIL03);
5070   tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
5071   {
5072     ldrh(tmp1, Address(post(a1, 2)));
5073     ldrh(tmp2, Address(post(a2, 2)));
5074     eorw(tmp1, tmp1, tmp2);
5075     cbnzw(tmp1, DONE);
5076   }
5077   bind(TAIL01);
5078   if (elem_size == 1) { // Only needed when comparing 1-byte elements
5079     tbz(cnt1, 0, SAME); // 0-1 bytes left.
5080     {
5081       ldrb(tmp1, a1);
5082       ldrb(tmp2, a2);
5083       eorw(tmp1, tmp1, tmp2);
5084       cbnzw(tmp1, DONE);
5085     }
5086   }
5087   // Arrays are equal.
5088   bind(SAME);
5089   mov(result, true);
5090 
5091   // That's it.
5092   bind(DONE);
5093   BLOCK_COMMENT("} string_equals");
5094 }
5095 
5096 
5097 // The size of the blocks erased by the zero_blocks stub.  We must
5098 // handle anything smaller than this ourselves in zero_words().
5099 const int MacroAssembler::zero_words_block_size = 8;
5100 
5101 // zero_words() is used by C2 ClearArray patterns and by
5102 // C1_MacroAssembler.  It is as small as possible, handling small word
5103 // counts locally and delegating anything larger to the zero_blocks
5104 // stub.  It is expanded many times in compiled code, so it is
5105 // important to keep it short.
5106 
5107 // ptr:   Address of a buffer to be zeroed.
5108 // cnt:   Count in HeapWords.
5109 //
5110 // ptr, cnt, rscratch1, and rscratch2 are clobbered.
5111 address MacroAssembler::zero_words(Register ptr, Register cnt)
5112 {
5113   assert(is_power_of_2(zero_words_block_size), "adjust this");
5114 
5115   BLOCK_COMMENT("zero_words {");
5116   assert(ptr == r10 && cnt == r11, "mismatch in register usage");
5117   RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
5118   assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated");
5119 
5120   subs(rscratch1, cnt, zero_words_block_size);
5121   Label around;
5122   br(LO, around);
5123   {
5126     // Make sure this is a C2 compilation. C1 allocates space only for
5127     // trampoline stubs generated by Call LIR ops, and in any case it
5128     // makes sense for a C1 compilation task to proceed as quickly as
5129     // possible.
5130     CompileTask* task;
5131     if (StubRoutines::aarch64::complete()
5132         && Thread::current()->is_Compiler_thread()
5133         && (task = ciEnv::current()->task())
5134         && is_c2_compile(task->comp_level())) {
5135       address tpc = trampoline_call(zero_blocks);
5136       if (tpc == NULL) {
5137         DEBUG_ONLY(reset_labels(around));
5138         assert(false, "failed to allocate space for trampoline");
5139         return NULL;
5140       }
5141     } else {
5142       far_call(zero_blocks);
5143     }
5144   }
5145   bind(around);
5146 
5147   // We have a few words left to do. zero_blocks has adjusted r10 and r11
5148   // for us.
5149   for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
5150     Label l;
5151     tbz(cnt, exact_log2(i), l);
5152     for (int j = 0; j < i; j += 2) {
5153       stp(zr, zr, post(ptr, 2 * BytesPerWord));
5154     }
5155     bind(l);
5156   }
5157   {
5158     Label l;
5159     tbz(cnt, 0, l);
5160     str(zr, Address(ptr));
5161     bind(l);
5162   }
5163 
5164   BLOCK_COMMENT("} zero_words");
5165   return pc();
5166 }
5167 
5168 // base:         Address of a buffer to be zeroed, 8 bytes aligned.
5169 // cnt:          Immediate count in HeapWords.
5170 //
5171 // r10, r11, rscratch1, and rscratch2 are clobbered.
5172 void MacroAssembler::zero_words(Register base, uint64_t cnt)
5173 {
5174   guarantee(zero_words_block_size < BlockZeroingLowLimit,
5175             "increase BlockZeroingLowLimit");
5176   if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
5177 #ifndef PRODUCT
5178     {
5179       char buf[64];
5180       snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
5181       BLOCK_COMMENT(buf);
5182     }
5183 #endif
5184     if (cnt >= 16) {
5185       uint64_t loops = cnt/16;
5186       if (loops > 1) {
5187         mov(rscratch2, loops - 1);
5188       }
5189       {
5190         Label loop;
5191         bind(loop);
5192         for (int i = 0; i < 16; i += 2) {
5193           stp(zr, zr, Address(base, i * BytesPerWord));
5194         }
5195         add(base, base, 16 * BytesPerWord);
5196         if (loops > 1) {
5197           subs(rscratch2, rscratch2, 1);
5198           br(GE, loop);
5199         }
5200       }
5201     }
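         // Zero the remaining (cnt % 16) words: store one odd word first
         // if needed, then the rest in pairs.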
5202     cnt %= 16;
5203     int i = cnt & 1;  // store any odd word to start
5204     if (i) str(zr, Address(base));
5205     for (; i < (int)cnt; i += 2) {
5206       stp(zr, zr, Address(base, i * wordSize));
5207     }
5208     BLOCK_COMMENT("} zero_words");
5209   } else {
5210     mov(r10, base); mov(r11, cnt);
5211     zero_words(r10, r11);
5212   }
5213 }
5214 
5215 // Zero blocks of memory by using DC ZVA.
5216 //
5217 // Aligns the base address first sufficiently for DC ZVA, then uses
5218 // DC ZVA repeatedly for every full block.  cnt is the size to be
5219 // zeroed in HeapWords.  Returns the count of words left to be zeroed
5220 // in cnt.
5221 //
5222 // NOTE: This is intended to be used in the zero_blocks() stub.  If
5223 // you want to use it elsewhere, note that cnt must be >= 2*zva_length.
5224 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
5225   Register tmp = rscratch1;
5226   Register tmp2 = rscratch2;
5227   int zva_length = VM_Version::zva_length();
5228   Label initial_table_end, loop_zva;
5229   Label fini;
5230 
5231   // Base must be 16 byte aligned. If not just return and let caller handle it
5232   tst(base, 0x0f);
5233   br(Assembler::NE, fini);
5234   // Align base with ZVA length.
5235   neg(tmp, base);
5236   andr(tmp, tmp, zva_length - 1);
5237 
5238   // tmp: the number of bytes to be filled to align the base with ZVA length.
5239   add(base, base, tmp);
5240   sub(cnt, cnt, tmp, Assembler::ASR, 3);
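       // Branch backwards into the unrolled stp table below: each 4-byte
       // stp zeroes 16 bytes, so entering the table (tmp >> 2) code bytes
       // before its end zeroes exactly tmp bytes.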
5241   adr(tmp2, initial_table_end);
5242   sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
5243   br(tmp2);
5244 
5245   for (int i = -zva_length + 16; i < 0; i += 16)
5246     stp(zr, zr, Address(base, i));
5247   bind(initial_table_end);
5248 
5249   sub(cnt, cnt, zva_length >> 3);
5250   bind(loop_zva);
5251   dc(Assembler::ZVA, base);
5252   subs(cnt, cnt, zva_length >> 3);
5253   add(base, base, zva_length);
5254   br(Assembler::GE, loop_zva);
5255   add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
5256   bind(fini);
5257 }
5258 
5259 // base:   Address of a buffer to be filled, 8 bytes aligned.
5260 // cnt:    Count in 8-byte unit.
5261 // value:  Value to be filled with.
5262 // base will point to the end of the buffer after filling.
5263 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
5264 {
5265 //  Algorithm:
5266 //
5267 //    if (cnt == 0) {
5268 //      return;
5269 //    }
5270 //    if ((p & 8) != 0) {
5271 //      *p++ = v;
5272 //    }
5273 //
5274 //    scratch1 = cnt & 14;
5275 //    cnt -= scratch1;
5276 //    p += scratch1;
5277 //    switch (scratch1 / 2) {
5278 //      do {
5279 //        cnt -= 16;
5280 //          p[-16] = v;
5281 //          p[-15] = v;
5282 //        case 7:
5283 //          p[-14] = v;
5284 //          p[-13] = v;
5285 //        case 6:
5286 //          p[-12] = v;
5287 //          p[-11] = v;
5288 //          // ...
5289 //        case 1:
5290 //          p[-2] = v;
5291 //          p[-1] = v;
5292 //        case 0:
5293 //          p += 16;
5294 //      } while (cnt);
5295 //    }
5296 //    if ((cnt & 1) == 1) {
5297 //      *p++ = v;
5298 //    }
5299 
5300   assert_different_registers(base, cnt, value, rscratch1, rscratch2);
5301 
5302   Label fini, skip, entry, loop;
5303   const int unroll = 8; // Number of stp instructions we'll unroll
5304 
5305   cbz(cnt, fini);
5306   tbz(base, 3, skip);
5307   str(value, Address(post(base, 8)));
5308   sub(cnt, cnt, 1);
5309   bind(skip);
5310 
5311   andr(rscratch1, cnt, (unroll-1) * 2);
5312   sub(cnt, cnt, rscratch1);
5313   add(base, base, rscratch1, Assembler::LSL, 3);
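       // Branch backwards into the unrolled stp table: each pair of words
       // is filled by one 4-byte stp, so the entry point lies
       // (rscratch1 / 2) * 4 == rscratch1 << 1 code bytes before 'entry'.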
5314   adr(rscratch2, entry);
5315   sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
5316   br(rscratch2);
5317 
5318   bind(loop);
5319   add(base, base, unroll * 16);
5320   for (int i = -unroll; i < 0; i++)
5321     stp(value, value, Address(base, i * 16));
5322   bind(entry);
5323   subs(cnt, cnt, unroll * 2);
5324   br(Assembler::GE, loop);
5325 
5326   tbz(cnt, 0, fini);
5327   str(value, Address(post(base, 8)));
5328   bind(fini);
5329 }
5330 
5331 // Intrinsic for
5332 //
5333 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
5334 //     return the number of characters copied.
5335 // - java/lang/StringUTF16.compress
5336 //     return zero (0) if copy fails, otherwise 'len'.
5337 //
5338 // This version always returns the number of characters copied, and does not
5339 // clobber the 'len' register. A successful copy will complete with the post-
5340 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
5341 // post-condition: 0 <= 'res' < 'len'.
5342 //
5343 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
5344 //       degrade performance (on Ampere Altra - Neoverse N1), to an extent
5345 //       beyond the acceptable, even though the footprint would be smaller.
5346 //       Using 'umaxv' in the ASCII-case comes with a small penalty but does
5347 //       avoid additional bloat.
5348 //
5349 void MacroAssembler::encode_iso_array(Register src, Register dst,
5350                                       Register len, Register res, bool ascii,
5351                                       FloatRegister vtmp0, FloatRegister vtmp1,
5352                                       FloatRegister vtmp2, FloatRegister vtmp3)
5353 {
5354   Register cnt = res;
5355   Register max = rscratch1;
5356   Register chk = rscratch2;
5357 
5358   prfm(Address(src), PLDL1STRM);
5359   movw(cnt, len);
5360 
5361 #define ASCII(insn) do { if (ascii) { insn; } } while (0)
5362 
5363   Label LOOP_32, DONE_32, FAIL_32;
5364 
5365   BIND(LOOP_32);
5366   {
5367     cmpw(cnt, 32);
5368     br(LT, DONE_32);
5369     ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
5370     // Extract lower bytes.
5371     FloatRegister vlo0 = v4;
5372     FloatRegister vlo1 = v5;
5373     uzp1(vlo0, T16B, vtmp0, vtmp1);
5374     uzp1(vlo1, T16B, vtmp2, vtmp3);
5375     // Merge bits...
5376     orr(vtmp0, T16B, vtmp0, vtmp1);
5377     orr(vtmp2, T16B, vtmp2, vtmp3);
5378     // Extract merged upper bytes.
5379     FloatRegister vhix = vtmp0;
5380     uzp2(vhix, T16B, vtmp0, vtmp2);
5381     // ISO-check on hi-parts (all zero).
5382     //                          ASCII-check on lo-parts (no sign).
5383     FloatRegister vlox = vtmp1; // Merge lower bytes.
5384                                 ASCII(orr(vlox, T16B, vlo0, vlo1));
5385     umov(chk, vhix, D, 1);      ASCII(cmlt(vlox, T16B, vlox));
5386     fmovd(max, vhix);           ASCII(umaxv(vlox, T16B, vlox));
5387     orr(chk, chk, max);         ASCII(umov(max, vlox, B, 0));
5388                                 ASCII(orr(chk, chk, max));
5389     cbnz(chk, FAIL_32);
5390     subw(cnt, cnt, 32);
5391     st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
5392     b(LOOP_32);
5393   }
5394   BIND(FAIL_32);
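       // Rewind src past the 64 bytes consumed by the failed 32-char block
       // so the loops below can re-examine those characters.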
5395   sub(src, src, 64);
5396   BIND(DONE_32);
5397 
5398   Label LOOP_8, SKIP_8;
5399 
5400   BIND(LOOP_8);
5401   {
5402     cmpw(cnt, 8);
5403     br(LT, SKIP_8);
5404     FloatRegister vhi = vtmp0;
5405     FloatRegister vlo = vtmp1;
5406     ld1(vtmp3, T8H, src);
5407     uzp1(vlo, T16B, vtmp3, vtmp3);
5408     uzp2(vhi, T16B, vtmp3, vtmp3);
5409     // ISO-check on hi-parts (all zero).
5410     //                          ASCII-check on lo-parts (no sign).
5411                                 ASCII(cmlt(vtmp2, T16B, vlo));
5412     fmovd(chk, vhi);            ASCII(umaxv(vtmp2, T16B, vtmp2));
5413                                 ASCII(umov(max, vtmp2, B, 0));
5414                                 ASCII(orr(chk, chk, max));
5415     cbnz(chk, SKIP_8);
5416 
5417     strd(vlo, Address(post(dst, 8)));
5418     subw(cnt, cnt, 8);
5419     add(src, src, 16);
5420     b(LOOP_8);
5421   }
5422   BIND(SKIP_8);
5423 
5424 #undef ASCII
5425 
5426   Label LOOP, DONE;
5427 
5428   cbz(cnt, DONE);
5429   BIND(LOOP);
5430   {
5431     Register chr = rscratch1;
5432     ldrh(chr, Address(post(src, 2)));
5433     tst(chr, ascii ? 0xff80 : 0xff00);
5434     br(NE, DONE);
5435     strb(chr, Address(post(dst, 1)));
5436     subs(cnt, cnt, 1);
5437     br(GT, LOOP);
5438   }
5439   BIND(DONE);
5440   // Return index where we stopped.
5441   subw(res, len, cnt);
5442 }
5443 
5444 // Inflate byte[] array to char[].
5445 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
5446                                            FloatRegister vtmp1, FloatRegister vtmp2,
5447                                            FloatRegister vtmp3, Register tmp4) {
5448   Label big, done, after_init, to_stub;
5449 
5450   assert_different_registers(src, dst, len, tmp4, rscratch1);
5451 
5452   fmovd(vtmp1, 0.0);
5453   lsrw(tmp4, len, 3);
5454   bind(after_init);
5455   cbnzw(tmp4, big);
5456   // Short string: less than 8 bytes.
5457   {
5458     Label loop, tiny;
5459 
5460     cmpw(len, 4);
5461     br(LT, tiny);
5462     // Use SIMD to do 4 bytes.
5463     ldrs(vtmp2, post(src, 4));
5464     zip1(vtmp3, T8B, vtmp2, vtmp1);
5465     subw(len, len, 4);
5466     strd(vtmp3, post(dst, 8));
5467 
5468     cbzw(len, done);
5469 
5470     // Do the remaining bytes one at a time.
5471     bind(loop);
5472     ldrb(tmp4, post(src, 1));
5473     strh(tmp4, post(dst, 2));
5474     subw(len, len, 1);
5475 
5476     bind(tiny);
5477     cbnz(len, loop);
5478 
5479     b(done);
5480   }
5481 
5482   if (SoftwarePrefetchHintDistance >= 0) {
5483     bind(to_stub);
5484       RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
5485       assert(stub.target() != NULL, "large_byte_array_inflate stub has not been generated");
5486       address tpc = trampoline_call(stub);
5487       if (tpc == NULL) {
5488         DEBUG_ONLY(reset_labels(big, done));
5489         postcond(pc() == badAddress);
5490         return NULL;
5491       }
5492       b(after_init);
5493   }
5494 
5495   // Unpack the bytes 8 at a time.
5496   bind(big);
5497   {
5498     Label loop, around, loop_last, loop_start;
5499 
5500     if (SoftwarePrefetchHintDistance >= 0) {
5501       const int large_loop_threshold = (64 + 16)/8;
5502       ldrd(vtmp2, post(src, 8));
5503       andw(len, len, 7);
5504       cmp(tmp4, (u1)large_loop_threshold);
5505       br(GE, to_stub);
5506       b(loop_start);
5507 
5508       bind(loop);
5509       ldrd(vtmp2, post(src, 8));
5510       bind(loop_start);
5511       subs(tmp4, tmp4, 1);
5512       br(EQ, loop_last);
5513       zip1(vtmp2, T16B, vtmp2, vtmp1);
5514       ldrd(vtmp3, post(src, 8));
5515       st1(vtmp2, T8H, post(dst, 16));
5516       subs(tmp4, tmp4, 1);
5517       zip1(vtmp3, T16B, vtmp3, vtmp1);
5518       st1(vtmp3, T8H, post(dst, 16));
5519       br(NE, loop);
5520       b(around);
5521       bind(loop_last);
5522       zip1(vtmp2, T16B, vtmp2, vtmp1);
5523       st1(vtmp2, T8H, post(dst, 16));
5524       bind(around);
5525       cbz(len, done);
5526     } else {
5527       andw(len, len, 7);
5528       bind(loop);
5529       ldrd(vtmp2, post(src, 8));
5530       sub(tmp4, tmp4, 1);
5531       zip1(vtmp3, T16B, vtmp2, vtmp1);
5532       st1(vtmp3, T8H, post(dst, 16));
5533       cbnz(tmp4, loop);
5534     }
5535   }
5536 
5537   // Do the tail of up to 8 bytes.
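       // The tail re-reads the last 8 source bytes (possibly overlapping
       // bytes already inflated) and rewrites the last 16 destination bytes.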
5538   add(src, src, len);
5539   ldrd(vtmp3, Address(src, -8));
5540   add(dst, dst, len, ext::uxtw, 1);
5541   zip1(vtmp3, T16B, vtmp3, vtmp1);
5542   strq(vtmp3, Address(dst, -16));
5543 
5544   bind(done);
5545   postcond(pc() != badAddress);
5546   return pc();
5547 }
5548 
5549 // Compress char[] array to byte[].
5550 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
5551                                          Register res,
5552                                          FloatRegister tmp0, FloatRegister tmp1,
5553                                          FloatRegister tmp2, FloatRegister tmp3) {
5554   encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3);
5555   // Adjust result: res == len ? len : 0
5556   cmp(len, res);
5557   csel(res, res, zr, EQ);
5558 }
5559 
5560 // get_thread() can be called anywhere inside generated code so we
5561 // need to save whatever non-callee save context might get clobbered
5562 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5563 // the call setup code.
5564 //
5565 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5566 // On other systems, the helper is an ordinary C function.
5567 //
5568 void MacroAssembler::get_thread(Register dst) {
5569   RegSet saved_regs =
5570     LINUX_ONLY(RegSet::range(r0, r1)  + lr - dst)
5571     NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
5572 
5573   protect_return_address();
5574   push(saved_regs, sp);
5575 
5576   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5577   blr(lr);
5578   if (dst != c_rarg0) {
5579     mov(dst, c_rarg0);
5580   }
5581 
5582   pop(saved_regs, sp);
5583   authenticate_return_address();
5584 }
5585 
5586 #ifdef COMPILER2
5587 // C2 compiled method's prolog code
5588 // Moved here from aarch64.ad to support the Valhalla code below
5589 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
5590 
5591   // n.b. frame size includes space for return pc and rfp
5592   const long framesize = C->output()->frame_size_in_bytes();
5593 
5594   // insert a nop at the start of the prolog so we can patch in a
5595   // branch if we need to invalidate the method later
5596   nop();
5597 
5598   int bangsize = C->output()->bang_size_in_bytes();
5599   if (C->output()->need_stack_bang(bangsize))
5600     generate_stack_overflow_check(bangsize);
5601 
5602   build_frame(framesize);
5603 
5604   if (C->needs_stack_repair()) {
5605     save_stack_increment(sp_inc, framesize);
5606   }
5607 
5608   if (VerifyStackAtCalls) {
5609     Unimplemented();
5610   }
5611 }
5612 #endif // COMPILER2
5613 
5614 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
5615   assert(InlineTypeReturnedAsFields, "should only be called when inline types may be returned as fields");
5616   // An inline type might be returned. If fields are in registers we
5617   // need to allocate an inline type instance and initialize it with
5618   // the value of the fields.
5619   Label skip;
5620   // We only need a new buffered inline type if one was not already returned:
       // bit 0 of r0 is set when the fields are returned in registers.
5621   tbz(r0, 0, skip);
5622   int call_offset = -1;
5623 
5624   // Be careful not to clobber r1-7 which hold returned fields
5625   // Also do not use callee-saved registers as these may be live in the interpreter
5626   Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12;
5627 
5628   // The following code is similar to allocate_instance but has some slight differences,
5629   // e.g. the object size is never zero and is sometimes constant, and storing the klass
5630   // pointer after allocation is unnecessary if vk != NULL. allocate_instance is not aware of these.
5631   Label slow_case;
5632   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
5633   mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it when allocation failed
5634 
5635   if (vk != NULL) {
5636     // Called from C1, where the return type is statically known.
5637     movptr(klass, (intptr_t)vk->get_InlineKlass());
5638     jint obj_size = vk->layout_helper();
5639     assert(obj_size != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
5640     if (UseTLAB) {
5641       tlab_allocate(r0, noreg, obj_size, tmp1, tmp2, slow_case);
5642     } else {
5643       eden_allocate(r0, noreg, obj_size, tmp1, slow_case);
5644     }
5645   } else {
5646     // Called from the interpreter. r0 contains ((the InlineKlass* of the return type) | 0x01)
5647     andr(klass, r0, -2);
5648     ldrw(tmp2, Address(klass, Klass::layout_helper_offset()));
5649     if (UseTLAB) {
5650       tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case);
5651     } else {
5652       eden_allocate(r0, tmp2, 0, tmp1, slow_case);
5653     }
5654   }
5655   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
5656     // 2. Initialize buffered inline instance header
5657     Register buffer_obj = r0;
5658     mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value());
5659     str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
5660     store_klass_gap(buffer_obj, zr);
5661     if (vk == NULL) {
5662       // store_klass corrupts klass, so save it for later use (interpreter case only).
5663       mov(tmp1, klass);
5664     }
5665     store_klass(buffer_obj, klass);
5666     // 3. Initialize its fields with an inline class specific handler
5667     if (vk != NULL) {
5668       far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
5669     } else {
5670       // tmp1 holds klass preserved above
5671       ldr(tmp1, Address(tmp1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
5672       ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset()));
5673       blr(tmp1);
5674     }
5675 
5676     membar(Assembler::StoreStore);
5677     b(skip);
5678   } else {
5679     // Must have already branched to slow_case in eden_allocate() above.
5680     DEBUG_ONLY(should_not_reach_here());
5681   }
5682   bind(slow_case);
5683   // We failed to allocate a new inline type, fall back to a runtime
5684   // call. Some oop field may be live in some registers but we can't
5685   // tell. That runtime call will take care of preserving them
5686   // across a GC if there's one.
5687   mov(r0, r0_preserved);
5688 
5689   if (from_interpreter) {
5690     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
5691   } else {
5692     far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
5693     call_offset = offset();
5694   }
5695   membar(Assembler::StoreStore);
5696 
5697   bind(skip);
5698   return call_offset;
5699 }
5700 
5701 // Move a value between registers/stack slots and update the reg_state
5702 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
5703   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
5704   if (reg_state[to->value()] == reg_written) {
5705     return true; // Already written
5706   }
5707 
5708   if (from != to && bt != T_VOID) {
5709     if (reg_state[to->value()] == reg_readonly) {
5710       return false; // Not yet writable
5711     }
5712     if (from->is_reg()) {
5713       if (to->is_reg()) {
5714         if (from->is_Register() && to->is_Register()) {
5715           mov(to->as_Register(), from->as_Register());
5716         } else if (from->is_FloatRegister() && to->is_FloatRegister()) {
5717           fmovd(to->as_FloatRegister(), from->as_FloatRegister());
5718         } else {
5719           ShouldNotReachHere();
5720         }
5721       } else {
5722         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
5723         Address to_addr = Address(sp, st_off);
5724         if (from->is_FloatRegister()) {
5725           if (bt == T_DOUBLE) {
5726              strd(from->as_FloatRegister(), to_addr);
5727           } else {
5728              assert(bt == T_FLOAT, "must be float");
5729              strs(from->as_FloatRegister(), to_addr);
5730           }
5731         } else {
5732           str(from->as_Register(), to_addr);
5733         }
5734       }
5735     } else {
5736       Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size);
5737       if (to->is_reg()) {
5738         if (to->is_FloatRegister()) {
5739           if (bt == T_DOUBLE) {
5740             ldrd(to->as_FloatRegister(), from_addr);
5741           } else {
5742             assert(bt == T_FLOAT, "must be float");
5743             ldrs(to->as_FloatRegister(), from_addr);
5744           }
5745         } else {
5746           ldr(to->as_Register(), from_addr);
5747         }
5748       } else {
5749         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
5750         ldr(rscratch1, from_addr);
5751         str(rscratch1, Address(sp, st_off));
5752       }
5753     }
5754   }
5755 
5756   // Update register states
5757   reg_state[from->value()] = reg_writable;
5758   reg_state[to->value()] = reg_written;
5759   return true;
5760 }
5761 
5762 // Calculate the extra stack space required for packing or unpacking inline
5763 // args and adjust the stack pointer
5764 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
5765   int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
5766   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
5767   assert(sp_inc > 0, "sanity");
5768 
5769   // Save a copy of the FP and LR here for deoptimization patching and frame walking
5770   stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
5771 
5772   // Adjust the stack pointer. This will be repaired on return by MacroAssembler::remove_frame
5773   if (sp_inc < (1 << 9)) {
5774     sub(sp, sp, sp_inc);   // Fits in an immediate
5775   } else {
5776     mov(rscratch1, sp_inc);
5777     sub(sp, sp, rscratch1);
5778   }
5779 
5780   return sp_inc + 2 * wordSize;  // Account for the FP/LR space
5781 }
5782 
5783 // Read all fields from an inline type oop and store the values in registers/stack slots
5784 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
5785                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
5786                                           RegState reg_state[]) {
5787   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
5788   assert(from->is_valid(), "source must be valid");
5789   bool progress = false;
5790 #ifdef ASSERT
5791   const int start_offset = offset();
5792 #endif
5793 
5794   Label L_null, L_notNull;
5795   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
5796   Register tmp1 = r10;
5797   Register tmp2 = r11;
5798   Register fromReg = noreg;
5799   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
5800   bool done = true;
5801   bool mark_done = true;
5802   VMReg toReg;
5803   BasicType bt;
5804   // Check if argument requires a null check
5805   bool null_check = false;
5806   VMReg nullCheckReg;
5807   while (stream.next(nullCheckReg, bt)) {
5808     if (sig->at(stream.sig_index())._offset == -1) {
5809       null_check = true;
5810       break;
5811     }
5812   }
5813   stream.reset(sig_index, to_index);
5814   while (stream.next(toReg, bt)) {
5815     assert(toReg->is_valid(), "destination must be valid");
5816     int idx = (int)toReg->value();
5817     if (reg_state[idx] == reg_readonly) {
5818       if (idx != from->value()) {
5819         mark_done = false;
5820       }
5821       done = false;
5822       continue;
5823     } else if (reg_state[idx] == reg_written) {
5824       continue;
5825     }
5826     assert(reg_state[idx] == reg_writable, "must be writable");
5827     reg_state[idx] = reg_written;
5828     progress = true;
5829 
5830     if (fromReg == noreg) {
5831       if (from->is_reg()) {
5832         fromReg = from->as_Register();
5833       } else {
5834         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size;
5835         ldr(tmp1, Address(sp, st_off));
5836         fromReg = tmp1;
5837       }
5838       if (null_check) {
5839         // Nullable inline type argument, emit null check
5840         cbz(fromReg, L_null);
5841       }
5842     }
5843     int off = sig->at(stream.sig_index())._offset;
5844     if (off == -1) {
5845       assert(null_check, "Missing null check");
5846       if (toReg->is_stack()) {
5847         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
5848         mov(tmp2, 1);
5849         str(tmp2, Address(sp, st_off));
5850       } else {
5851         mov(toReg->as_Register(), 1);
5852       }
5853       continue;
5854     }
5855     assert(off > 0, "offset in object should be positive");
5856     Address fromAddr = Address(fromReg, off);
5857     if (!toReg->is_FloatRegister()) {
5858       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
5859       if (is_reference_type(bt)) {
5860         load_heap_oop(dst, fromAddr);
5861       } else {
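             // T_CHAR and T_BOOLEAN are the only zero-extended (unsigned) Java
             // types; all other integral types are sign-extended.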
5862         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
5863         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
5864       }
5865       if (toReg->is_stack()) {
5866         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
5867         str(dst, Address(sp, st_off));
5868       }
5869     } else if (bt == T_DOUBLE) {
5870       ldrd(toReg->as_FloatRegister(), fromAddr);
5871     } else {
5872       assert(bt == T_FLOAT, "must be float");
5873       ldrs(toReg->as_FloatRegister(), fromAddr);
5874     }
5875   }
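       // If some destinations were still read-only (done == false), L_null is
       // bound below so that a null source merely skips the loads emitted in
       // this pass; the caller retries the helper, and the final (done) pass
       // emits the null path that zeroes the IsInit and oop fields.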
5876   if (progress && null_check) {
5877     if (done) {
5878       b(L_notNull);
5879       bind(L_null);
5880       // Set IsInit field to zero to signal that the argument is null.
5881       // Also set all oop fields to zero to make the GC happy.
5882       stream.reset(sig_index, to_index);
5883       while (stream.next(toReg, bt)) {
5884         if (sig->at(stream.sig_index())._offset == -1 ||
5885             bt == T_OBJECT || bt == T_ARRAY) {
5886           if (toReg->is_stack()) {
5887             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
5888             str(zr, Address(sp, st_off));
5889           } else {
5890             mov(toReg->as_Register(), zr);
5891           }
5892         }
5893       }
5894       bind(L_notNull);
5895     } else {
5896       bind(L_null);
5897     }
5898   }
5899 
5900   sig_index = stream.sig_index();
5901   to_index = stream.regs_index();
5902 
5903   if (mark_done && reg_state[from->value()] != reg_written) {
5904     // This is okay because no one else will write to that slot
5905     reg_state[from->value()] = reg_writable;
5906   }
5907   from_index--;
5908   assert(progress || (start_offset == offset()), "should not emit code");
5909   return done;
5910 }
5911 
5912 // Pack fields back into an inline type oop
5913 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
5914                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
5915                                         RegState reg_state[], Register val_array) {
5916   assert(sig->at(sig_index)._bt == T_PRIMITIVE_OBJECT, "should be at end delimiter");
5917   assert(to->is_valid(), "destination must be valid");
5918 
5919   if (reg_state[to->value()] == reg_written) {
5920     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
5921     return true; // Already written
5922   }
5923 
5924   // The GC barrier expanded by store_heap_oop below may call into the
5925   // runtime so use callee-saved registers for any values that need to be
5926   // preserved. The GC barrier assembler should take care of saving the
5927   // Java argument registers.
5928   // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it contains a spilled value?
5929   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
5930   Register val_obj_tmp = r21;
5931   Register from_reg_tmp = r22;
5932   Register tmp1 = r14;
5933   Register tmp2 = r13;
5934   Register tmp3 = r12;
5935   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
5936 
5937   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
5938 
5939   if (reg_state[to->value()] == reg_readonly) {
5940     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
5941       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
5942       return false; // Not yet writable
5943     }
5944     val_obj = val_obj_tmp;
5945   }
5946 
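       // Byte offset of this argument's pre-allocated value object in the oop
       // array val_array.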
5947   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_PRIMITIVE_OBJECT);
5948   load_heap_oop(val_obj, Address(val_array, index));
5949 
5950   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
5951   VMReg fromReg;
5952   BasicType bt;
5953   Label L_null;
5954   while (stream.next(fromReg, bt)) {
5955     assert(fromReg->is_valid(), "source must be valid");
5956     reg_state[fromReg->value()] = reg_writable;
5957 
5958     int off = sig->at(stream.sig_index())._offset;
5959     if (off == -1) {
5960       // Nullable inline type argument, emit null check
5961       Label L_notNull;
5962       if (fromReg->is_stack()) {
5963         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
5964         ldr(tmp2, Address(sp, ld_off));
5965         cbnz(tmp2, L_notNull);
5966       } else {
5967         cbnz(fromReg->as_Register(), L_notNull);
5968       }
5969       mov(val_obj, 0);
5970       b(L_null);
5971       bind(L_notNull);
5972       continue;
5973     }
5974 
5975     assert(off > 0, "offset in object should be positive");
5976     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
5977 
5978     // Pack the scalarized field into the value object.
5979     Address dst(val_obj, off);
5980 
5981     if (!fromReg->is_FloatRegister()) {
5982       Register src;
5983       if (fromReg->is_stack()) {
5984         src = from_reg_tmp;
5985         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
5986         load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
5987       } else {
5988         src = fromReg->as_Register();
5989       }
5990       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
5991       if (is_reference_type(bt)) {
5992         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
5993       } else {
5994         store_sized_value(dst, src, size_in_bytes);
5995       }
5996     } else if (bt == T_DOUBLE) {
5997       strd(fromReg->as_FloatRegister(), dst);
5998     } else {
5999       assert(bt == T_FLOAT, "must be float");
6000       strs(fromReg->as_FloatRegister(), dst);
6001     }
6002   }
6003   bind(L_null);
6004   sig_index = stream.sig_index();
6005   from_index = stream.regs_index();
6006 
6007   assert(reg_state[to->value()] == reg_writable, "must have already been read");
6008   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
6009   assert(success, "to register must be writable");
6010 
6011   return true;
6012 }
6013 
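     // Register reserved for spilling during inline-type argument shuffling.
     // This is why the pack/unpack helpers above must avoid r14 (and v0 for
     // floats) as temporaries.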
6014 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6015   return (reg->is_FloatRegister()) ? v0->as_VMReg() : r14->as_VMReg();
6016 }
6017 
6018 void MacroAssembler::cache_wb(Address line) {
6019   assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
6020   assert(line.index() == noreg, "index should be noreg");
6021   assert(line.offset() == 0, "offset should be 0");
6022   // would like to assert this
6023   // assert(line._ext.shift == 0, "shift should be zero");
6024   if (VM_Version::features() & VM_Version::CPU_DCPOP) {
6025     // writeback using clean virtual address to point of persistence
6026     dc(Assembler::CVAP, line.base());
6027   } else {
6028     // no need to generate anything as Unsafe.writebackMemory should
6029     // never invoke this stub
6030   }
6031 }
6032 
6033 void MacroAssembler::cache_wbsync(bool is_pre) {
6034   // we only need a barrier post sync
6035   if (!is_pre) {
6036     membar(Assembler::AnyAny);
6037   }
6038 }
6039 
6040 void MacroAssembler::verify_sve_vector_length() {
6041   // Make sure that native code does not change the SVE vector length.
6042   if (!UseSVE) return;
6043   Label verify_ok;
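       // Compute the current vector length in bytes: INCB adds the number of
       // byte-sized elements per vector to the zeroed rscratch1.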
6044   movw(rscratch1, zr);
6045   sve_inc(rscratch1, B);
6046   subsw(zr, rscratch1, VM_Version::get_initial_sve_vector_length());
6047   br(EQ, verify_ok);
6048   stop("Error: SVE vector length has changed since jvm startup");
6049   bind(verify_ok);
6050 }
6051 
6052 void MacroAssembler::verify_ptrue() {
6053   Label verify_ok;
6054   if (!UseSVE) {
6055     return;
6056   }
6057   sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
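       // If p7 is all true, that count equals the vector length in bytes, so
       // decrementing by the byte-element count (DECB) leaves exactly zero.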
6058   sve_dec(rscratch1, B);
6059   cbz(rscratch1, verify_ok);
6060   stop("Error: the preserved predicate register (p7) elements are not all true");
6061   bind(verify_ok);
6062 }
6063 
6064 void MacroAssembler::safepoint_isb() {
6065   isb();
6066 #ifndef PRODUCT
6067   if (VerifyCrossModifyFence) {
6068     // Clear the thread state.
6069     strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
6070   }
6071 #endif
6072 }
6073 
6074 #ifndef PRODUCT
6075 void MacroAssembler::verify_cross_modify_fence_not_required() {
6076   if (VerifyCrossModifyFence) {
6077     // Check if thread needs a cross modify fence.
6078     ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
6079     Label fence_not_required;
6080     cbz(rscratch1, fence_not_required);
6081     // If it does then fail.
6082     lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure));
6083     mov(c_rarg0, rthread);
6084     blr(rscratch1);
6085     bind(fence_not_required);
6086   }
6087 }
6088 #endif
6089 
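     // Emit the spin-wait hint sequence selected at VM startup (see
     // VM_Version::spin_wait_desc). As a hedged example, assuming the AArch64
     // OnSpinWaitInst/OnSpinWaitInstCount options: -XX:OnSpinWaitInst=isb
     // -XX:OnSpinWaitInstCount=2 would emit two ISBs here.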
6090 void MacroAssembler::spin_wait() {
6091   for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
6092     switch (VM_Version::spin_wait_desc().inst()) {
6093       case SpinWait::NOP:
6094         nop();
6095         break;
6096       case SpinWait::ISB:
6097         isb();
6098         break;
6099       case SpinWait::YIELD:
6100         yield();
6101         break;
6102       default:
6103         ShouldNotReachHere();
6104     }
6105   }
6106 }
6107 
6108 // Stack frame creation/removal
6109 
6110 void MacroAssembler::enter(bool strip_ret_addr) {
6111   if (strip_ret_addr) {
6112     // Addresses can only be signed once. If there are multiple nested frames being created
6113     // in the same function, then the return address needs stripping first.
6114     strip_return_address();
6115   }
6116   protect_return_address();
6117   stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
6118   mov(rfp, sp);
6119 }
6120 
6121 void MacroAssembler::leave() {
6122   mov(sp, rfp);
6123   ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
6124   authenticate_return_address();
6125 }
6126 
6127 // ROP Protection
6128 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
6129 // destroying stack frames or whenever directly loading/storing the LR to memory.
6130 // If ROP protection is not set then these functions are no-ops.
6131 // For more details on PAC see pauth_aarch64.hpp.
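     //
     // A sketch of the typical pairing (cf. enter()/leave() above):
     //   protect_return_address();                       // pacia lr, rfp
     //   stp(rfp, lr, Address(pre(sp, -2 * wordSize)));  // store the signed LR
     //   ...
     //   ldp(rfp, lr, Address(post(sp, 2 * wordSize)));  // reload the signed LR
     //   authenticate_return_address();                  // autia lr, rfp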
6132 
6133 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
6134 // Uses the FP as the modifier.
6135 //
6136 void MacroAssembler::protect_return_address() {
6137   if (VM_Version::use_rop_protection()) {
6138     check_return_address();
6139     // The standard convention for C code is to use paciasp, which uses SP as the modifier. This
6140     // works because in C code, FP and SP match on function entry. In the JDK, SP and FP may not
6141     // match, so instead explicitly use the FP.
6142     pacia(lr, rfp);
6143   }
6144 }
6145 
6146 // Sign the return address in the given register. Use before updating the LR in the existing
6147 // stack frame for the current function.
6148 // Uses the FP from the start of the function as the modifier - which is stored at the address of
6149 // the current FP.
6150 //
6151 void MacroAssembler::protect_return_address(Register return_reg, Register temp_reg) {
6152   if (VM_Version::use_rop_protection()) {
6153     assert(PreserveFramePointer, "PreserveFramePointer must be set for ROP protection");
6154     check_return_address(return_reg);
6155     ldr(temp_reg, Address(rfp));
6156     pacia(return_reg, temp_reg);
6157   }
6158 }
6159 
6160 // Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
6161 //
6162 void MacroAssembler::authenticate_return_address(Register return_reg) {
6163   if (VM_Version::use_rop_protection()) {
6164     autia(return_reg, rfp);
6165     check_return_address(return_reg);
6166   }
6167 }
6168 
6169 // Authenticate the return address in the given register. Use before updating the LR in the
6170 // existing stack frame for the current function.
6171 // Uses the FP from the start of the function as the modifier - which is stored at the address of
6172 // the current FP.
6173 //
6174 void MacroAssembler::authenticate_return_address(Register return_reg, Register temp_reg) {
6175   if (VM_Version::use_rop_protection()) {
6176     assert(PreserveFramePointer, "PreserveFramePointer must be set for ROP protection");
6177     ldr(temp_reg, Address(rfp));
6178     autia(return_reg, temp_reg);
6179     check_return_address(return_reg);
6180   }
6181 }
6182 
6183 // Strip any PAC data from LR without performing any authentication. Use with caution - only if
6184 // there is no guaranteed way of authenticating the LR.
6185 //
6186 void MacroAssembler::strip_return_address() {
6187   if (VM_Version::use_rop_protection()) {
6188     xpaclri();
6189   }
6190 }
6191 
6192 #ifndef PRODUCT
6193 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
6194 // occur when the pointer is used, i.e. when the program returns to the invalid LR. At this point
6195 // it is difficult to debug back to the callee function.
6196 // This function simply loads from the address in the given register.
6197 // Use directly after authentication to catch authentication failures.
6198 // Also use before signing to check that the pointer is valid and hasn't already been signed.
6199 //
6200 void MacroAssembler::check_return_address(Register return_reg) {
6201   if (VM_Version::use_rop_protection()) {
6202     ldr(zr, Address(return_reg));
6203   }
6204 }
6205 #endif