/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ull << 48), "48-bit overflow in address constant");
  intptr_t offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = target-branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      uint64_t dest = (uint64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;

      // We handle 4 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 3 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str, add or movk instruction. Otherwise we could accidentally end
      // up treating a type 4 relocation as a type 1, 2 or 3 just because it happened
      // to be followed by a random unrelated ldr/str, add or movk instruction.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                    21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                   Instruction_aarch64::extract(insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
        // movk #imm16<<32
        Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32);
        uintptr_t dest = ((uintptr_t)target & 0xffffffffULL) | ((uintptr_t)branch & 0xffff00000000ULL);
        uintptr_t pc_page = (uintptr_t)branch >> 12;
        uintptr_t adr_page = (uintptr_t)dest >> 12;
        offset = adr_page - pc_page;
        instructions = 2;
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}
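
// Worked example (illustration only, not emitted code): patching a type-2
// adrp/add pair from the table above. With the adrp at 0x7fff00000000 and
// target == 0x7fff12345678:
//   adrp x8, <target_page>   // page delta 0x7fff12345 - 0x7fff00000 = 0x12345
//                            // goes into immhi/immlo (bits 23:5 and 30:29)
//   add  x8, x8, #0x678      // offset_lo = 0x678 goes into bits 21:10
// Both instructions are rewritten, so 2 * 4 = 8 bytes is returned.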

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits).  We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}
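
// For reference, the two shapes patch_oop expects, inferred from the opcode
// checks above (sketch only):
//   narrow oop:  movz Rd, #(n >> 16), lsl #16     // 0b11010010101 = movz, hw=1
//                movk Rd, #(n & 0xffff)
//   wide oop:    movz Rd, #(dest & 0xffff)
//                movk Rd, #((dest >> 16) & 0xffff), lsl #16
//                movk Rd, #((dest >> 32) & 0xffff), lsl #32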

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}
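
// The sequence expected here mirrors the narrow-oop case above; the asserts
// verify exactly this shape:
//   movz Rd, #(n >> 16), lsl #16
//   movk Rd, #(n & 0xffff)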

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  intptr_t offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110  &&
               Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
          target_page = (target_page & 0xffffffff) |
                         ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
        }
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    return 0;
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
  if (acquire) {
    lea(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
    ldar(rscratch1, rscratch1);
  } else {
    ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, rscratch1);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}
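
// Roughly, the code emitted above for the non-return, non-acquire case is:
//   ldr  rscratch1, [rthread, #polling_word_offset]
//   tbnz rscratch1, #log2(poll_bit), slow_path
// At a return we instead compare sp (in nmethods) or rfp against the
// polling word, which then doubles as the stack watermark.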

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != NULL, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
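
// Example of the middle regime: with a 200M code cache a plain branch
// (+/-128M reach) cannot span the whole cache, but the non-nmethod segment
// (stubs and blobs) is kept between the nmethod segments, which is what
// keeps it reachable from anywhere; only nmethod targets need the
// far-branch sequence.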

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}
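
// Schematically, the two shapes far_call can emit (tmp is rscratch1 by
// default):
//   near:  bl   entry
//   far:   adrp tmp, <entry page>
//          add  tmp, tmp, #<offset within page>
//          blr  tmp
// far_jump below mirrors this with b/br and also returns the size of the
// code it emitted.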

int MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
    // testing if reserved zone needs to be enabled
    Label no_reserved_zone_enabling;

    ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    cmp(sp, rscratch1);
    br(Assembler::LO, no_reserved_zone_enabling);

    enter();   // LR and FP are live.
    lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    leave();

    // We have already removed our own frame.
    // throw_delayed_StackOverflowError will think that it's been
    // called by our caller.
    lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
    br(rscratch1);
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
}

void MacroAssembler::biased_locking_enter(Register lock_reg,
                                          Register obj_reg,
                                          Register swap_reg,
                                          Register tmp_reg,
                                          bool swap_reg_contains_mark,
                                          Label& done,
                                          Label* slow_case,
                                          BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
  assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  if (!swap_reg_contains_mark) {
    ldr(swap_reg, mark_addr);
  }
  andr(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place);
  cmp(tmp_reg, (u1)markWord::biased_lock_pattern);
  br(Assembler::NE, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orr(tmp_reg, tmp_reg, rthread);
  eor(tmp_reg, swap_reg, tmp_reg);
  andr(tmp_reg, tmp_reg, ~((int) markWord::age_mask_in_place));
  if (counters != NULL) {
    Label around;
    cbnz(tmp_reg, around);
    atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1, rscratch2);
    b(done);
    bind(around);
  } else {
    cbz(tmp_reg, done);
  }

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andr(rscratch1, tmp_reg, markWord::biased_lock_mask_in_place);
  cbnz(rscratch1, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  andr(rscratch1, tmp_reg, markWord::epoch_mask_in_place);
  cbnz(rscratch1, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  {
    Label here;
    mov(rscratch1, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
    // interpreter runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
                  tmp_reg, rscratch1, rscratch2);
    }
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
                  tmp_reg, rscratch1, rscratch2);
    }
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here, nope;
    load_prototype_header(tmp_reg, obj_reg);
    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
    bind(here);

    // Fall through to the normal CAS-based lock, because no matter what
    // the result of the above CAS, some thread must have succeeded in
    // removing the bias bit from the object's header.
    if (counters != NULL) {
      atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
                  rscratch1, rscratch2);
    }
    bind(nope);
  }

  bind(cas_label);
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andr(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
  cmp(temp_reg, (u1)markWord::biased_lock_pattern);
  br(Assembler::EQ, done);
}
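
// For orientation, the low mark-word bits tested by the two routines above
// (see markWord.hpp):
//   [ hash/thread ... | epoch:2 | age:4 | biased_lock:1 | lock:2 ]
// biased_lock_pattern (0b101) is the biased_lock bit set above the
// "unlocked" (0b01) lock bits; both enter and exit match on it.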

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline.  If the code cache is small
// trampolines won't be emitted.

address MacroAssembler::trampoline_call(Address entry, CodeBuffer* cbuf) {
  assert(JavaThread::current()->is_Compiler_thread(), "just checking");
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  bool need_trampoline = far_branches();
  if (!need_trampoline && entry.rspec().type() == relocInfo::runtime_call_type && !CodeCache::contains(entry.target())) {
    // If it is a runtime call of an address outside small CodeCache,
    // we need to check whether it is in range.
    address target = entry.target();
    assert(target < CodeCache::low_bound() || target >= CodeCache::high_bound(), "target is inside CodeCache");
    // Case 1: -------T-------L====CodeCache====H-------
    //                ^-------longest branch---|
    // Case 2: -------L====CodeCache====H-------T-------
    //                |-------longest branch ---^
    address longest_branch_start = (target < CodeCache::low_bound()) ? CodeCache::high_bound() - NativeInstruction::instruction_size
                                                                     : CodeCache::low_bound();
    need_trampoline = !reachable_from_branch_at(longest_branch_start, target);
  }

  // We need a trampoline if branches are far.
  if (need_trampoline) {
    bool in_scratch_emit_size = false;
#ifdef COMPILER2
    // We don't want to emit a trampoline if C2 is generating dummy
    // code during its branch shortening phase.
    CompileTask* task = ciEnv::current()->task();
    in_scratch_emit_size =
      (task != NULL && is_c2_compile(task->comp_level()) &&
       Compile::current()->output()->in_scratch_emit_size());
#endif
    if (!in_scratch_emit_size) {
      address stub = emit_trampoline_stub(offset(), entry.target());
      if (stub == NULL) {
        postcond(pc() == badAddress);
        return NULL; // CodeCache is full
      }
    }
  }

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
  if (!need_trampoline) {
    bl(entry.target());
  } else {
    bl(pc());
  }
  // just need to return a non-null address
  postcond(pc() != badAddress);
  return pc();
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(NativeInstruction::instruction_size
                   + NativeCallTrampolineStub::instruction_size);
  if (stub == NULL) {
    return NULL;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}
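
// The stub laid out above, for reference (wordSize-aligned):
//   ldr rscratch1, <pc + 8>   // load destination from the literal below
//   br  rscratch1             // LR still points back to the call site
//   <dest: 8 bytes>           // at NativeCallTrampolineStub::data_offset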

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectStaticCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, (Metadata*)NULL);

  // Jump to the entry point of the i2c stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}
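
// Resulting layout, schematically (set_to_interpreted patches the two
// constants):
//   isb
//   movz/movk/movk rmethod,   <Method*>     // mov_metadata
//   movz/movk/movk rscratch1, <i2c entry>   // movptr
//   br   rscratch1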

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null.  A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset_in_bytes() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = in_bytes(Klass::vtable_start_offset());
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}
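
// For a constant vtable index this reduces to a single load, roughly:
//   ldr method_result, [recv_klass, #(vtable_start + index * wordSize
//                                     + method_offset)]
// with form_address falling back to arithmetic in rscratch1 when the
// offset does not fit the addressing mode.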

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    subs(zr, super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer-sized words at [addr] for an occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4-byte words at [addr] for an occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  RegSet pushed_registers;
  if (!IS_A_TEMP(r2))    pushed_registers += r2;
  if (!IS_A_TEMP(r5))    pushed_registers += r5;

  if (super_klass != r0) {
    if (!IS_A_TEMP(r0))   pushed_registers += r0;
  }

  push(pushed_registers, sp);

  // Get super_klass value into r0 (even if it was in r5 or r2).
  if (super_klass != r0) {
    mov(r0, super_klass);
  }

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success.  Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}

void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
  assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
  assert_different_registers(klass, rthread, scratch);

  Label L_fallthrough, L_tmp;
  if (L_fast_path == NULL) {
    L_fast_path = &L_fallthrough;
  } else if (L_slow_path == NULL) {
    L_slow_path = &L_fallthrough;
  }
  // Fast path check: class is fully initialized
  ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
  subs(zr, scratch, InstanceKlass::fully_initialized);
  br(Assembler::EQ, *L_fast_path);

  // Fast path check: current thread is initializer thread
  ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
  cmp(rthread, scratch);

  if (L_slow_path == &L_fallthrough) {
    br(Assembler::EQ, *L_fast_path);
    bind(*L_slow_path);
  } else if (L_fast_path == &L_fallthrough) {
    br(Assembler::NE, *L_slow_path);
    bind(*L_fast_path);
  } else {
    Unimplemented();
  }
}

void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
1335     ss.print("verify_oop: %s: %s", reg->name(), s);
1336     b = code_string(ss.as_string());
1337   }
1338   BLOCK_COMMENT("verify_oop {");
1339 
1340   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1341   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1342 
1343   mov(r0, reg);
1344   movptr(rscratch1, (uintptr_t)(address)b);
1345 
1346   // call indirectly to solve generation ordering problem
1347   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1348   ldr(rscratch2, Address(rscratch2));
1349   blr(rscratch2);
1350 
1351   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1352   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1353 
1354   BLOCK_COMMENT("} verify_oop");
1355 }
1356 
1357 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1358   if (!VerifyOops) return;
1359 
1360   const char* b = NULL;
1361   {
1362     ResourceMark rm;
1363     stringStream ss;
1364     ss.print("verify_oop_addr: %s", s);
1365     b = code_string(ss.as_string());
1366   }
1367   BLOCK_COMMENT("verify_oop_addr {");
1368 
1369   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1370   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1371 
1372   // addr may contain sp so we will have to adjust it based on the
1373   // pushes that we just did.
1374   if (addr.uses(sp)) {
1375     lea(r0, addr);
1376     ldr(r0, Address(r0, 4 * wordSize));
1377   } else {
1378     ldr(r0, addr);
1379   }
1380   movptr(rscratch1, (uintptr_t)(address)b);
1381 
1382   // call indirectly to solve generation ordering problem
1383   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1384   ldr(rscratch2, Address(rscratch2));
1385   blr(rscratch2);
1386 
1387   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1388   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1389 
1390   BLOCK_COMMENT("} verify_oop_addr");
1391 }
1392 
1393 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1394                                          int extra_slot_offset) {
1395   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
1396   int stackElementSize = Interpreter::stackElementSize;
1397   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
1398 #ifdef ASSERT
1399   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
1400   assert(offset1 - offset == stackElementSize, "correct arithmetic");
1401 #endif
1402   if (arg_slot.is_constant()) {
1403     return Address(esp, arg_slot.as_constant() * stackElementSize
1404                    + offset);
1405   } else {
1406     add(rscratch1, esp, arg_slot.as_register(),
1407         ext::uxtx, exact_log2(stackElementSize));
1408     return Address(rscratch1, offset);
1409   }
1410 }
1411 
1412 void MacroAssembler::call_VM_leaf_base(address entry_point,
1413                                        int number_of_arguments,
1414                                        Label *retaddr) {
1415   Label E, L;
1416 
1417   stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
1418 
1419   mov(rscratch1, entry_point);
1420   blr(rscratch1);
1421   if (retaddr)
1422     bind(*retaddr);
1423 
1424   ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
1425 }
1426 
1427 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1428   call_VM_leaf_base(entry_point, number_of_arguments);
1429 }
1430 
1431 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1432   pass_arg0(this, arg_0);
1433   call_VM_leaf_base(entry_point, 1);
1434 }
1435 
1436 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1437   pass_arg0(this, arg_0);
1438   pass_arg1(this, arg_1);
1439   call_VM_leaf_base(entry_point, 2);
1440 }
1441 
1442 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1443                                   Register arg_1, Register arg_2) {
1444   pass_arg0(this, arg_0);
1445   pass_arg1(this, arg_1);
1446   pass_arg2(this, arg_2);
1447   call_VM_leaf_base(entry_point, 3);
1448 }
1449 
1450 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1451   pass_arg0(this, arg_0);
1452   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1453 }
1454 
1455 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1457   assert(arg_0 != c_rarg1, "smashed arg");
1458   pass_arg1(this, arg_1);
1459   pass_arg0(this, arg_0);
1460   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1461 }
1462 
1463 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1464   assert(arg_0 != c_rarg2, "smashed arg");
1465   assert(arg_1 != c_rarg2, "smashed arg");
1466   pass_arg2(this, arg_2);
1467   assert(arg_0 != c_rarg1, "smashed arg");
1468   pass_arg1(this, arg_1);
1469   pass_arg0(this, arg_0);
1470   MacroAssembler::call_VM_leaf_base(entry_point, 3);
1471 }
1472 
1473 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1474   assert(arg_0 != c_rarg3, "smashed arg");
1475   assert(arg_1 != c_rarg3, "smashed arg");
1476   assert(arg_2 != c_rarg3, "smashed arg");
1477   pass_arg3(this, arg_3);
1478   assert(arg_0 != c_rarg2, "smashed arg");
1479   assert(arg_1 != c_rarg2, "smashed arg");
1480   pass_arg2(this, arg_2);
1481   assert(arg_0 != c_rarg1, "smashed arg");
1482   pass_arg1(this, arg_1);
1483   pass_arg0(this, arg_0);
1484   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1485 }
1486 
1487 void MacroAssembler::null_check(Register reg, int offset) {
1488   if (needs_explicit_null_check(offset)) {
1489     // provoke OS NULL exception if reg = NULL by
1490     // accessing M[reg] w/o changing any registers
1491     // NOTE: this is plenty to provoke a segv
1492     ldr(zr, Address(reg));
1493   } else {
1494     // nothing to do, (later) access of M[reg + offset]
1495     // will provoke OS NULL exception if reg = NULL
1496   }
1497 }
1498 
1499 // MacroAssembler protected routines needed to implement
1500 // public methods
1501 
1502 void MacroAssembler::mov(Register r, Address dest) {
1503   code_section()->relocate(pc(), dest.rspec());
1504   uint64_t imm64 = (uint64_t)dest.target();
1505   movptr(r, imm64);
1506 }
1507 
1508 // Move a constant pointer into r.  In AArch64 mode the virtual
1509 // address space is 48 bits in size, so we only need three
1510 // instructions to create a patchable instruction sequence that can
1511 // reach anywhere.
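// Worked example (illustrative): movptr(r0, 0x123456789abcUL) emits
//   movz r0, #0x9abc
//   movk r0, #0x5678, lsl #16
//   movk r0, #0x1234, lsl #32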
1512 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1513 #ifndef PRODUCT
1514   {
1515     char buffer[64];
1516     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
1517     block_comment(buffer);
1518   }
1519 #endif
1520   assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
1521   movz(r, imm64 & 0xffff);
1522   imm64 >>= 16;
1523   movk(r, imm64 & 0xffff, 16);
1524   imm64 >>= 16;
1525   movk(r, imm64 & 0xffff, 32);
1526 }
1527 
1528 // Macro to mov replicated immediate to vector register.
1529 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is,
1530 //        the upper 56/48/32 bits must be zeros for B/H/S type.
1531 // Vd will get the following values for different arrangements in T
1532 //   imm64 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
1533 //   imm64 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
1534 //   imm64 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
1535 //   imm64 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
1536 //   imm64 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
1537 //   imm64 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
1538 //   imm64 == hex abcdefgh  T1D:  Vd = 00000000abcdefgh
1539 //   imm64 == hex abcdefgh  T2D:  Vd = 00000000abcdefgh00000000abcdefgh
1540 // Clobbers rscratch1
1541 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) {
1542   assert(T != T1Q, "unsupported");
1543   if (T == T1D || T == T2D) {
1544     int imm = operand_valid_for_movi_immediate(imm64, T);
1545     if (-1 != imm) {
1546       movi(Vd, T, imm);
1547     } else {
1548       mov(rscratch1, imm64);
1549       dup(Vd, T, rscratch1);
1550     }
1551     return;
1552   }
1553 
1554 #ifdef ASSERT
1555   if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)");
1556   if (T == T4H || T == T8H) assert((imm64  & ~0xffff) == 0, "extraneous bits (T4H/T8H)");
1557   if (T == T2S || T == T4S) assert((imm64  & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)");
1558 #endif
1559   int shift = operand_valid_for_movi_immediate(imm64, T);
1560   uint32_t imm32 = imm64 & 0xffffffffULL;
1561   if (shift >= 0) {
1562     movi(Vd, T, (imm32 >> shift) & 0xff, shift);
1563   } else {
1564     movw(rscratch1, imm32);
1565     dup(Vd, T, rscratch1);
1566   }
1567 }
1568 
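// Build an arbitrary 64-bit constant in dst.  Prefer a single ORR with
// a logical immediate; otherwise use the shortest MOVZ/MOVN + MOVK
// sequence.  Worked example (illustrative): for imm64 == 0x4200001234
// the 16-bit halves are {0x1234, 0x0000, 0x0042, 0x0000}, so
// zero_count == 2 and we emit
//   movz dst, #0x1234
//   movk dst, #0x42, lsl #32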
1569 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
1570 {
1571 #ifndef PRODUCT
1572   {
1573     char buffer[64];
1574     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1575     block_comment(buffer);
1576   }
1577 #endif
1578   if (operand_valid_for_logical_immediate(false, imm64)) {
1579     orr(dst, zr, imm64);
1580   } else {
1581     // we can use a combination of MOVZ or MOVN with
1582     // MOVK to build up the constant
1583     uint64_t imm_h[4];
1584     int zero_count = 0;
1585     int neg_count = 0;
1586     int i;
1587     for (i = 0; i < 4; i++) {
1588       imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
1589       if (imm_h[i] == 0) {
1590         zero_count++;
1591       } else if (imm_h[i] == 0xffffL) {
1592         neg_count++;
1593       }
1594     }
1595     if (zero_count == 4) {
1596       // one MOVZ will do
1597       movz(dst, 0);
1598     } else if (neg_count == 4) {
1599       // one MOVN will do
1600       movn(dst, 0);
1601     } else if (zero_count == 3) {
1602       for (i = 0; i < 4; i++) {
1603         if (imm_h[i] != 0L) {
1604           movz(dst, (uint32_t)imm_h[i], (i << 4));
1605           break;
1606         }
1607       }
1608     } else if (neg_count == 3) {
1609       // one MOVN will do
1610       for (int i = 0; i < 4; i++) {
1611         if (imm_h[i] != 0xffffL) {
1612           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
1613           break;
1614         }
1615       }
1616     } else if (zero_count == 2) {
1617       // one MOVZ and one MOVK will do
1618       for (i = 0; i < 3; i++) {
1619         if (imm_h[i] != 0L) {
1620           movz(dst, (uint32_t)imm_h[i], (i << 4));
1621           i++;
1622           break;
1623         }
1624       }
1625       for (;i < 4; i++) {
1626         if (imm_h[i] != 0L) {
1627           movk(dst, (uint32_t)imm_h[i], (i << 4));
1628         }
1629       }
1630     } else if (neg_count == 2) {
1631       // one MOVN and one MOVK will do
1632       for (i = 0; i < 4; i++) {
1633         if (imm_h[i] != 0xffffL) {
1634           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
1635           i++;
1636           break;
1637         }
1638       }
1639       for (;i < 4; i++) {
1640         if (imm_h[i] != 0xffffL) {
1641           movk(dst, (uint32_t)imm_h[i], (i << 4));
1642         }
1643       }
1644     } else if (zero_count == 1) {
1645       // one MOVZ and two MOVKs will do
1646       for (i = 0; i < 4; i++) {
1647         if (imm_h[i] != 0L) {
1648           movz(dst, (uint32_t)imm_h[i], (i << 4));
1649           i++;
1650           break;
1651         }
1652       }
1653       for (;i < 4; i++) {
1654         if (imm_h[i] != 0x0L) {
1655           movk(dst, (uint32_t)imm_h[i], (i << 4));
1656         }
1657       }
1658     } else if (neg_count == 1) {
1659       // one MOVN and two MOVKs will do
1660       for (i = 0; i < 4; i++) {
1661         if (imm_h[i] != 0xffffL) {
1662           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
1663           i++;
1664           break;
1665         }
1666       }
1667       for (;i < 4; i++) {
1668         if (imm_h[i] != 0xffffL) {
1669           movk(dst, (uint32_t)imm_h[i], (i << 4));
1670         }
1671       }
1672     } else {
1673       // use a MOVZ and 3 MOVKs (makes it easier to debug)
1674       movz(dst, (uint32_t)imm_h[0], 0);
1675       for (i = 1; i < 4; i++) {
1676         movk(dst, (uint32_t)imm_h[i], (i << 4));
1677       }
1678     }
1679   }
1680 }
1681 
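// 32-bit analogue of mov_immediate64: try ORRW with a logical
// immediate, otherwise at most two instructions are needed.  For
// example (illustrative), imm32 == 0x120000 becomes a single
//   movzw dst, #0x12, lsl #16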
1682 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
1683 {
1684 #ifndef PRODUCT
1685     {
1686       char buffer[64];
1687       snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
1688       block_comment(buffer);
1689     }
1690 #endif
1691   if (operand_valid_for_logical_immediate(true, imm32)) {
1692     orrw(dst, zr, imm32);
1693   } else {
1694     // we can use MOVZ, MOVN or two calls to MOVK to build up the
1695     // constant
1696     uint32_t imm_h[2];
1697     imm_h[0] = imm32 & 0xffff;
1698     imm_h[1] = ((imm32 >> 16) & 0xffff);
1699     if (imm_h[0] == 0) {
1700       movzw(dst, imm_h[1], 16);
1701     } else if (imm_h[0] == 0xffff) {
1702       movnw(dst, imm_h[1] ^ 0xffff, 16);
1703     } else if (imm_h[1] == 0) {
1704       movzw(dst, imm_h[0], 0);
1705     } else if (imm_h[1] == 0xffff) {
1706       movnw(dst, imm_h[0] ^ 0xffff, 0);
1707     } else {
1708       // use a MOVZ and MOVK (makes it easier to debug)
1709       movzw(dst, imm_h[0], 0);
1710       movkw(dst, imm_h[1], 16);
1711     }
1712   }
1713 }
1714 
1715 // Form an address from base + offset in Rd.  Rd may or may
1716 // not actually be used: you must use the Address that is returned.
1717 // It is up to you to ensure that the shift provided matches the size
1718 // of your data.
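// Worked example (illustrative): form_address(rscratch1, rfp, 0x40010, 3)
// cannot encode 0x40010 directly, but the offset is positive and
// 8-aligned, so the two-12-bit-chunk path below emits
//   add rscratch1, rfp, #0x40000
// and returns Address(rscratch1, 0x10).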
1719 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
1720   if (Address::offset_ok_for_immed(byte_offset, shift))
1721     // It fits; no need for any heroics
1722     return Address(base, byte_offset);
1723 
1724   // Don't do anything clever with negative or misaligned offsets
1725   unsigned mask = (1 << shift) - 1;
1726   if (byte_offset < 0 || byte_offset & mask) {
1727     mov(Rd, byte_offset);
1728     add(Rd, base, Rd);
1729     return Address(Rd);
1730   }
1731 
1732   // See if we can do this with two 12-bit offsets
1733   {
1734     uint64_t word_offset = byte_offset >> shift;
1735     uint64_t masked_offset = word_offset & 0xfff000;
1736     if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
1737         && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
1738       add(Rd, base, masked_offset << shift);
1739       word_offset -= masked_offset;
1740       return Address(Rd, word_offset << shift);
1741     }
1742   }
1743 
1744   // Do it the hard way
1745   mov(Rd, byte_offset);
1746   add(Rd, base, Rd);
1747   return Address(Rd);
1748 }
1749 
1750 void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
1751   if (UseLSE) {
1752     mov(tmp, 1);
1753     ldadd(Assembler::word, tmp, zr, counter_addr);
1754     return;
1755   }
1756   Label retry_load;
1757   if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
1758     prfm(Address(counter_addr), PSTL1STRM);
1759   bind(retry_load);
1760   // flush and load exclusive from the memory location
1761   ldxrw(tmp, counter_addr);
1762   addw(tmp, tmp, 1);
  // if we store+flush with no intervening write tmp will be zero
1764   stxrw(tmp2, tmp, counter_addr);
1765   cbnzw(tmp2, retry_load);
1766 }
1767 
1768 
1769 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
1770                                     bool want_remainder, Register scratch)
1771 {
1772   // Full implementation of Java idiv and irem.  The function
1773   // returns the (pc) offset of the div instruction - may be needed
1774   // for implicit exceptions.
1775   //
  // constraint : neither ra nor rb may be scratch
1778   //
1779   // input : ra: dividend
1780   //         rb: divisor
1781   //
1782   // result: either
1783   //         quotient  (= ra idiv rb)
1784   //         remainder (= ra irem rb)
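  //
  // The remainder is computed as ra - (ra sdiv rb) * rb via msubw,
  // matching Java's % semantics (the sign follows the dividend).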
1785 
1786   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1787 
1788   int idivl_offset = offset();
1789   if (! want_remainder) {
1790     sdivw(result, ra, rb);
1791   } else {
1792     sdivw(scratch, ra, rb);
1793     Assembler::msubw(result, scratch, rb, ra);
1794   }
1795 
1796   return idivl_offset;
1797 }
1798 
1799 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
1800                                     bool want_remainder, Register scratch)
1801 {
1802   // Full implementation of Java ldiv and lrem.  The function
1803   // returns the (pc) offset of the div instruction - may be needed
1804   // for implicit exceptions.
1805   //
  // constraint : neither ra nor rb may be scratch
1808   //
1809   // input : ra: dividend
1810   //         rb: divisor
1811   //
1812   // result: either
1813   //         quotient  (= ra idiv rb)
1814   //         remainder (= ra irem rb)
1815 
1816   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1817 
1818   int idivq_offset = offset();
1819   if (! want_remainder) {
1820     sdiv(result, ra, rb);
1821   } else {
1822     sdiv(scratch, ra, rb);
1823     Assembler::msub(result, scratch, rb, ra);
1824   }
1825 
1826   return idivq_offset;
1827 }
1828 
1829 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
1830   address prev = pc() - NativeMembar::instruction_size;
1831   address last = code()->last_insn();
1832   if (last != NULL && nativeInstruction_at(last)->is_Membar() && prev == last) {
1833     NativeMembar *bar = NativeMembar_at(prev);
1834     // We are merging two memory barrier instructions.  On AArch64 we
1835     // can do this simply by ORing them together.
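    // For example (illustrative), membar(LoadLoad) immediately followed
    // by membar(LoadStore) patches the first DMB to cover both
    // orderings instead of emitting a second barrier.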
1836     bar->set_kind(bar->get_kind() | order_constraint);
1837     BLOCK_COMMENT("merged membar");
1838   } else {
1839     code()->set_last_insn(pc());
1840     dmb(Assembler::barrier(order_constraint));
1841   }
1842 }
1843 
1844 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
1845   if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
1846     merge_ldst(rt, adr, size_in_bytes, is_store);
1847     code()->clear_last_insn();
1848     return true;
1849   } else {
    assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte loads/stores are supported.");
1851     const uint64_t mask = size_in_bytes - 1;
1852     if (adr.getMode() == Address::base_plus_offset &&
1853         (adr.offset() & mask) == 0) { // only supports base_plus_offset.
1854       code()->set_last_insn(pc());
1855     }
1856     return false;
1857   }
1858 }
1859 
1860 void MacroAssembler::ldr(Register Rx, const Address &adr) {
1861   // We always try to merge two adjacent loads into one ldp.
1862   if (!try_merge_ldst(Rx, adr, 8, false)) {
1863     Assembler::ldr(Rx, adr);
1864   }
1865 }
1866 
1867 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
1868   // We always try to merge two adjacent loads into one ldp.
1869   if (!try_merge_ldst(Rw, adr, 4, false)) {
1870     Assembler::ldrw(Rw, adr);
1871   }
1872 }
1873 
1874 void MacroAssembler::str(Register Rx, const Address &adr) {
1875   // We always try to merge two adjacent stores into one stp.
1876   if (!try_merge_ldst(Rx, adr, 8, true)) {
1877     Assembler::str(Rx, adr);
1878   }
1879 }
1880 
1881 void MacroAssembler::strw(Register Rw, const Address &adr) {
1882   // We always try to merge two adjacent stores into one stp.
1883   if (!try_merge_ldst(Rw, adr, 4, true)) {
1884     Assembler::strw(Rw, adr);
1885   }
1886 }
1887 
// MacroAssembler routines actually found to be needed
1889 
1890 void MacroAssembler::push(Register src)
1891 {
1892   str(src, Address(pre(esp, -1 * wordSize)));
1893 }
1894 
1895 void MacroAssembler::pop(Register dst)
1896 {
1897   ldr(dst, Address(post(esp, 1 * wordSize)));
1898 }
1899 
1900 // Note: load_unsigned_short used to be called load_unsigned_word.
1901 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1902   int off = offset();
1903   ldrh(dst, src);
1904   return off;
1905 }
1906 
1907 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
1908   int off = offset();
1909   ldrb(dst, src);
1910   return off;
1911 }
1912 
1913 int MacroAssembler::load_signed_short(Register dst, Address src) {
1914   int off = offset();
1915   ldrsh(dst, src);
1916   return off;
1917 }
1918 
1919 int MacroAssembler::load_signed_byte(Register dst, Address src) {
1920   int off = offset();
1921   ldrsb(dst, src);
1922   return off;
1923 }
1924 
1925 int MacroAssembler::load_signed_short32(Register dst, Address src) {
1926   int off = offset();
1927   ldrshw(dst, src);
1928   return off;
1929 }
1930 
1931 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
1932   int off = offset();
1933   ldrsbw(dst, src);
1934   return off;
1935 }
1936 
1937 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
1938   switch (size_in_bytes) {
1939   case  8:  ldr(dst, src); break;
1940   case  4:  ldrw(dst, src); break;
1941   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
1942   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
1943   default:  ShouldNotReachHere();
1944   }
1945 }
1946 
1947 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
1948   switch (size_in_bytes) {
1949   case  8:  str(src, dst); break;
1950   case  4:  strw(src, dst); break;
1951   case  2:  strh(src, dst); break;
1952   case  1:  strb(src, dst); break;
1953   default:  ShouldNotReachHere();
1954   }
1955 }
1956 
1957 void MacroAssembler::decrementw(Register reg, int value)
1958 {
1959   if (value < 0)  { incrementw(reg, -value);      return; }
1960   if (value == 0) {                               return; }
1961   if (value < (1 << 12)) { subw(reg, reg, value); return; }
1962   /* else */ {
1963     guarantee(reg != rscratch2, "invalid dst for register decrement");
1964     movw(rscratch2, (unsigned)value);
1965     subw(reg, reg, rscratch2);
1966   }
1967 }
1968 
1969 void MacroAssembler::decrement(Register reg, int value)
1970 {
1971   if (value < 0)  { increment(reg, -value);      return; }
1972   if (value == 0) {                              return; }
1973   if (value < (1 << 12)) { sub(reg, reg, value); return; }
1974   /* else */ {
1975     assert(reg != rscratch2, "invalid dst for register decrement");
1976     mov(rscratch2, (uint64_t)value);
1977     sub(reg, reg, rscratch2);
1978   }
1979 }
1980 
1981 void MacroAssembler::decrementw(Address dst, int value)
1982 {
1983   assert(!dst.uses(rscratch1), "invalid dst for address decrement");
1984   if (dst.getMode() == Address::literal) {
1985     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
1986     lea(rscratch2, dst);
1987     dst = Address(rscratch2);
1988   }
1989   ldrw(rscratch1, dst);
1990   decrementw(rscratch1, value);
1991   strw(rscratch1, dst);
1992 }
1993 
1994 void MacroAssembler::decrement(Address dst, int value)
1995 {
1996   assert(!dst.uses(rscratch1), "invalid address for decrement");
1997   if (dst.getMode() == Address::literal) {
1998     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
1999     lea(rscratch2, dst);
2000     dst = Address(rscratch2);
2001   }
2002   ldr(rscratch1, dst);
2003   decrement(rscratch1, value);
2004   str(rscratch1, dst);
2005 }
2006 
2007 void MacroAssembler::incrementw(Register reg, int value)
2008 {
2009   if (value < 0)  { decrementw(reg, -value);      return; }
2010   if (value == 0) {                               return; }
2011   if (value < (1 << 12)) { addw(reg, reg, value); return; }
2012   /* else */ {
2013     assert(reg != rscratch2, "invalid dst for register increment");
2014     movw(rscratch2, (unsigned)value);
2015     addw(reg, reg, rscratch2);
2016   }
2017 }
2018 
2019 void MacroAssembler::increment(Register reg, int value)
2020 {
2021   if (value < 0)  { decrement(reg, -value);      return; }
2022   if (value == 0) {                              return; }
2023   if (value < (1 << 12)) { add(reg, reg, value); return; }
2024   /* else */ {
2025     assert(reg != rscratch2, "invalid dst for register increment");
2026     movw(rscratch2, (unsigned)value);
2027     add(reg, reg, rscratch2);
2028   }
2029 }
2030 
2031 void MacroAssembler::incrementw(Address dst, int value)
2032 {
2033   assert(!dst.uses(rscratch1), "invalid dst for address increment");
2034   if (dst.getMode() == Address::literal) {
2035     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2036     lea(rscratch2, dst);
2037     dst = Address(rscratch2);
2038   }
2039   ldrw(rscratch1, dst);
2040   incrementw(rscratch1, value);
2041   strw(rscratch1, dst);
2042 }
2043 
2044 void MacroAssembler::increment(Address dst, int value)
2045 {
2046   assert(!dst.uses(rscratch1), "invalid dst for address increment");
2047   if (dst.getMode() == Address::literal) {
2048     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2049     lea(rscratch2, dst);
2050     dst = Address(rscratch2);
2051   }
2052   ldr(rscratch1, dst);
2053   increment(rscratch1, value);
2054   str(rscratch1, dst);
2055 }
2056 
2057 // Push lots of registers in the bit set supplied.  Don't push sp.
2058 // Return the number of words pushed
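// Worked example (illustrative): push(RegSet::of(r1, r2, r5).bits(), sp)
// collects {r1, r2, r5, zr} and emits
//   stp r1, r2, [sp, #-32]!
//   stp r5, zr, [sp, #16]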
2059 int MacroAssembler::push(unsigned int bitset, Register stack) {
2060   int words_pushed = 0;
2061 
2062   // Scan bitset to accumulate register pairs
2063   unsigned char regs[32];
2064   int count = 0;
2065   for (int reg = 0; reg <= 30; reg++) {
2066     if (1 & bitset)
2067       regs[count++] = reg;
2068     bitset >>= 1;
2069   }
2070   regs[count++] = zr->encoding_nocheck();
  count &= ~1;  // Only push an even number of regs
2072 
2073   if (count) {
2074     stp(as_Register(regs[0]), as_Register(regs[1]),
2075        Address(pre(stack, -count * wordSize)));
2076     words_pushed += 2;
2077   }
2078   for (int i = 2; i < count; i += 2) {
2079     stp(as_Register(regs[i]), as_Register(regs[i+1]),
2080        Address(stack, i * wordSize));
2081     words_pushed += 2;
2082   }
2083 
2084   assert(words_pushed == count, "oops, pushed != count");
2085 
2086   return count;
2087 }
2088 
2089 int MacroAssembler::pop(unsigned int bitset, Register stack) {
2090   int words_pushed = 0;
2091 
2092   // Scan bitset to accumulate register pairs
2093   unsigned char regs[32];
2094   int count = 0;
2095   for (int reg = 0; reg <= 30; reg++) {
2096     if (1 & bitset)
2097       regs[count++] = reg;
2098     bitset >>= 1;
2099   }
2100   regs[count++] = zr->encoding_nocheck();
2101   count &= ~1;
2102 
2103   for (int i = 2; i < count; i += 2) {
2104     ldp(as_Register(regs[i]), as_Register(regs[i+1]),
2105        Address(stack, i * wordSize));
2106     words_pushed += 2;
2107   }
2108   if (count) {
2109     ldp(as_Register(regs[0]), as_Register(regs[1]),
2110        Address(post(stack, count * wordSize)));
2111     words_pushed += 2;
2112   }
2113 
2114   assert(words_pushed == count, "oops, pushed != count");
2115 
2116   return count;
2117 }
2118 
// Push FP/SIMD registers in the bit set supplied.
// Return the number of dwords pushed
2121 int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
2122   int words_pushed = 0;
2123   bool use_sve = false;
2124   int sve_vector_size_in_bytes = 0;
2125 
2126 #ifdef COMPILER2
2127   use_sve = Matcher::supports_scalable_vector();
2128   sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2129 #endif
2130 
2131   // Scan bitset to accumulate register pairs
2132   unsigned char regs[32];
2133   int count = 0;
2134   for (int reg = 0; reg <= 31; reg++) {
2135     if (1 & bitset)
2136       regs[count++] = reg;
2137     bitset >>= 1;
2138   }
2139 
2140   if (count == 0) {
2141     return 0;
2142   }
2143 
2144   // SVE
2145   if (use_sve && sve_vector_size_in_bytes > 16) {
2146     sub(stack, stack, sve_vector_size_in_bytes * count);
2147     for (int i = 0; i < count; i++) {
2148       sve_str(as_FloatRegister(regs[i]), Address(stack, i));
2149     }
2150     return count * sve_vector_size_in_bytes / 8;
2151   }
2152 
2153   // NEON
2154   if (count == 1) {
2155     strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
2156     return 2;
2157   }
2158 
2159   bool odd = (count & 1) == 1;
2160   int push_slots = count + (odd ? 1 : 0);
2161 
  // Always pushing full 128-bit registers.
2163   stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
2164   words_pushed += 2;
2165 
2166   for (int i = 2; i + 1 < count; i += 2) {
2167     stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
2168     words_pushed += 2;
2169   }
2170 
2171   if (odd) {
2172     strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
2173     words_pushed++;
2174   }
2175 
2176   assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
2177   return count * 2;
2178 }
2179 
// Return the number of dwords popped
2181 int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
2182   int words_pushed = 0;
2183   bool use_sve = false;
2184   int sve_vector_size_in_bytes = 0;
2185 
2186 #ifdef COMPILER2
2187   use_sve = Matcher::supports_scalable_vector();
2188   sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2189 #endif
2190   // Scan bitset to accumulate register pairs
2191   unsigned char regs[32];
2192   int count = 0;
2193   for (int reg = 0; reg <= 31; reg++) {
2194     if (1 & bitset)
2195       regs[count++] = reg;
2196     bitset >>= 1;
2197   }
2198 
2199   if (count == 0) {
2200     return 0;
2201   }
2202 
2203   // SVE
2204   if (use_sve && sve_vector_size_in_bytes > 16) {
2205     for (int i = count - 1; i >= 0; i--) {
2206       sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
2207     }
2208     add(stack, stack, sve_vector_size_in_bytes * count);
2209     return count * sve_vector_size_in_bytes / 8;
2210   }
2211 
2212   // NEON
2213   if (count == 1) {
2214     ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
2215     return 2;
2216   }
2217 
2218   bool odd = (count & 1) == 1;
2219   int push_slots = count + (odd ? 1 : 0);
2220 
2221   if (odd) {
2222     ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
2223     words_pushed++;
2224   }
2225 
2226   for (int i = 2; i + 1 < count; i += 2) {
2227     ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
2228     words_pushed += 2;
2229   }
2230 
2231   ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2)));
2232   words_pushed += 2;
2233 
2234   assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
2235 
2236   return count * 2;
2237 }
2238 
2239 #ifdef ASSERT
2240 void MacroAssembler::verify_heapbase(const char* msg) {
2241 #if 0
2242   assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
2243   assert (Universe::heap() != NULL, "java heap should be initialized");
2244   if (!UseCompressedOops || Universe::ptr_base() == NULL) {
2245     // rheapbase is allocated as general register
2246     return;
2247   }
2248   if (CheckCompressedOops) {
2249     Label ok;
2250     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
2251     cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2252     br(Assembler::EQ, ok);
2253     stop(msg);
2254     bind(ok);
2255     pop(1 << rscratch1->encoding(), sp);
2256   }
2257 #endif
2258 }
2259 #endif
2260 
2261 void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
2262   Label done, not_weak;
2263   cbz(value, done);           // Use NULL as-is.
2264 
2265   STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
  tbz(value, 0, not_weak);    // Test for jweak tag.
2267 
2268   // Resolve jweak.
2269   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, value,
2270                  Address(value, -JNIHandles::weak_tag_value), tmp, thread);
2271   verify_oop(value);
2272   b(done);
2273 
2274   bind(not_weak);
2275   // Resolve (untagged) jobject.
2276   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
2277   verify_oop(value);
2278   bind(done);
2279 }
2280 
2281 void MacroAssembler::stop(const char* msg) {
2282   BLOCK_COMMENT(msg);
2283   dcps1(0xdeae);
2284   emit_int64((uintptr_t)msg);
2285 }
2286 
2287 void MacroAssembler::unimplemented(const char* what) {
2288   const char* buf = NULL;
2289   {
2290     ResourceMark rm;
2291     stringStream ss;
2292     ss.print("unimplemented: %s", what);
2293     buf = code_string(ss.as_string());
2294   }
2295   stop(buf);
2296 }
2297 
// If a constant does not fit in an immediate field, either split it
// into two immediate operations or materialize it with MOV and use
// the register form of the operation.
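// Worked example (illustrative): add(r0, r1, 0x123456) does not fit a
// 12-bit (optionally shifted) immediate, but is under 1 << 24, so it
// splits into
//   add r0, r1, #0x123000
//   add r0, r0, #0x456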
2300 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
2301                                            add_sub_imm_insn insn1,
2302                                            add_sub_reg_insn insn2,
2303                                            bool is32) {
2304   assert(Rd != zr, "Rd = zr and not setting flags?");
2305   bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
2306   if (fits) {
2307     (this->*insn1)(Rd, Rn, imm);
2308   } else {
2309     if (uabs(imm) < (1 << 24)) {
2310        (this->*insn1)(Rd, Rn, imm & -(1 << 12));
2311        (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
2312     } else {
2313        assert_different_registers(Rd, Rn);
2314        mov(Rd, imm);
2315        (this->*insn2)(Rd, Rn, Rd, LSL, 0);
2316     }
2317   }
2318 }
2319 
// Separate version which sets the flags. Optimisations are more restricted
// because we must set the flags correctly.
2322 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
2323                                              add_sub_imm_insn insn1,
2324                                              add_sub_reg_insn insn2,
2325                                              bool is32) {
2326   bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
2327   if (fits) {
2328     (this->*insn1)(Rd, Rn, imm);
2329   } else {
2330     assert_different_registers(Rd, Rn);
2331     assert(Rd != zr, "overflow in immediate operand");
2332     mov(Rd, imm);
2333     (this->*insn2)(Rd, Rn, Rd, LSL, 0);
2334   }
2335 }
2336 
2337 
2338 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
2339   if (increment.is_register()) {
2340     add(Rd, Rn, increment.as_register());
2341   } else {
2342     add(Rd, Rn, increment.as_constant());
2343   }
2344 }
2345 
2346 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
2347   if (increment.is_register()) {
2348     addw(Rd, Rn, increment.as_register());
2349   } else {
2350     addw(Rd, Rn, increment.as_constant());
2351   }
2352 }
2353 
2354 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
2355   if (decrement.is_register()) {
2356     sub(Rd, Rn, decrement.as_register());
2357   } else {
2358     sub(Rd, Rn, decrement.as_constant());
2359   }
2360 }
2361 
2362 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
2363   if (decrement.is_register()) {
2364     subw(Rd, Rn, decrement.as_register());
2365   } else {
2366     subw(Rd, Rn, decrement.as_constant());
2367   }
2368 }
2369 
2370 void MacroAssembler::reinit_heapbase()
2371 {
2372   if (UseCompressedOops) {
2373     if (Universe::is_fully_initialized()) {
2374       mov(rheapbase, CompressedOops::ptrs_base());
2375     } else {
2376       lea(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2377       ldr(rheapbase, Address(rheapbase));
2378     }
2379   }
2380 }
2381 
// This simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. We use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// N.B. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. We supply a register for the old value explicitly.

// The AArch64 load linked/store conditional instructions do not
// accept an offset. So, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
// register+offset Address.
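// On a non-LSE core, cmpxchgptr therefore expands to roughly
// (illustrative):
//   retry: ldaxr tmp, [addr]
//          cmp   tmp, oldv
//          b.ne  nope
//          stlxr tmp, newv, [addr]
//          cbz   w(tmp), succeed
//          b     retry
//   nope:  dmb; mov oldv, tmp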
2395 
2396 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
2397                                 Label &succeed, Label *fail) {
2398   // oldv holds comparison value
2399   // newv holds value to write in exchange
2400   // addr identifies memory word to compare against/update
2401   if (UseLSE) {
2402     mov(tmp, oldv);
2403     casal(Assembler::xword, oldv, newv, addr);
2404     cmp(tmp, oldv);
2405     br(Assembler::EQ, succeed);
2406     membar(AnyAny);
2407   } else {
2408     Label retry_load, nope;
2409     if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2410       prfm(Address(addr), PSTL1STRM);
2411     bind(retry_load);
2412     // flush and load exclusive from the memory location
2413     // and fail if it is not what we expect
2414     ldaxr(tmp, addr);
2415     cmp(tmp, oldv);
2416     br(Assembler::NE, nope);
    // if we store+flush with no intervening write tmp will be zero
2418     stlxr(tmp, newv, addr);
2419     cbzw(tmp, succeed);
    // retry so we only ever return after a load fails to compare;
    // this ensures we don't return a stale value after a failed write.
2422     b(retry_load);
2423     // if the memory word differs we return it in oldv and signal a fail
2424     bind(nope);
2425     membar(AnyAny);
2426     mov(oldv, tmp);
2427   }
2428   if (fail)
2429     b(*fail);
2430 }
2431 
2432 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
2433                                         Label &succeed, Label *fail) {
2434   assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
2435   cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
2436 }
2437 
2438 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
2439                                 Label &succeed, Label *fail) {
2440   // oldv holds comparison value
2441   // newv holds value to write in exchange
2442   // addr identifies memory word to compare against/update
2443   // tmp returns 0/1 for success/failure
2444   if (UseLSE) {
2445     mov(tmp, oldv);
2446     casal(Assembler::word, oldv, newv, addr);
2447     cmp(tmp, oldv);
2448     br(Assembler::EQ, succeed);
2449     membar(AnyAny);
2450   } else {
2451     Label retry_load, nope;
2452     if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2453       prfm(Address(addr), PSTL1STRM);
2454     bind(retry_load);
2455     // flush and load exclusive from the memory location
2456     // and fail if it is not what we expect
2457     ldaxrw(tmp, addr);
2458     cmp(tmp, oldv);
2459     br(Assembler::NE, nope);
    // if we store+flush with no intervening write tmp will be zero
2461     stlxrw(tmp, newv, addr);
2462     cbzw(tmp, succeed);
    // retry so we only ever return after a load fails to compare;
    // this ensures we don't return a stale value after a failed write.
2465     b(retry_load);
2466     // if the memory word differs we return it in oldv and signal a fail
2467     bind(nope);
2468     membar(AnyAny);
2469     mov(oldv, tmp);
2470   }
2471   if (fail)
2472     b(*fail);
2473 }
2474 
// A generic CAS; success or failure is in the EQ flag.  A weak CAS
// doesn't retry and may fail spuriously.  If the old value is wanted,
// pass a register for the result; otherwise pass noreg.
2478 
2479 // Clobbers rscratch1
2480 void MacroAssembler::cmpxchg(Register addr, Register expected,
2481                              Register new_val,
2482                              enum operand_size size,
2483                              bool acquire, bool release,
2484                              bool weak,
2485                              Register result) {
2486   if (result == noreg)  result = rscratch1;
2487   BLOCK_COMMENT("cmpxchg {");
2488   if (UseLSE) {
2489     mov(result, expected);
2490     lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
2491     compare_eq(result, expected, size);
2492   } else {
2493     Label retry_load, done;
2494     if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2495       prfm(Address(addr), PSTL1STRM);
2496     bind(retry_load);
2497     load_exclusive(result, addr, size, acquire);
2498     compare_eq(result, expected, size);
2499     br(Assembler::NE, done);
2500     store_exclusive(rscratch1, new_val, addr, size, release);
2501     if (weak) {
2502       cmpw(rscratch1, 0u);  // If the store fails, return NE to our caller.
2503     } else {
2504       cbnzw(rscratch1, retry_load);
2505     }
2506     bind(done);
2507   }
2508   BLOCK_COMMENT("} cmpxchg");
2509 }
2510 
// A generic comparison. Only compares for equality; clobbers rscratch1
// for the halfword and byte cases.
2512 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
2513   if (size == xword) {
2514     cmp(rm, rn);
2515   } else if (size == word) {
2516     cmpw(rm, rn);
2517   } else if (size == halfword) {
2518     eorw(rscratch1, rm, rn);
2519     ands(zr, rscratch1, 0xffff);
2520   } else if (size == byte) {
2521     eorw(rscratch1, rm, rn);
2522     ands(zr, rscratch1, 0xff);
2523   } else {
2524     ShouldNotReachHere();
2525   }
2526 }
2527 
2528 
2529 static bool different(Register a, RegisterOrConstant b, Register c) {
2530   if (b.is_constant())
2531     return a != c;
2532   else
2533     return a != b.as_register() && a != c && b.as_register() != c;
2534 }
2535 
2536 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
2537 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
2538   if (UseLSE) {                                                         \
2539     prev = prev->is_valid() ? prev : zr;                                \
2540     if (incr.is_register()) {                                           \
2541       AOP(sz, incr.as_register(), prev, addr);                          \
2542     } else {                                                            \
2543       mov(rscratch2, incr.as_constant());                               \
2544       AOP(sz, rscratch2, prev, addr);                                   \
2545     }                                                                   \
2546     return;                                                             \
2547   }                                                                     \
2548   Register result = rscratch2;                                          \
2549   if (prev->is_valid())                                                 \
2550     result = different(prev, incr, addr) ? prev : rscratch2;            \
2551                                                                         \
2552   Label retry_load;                                                     \
2553   if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))         \
2554     prfm(Address(addr), PSTL1STRM);                                     \
2555   bind(retry_load);                                                     \
2556   LDXR(result, addr);                                                   \
2557   OP(rscratch1, result, incr);                                          \
2558   STXR(rscratch2, rscratch1, addr);                                     \
2559   cbnzw(rscratch2, retry_load);                                         \
2560   if (prev->is_valid() && prev != result) {                             \
2561     IOP(prev, rscratch1, incr);                                         \
2562   }                                                                     \
2563 }
2564 
2565 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
2566 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
2567 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
2568 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
2569 
2570 #undef ATOMIC_OP
2571 
2572 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
2573 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
2574   if (UseLSE) {                                                         \
2575     prev = prev->is_valid() ? prev : zr;                                \
2576     AOP(sz, newv, prev, addr);                                          \
2577     return;                                                             \
2578   }                                                                     \
2579   Register result = rscratch2;                                          \
2580   if (prev->is_valid())                                                 \
2581     result = different(prev, newv, addr) ? prev : rscratch2;            \
2582                                                                         \
2583   Label retry_load;                                                     \
2584   if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))         \
2585     prfm(Address(addr), PSTL1STRM);                                     \
2586   bind(retry_load);                                                     \
2587   LDXR(result, addr);                                                   \
2588   STXR(rscratch1, newv, addr);                                          \
2589   cbnzw(rscratch1, retry_load);                                         \
2590   if (prev->is_valid() && prev != result)                               \
2591     mov(prev, result);                                                  \
2592 }
2593 
2594 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
2595 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
2596 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
2597 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
2598 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
2599 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
2600 
2601 #undef ATOMIC_XCHG
2602 
2603 #ifndef PRODUCT
2604 extern "C" void findpc(intptr_t x);
2605 #endif
2606 
2607 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
2608 {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
2611     JavaThread* thread = JavaThread::current();
2612     JavaThreadState saved_state = thread->thread_state();
2613     thread->set_thread_state(_thread_in_vm);
2614 #ifndef PRODUCT
2615     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
2616       ttyLocker ttyl;
2617       BytecodeCounter::print();
2618     }
2619 #endif
2620     if (os::message_box(msg, "Execution stopped, print registers?")) {
2621       ttyLocker ttyl;
2622       tty->print_cr(" pc = 0x%016" PRIx64, pc);
2623 #ifndef PRODUCT
2624       tty->cr();
2625       findpc(pc);
2626       tty->cr();
2627 #endif
2628       tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
2629       tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
2630       tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
2631       tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
2632       tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
2633       tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
2634       tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
2635       tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
2636       tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
2637       tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
2638       tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
2639       tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
2640       tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
2641       tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
2642       tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
2643       tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
2644       tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
2645       tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
2646       tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
2647       tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
2648       tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
2649       tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
2650       tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
2651       tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
2652       tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
2653       tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
2654       tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
2655       tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
2656       tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
2657       tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
2658       tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
2659       BREAKPOINT;
2660     }
2661   }
2662   fatal("DEBUG MESSAGE: %s", msg);
2663 }
2664 
2665 RegSet MacroAssembler::call_clobbered_registers() {
2666   RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
2667 #ifndef R18_RESERVED
2668   regs += r18_tls;
2669 #endif
2670   return regs;
2671 }
2672 
2673 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
2674   int step = 4 * wordSize;
2675   push(call_clobbered_registers() - exclude, sp);
2676   sub(sp, sp, step);
2677   mov(rscratch1, -step);
2678   // Push v0-v7, v16-v31.
2679   for (int i = 31; i>= 4; i -= 4) {
2680     if (i <= v7->encoding() || i >= v16->encoding())
2681       st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
2682           as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
2683   }
2684   st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
2685       as_FloatRegister(3), T1D, Address(sp));
2686 }
2687 
2688 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
2689   for (int i = 0; i < 32; i += 4) {
2690     if (i <= v7->encoding() || i >= v16->encoding())
2691       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
2692           as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
2693   }
2694 
2695   reinitialize_ptrue();
2696 
2697   pop(call_clobbered_registers() - exclude, sp);
2698 }
2699 
2700 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
2701                                     int sve_vector_size_in_bytes) {
2702   push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
2703   if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
2704     sub(sp, sp, sve_vector_size_in_bytes * FloatRegisterImpl::number_of_registers);
2705     for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
2706       sve_str(as_FloatRegister(i), Address(sp, i));
2707     }
2708   } else {
2709     int step = (save_vectors ? 8 : 4) * wordSize;
2710     mov(rscratch1, -step);
2711     sub(sp, sp, step);
2712     for (int i = 28; i >= 4; i -= 4) {
2713       st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
2714           as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
2715     }
2716     st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
2717   }
2718 }
2719 
2720 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
2721                                    int sve_vector_size_in_bytes) {
2722   if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
2723     for (int i = FloatRegisterImpl::number_of_registers - 1; i >= 0; i--) {
2724       sve_ldr(as_FloatRegister(i), Address(sp, i));
2725     }
2726     add(sp, sp, sve_vector_size_in_bytes * FloatRegisterImpl::number_of_registers);
2727   } else {
2728     int step = (restore_vectors ? 8 : 4) * wordSize;
2729     for (int i = 0; i <= 28; i += 4)
2730       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
2731           as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
2732   }
2733 
2734   if (restore_vectors) {
2735     reinitialize_ptrue();
2736   }
2737 
2738   // integer registers except lr & sp
2739   pop(RegSet::range(r0, r17), sp);
2740 #ifdef R18_RESERVED
2741   ldp(zr, r19, Address(post(sp, 2 * wordSize)));
2742   pop(RegSet::range(r20, r29), sp);
2743 #else
2744   pop(RegSet::range(r18_tls, r29), sp);
2745 #endif
2746 }
2747 
2748 /**
2749  * Helpers for multiply_to_len().
2750  */
2751 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
2752                                      Register src1, Register src2) {
2753   adds(dest_lo, dest_lo, src1);
2754   adc(dest_hi, dest_hi, zr);
2755   adds(dest_lo, dest_lo, src2);
2756   adc(final_dest_hi, dest_hi, zr);
2757 }
2758 
2759 // Generate an address from (r + r1 extend offset).  "size" is the
2760 // size of the operand.  The result may be in rscratch2.
2761 Address MacroAssembler::offsetted_address(Register r, Register r1,
2762                                           Address::extend ext, int offset, int size) {
2763   if (offset || (ext.shift() % size != 0)) {
2764     lea(rscratch2, Address(r, r1, ext));
2765     return Address(rscratch2, offset);
2766   } else {
2767     return Address(r, r1, ext);
2768   }
2769 }
2770 
2771 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
2772 {
2773   assert(offset >= 0, "spill to negative address?");
2774   // Offset reachable ?
2775   //   Not aligned - 9 bits signed offset
2776   //   Aligned - 12 bits unsigned offset shifted
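  //
  // Worked example (illustrative numbers): size == 8, offset == 0x21009.
  // The offset is unaligned and too big for 9 signed bits, so the first
  // step folds the low 12 bits into tmp (tmp = sp + 0x009) and leaves
  // offset == 0x21000.  That still exceeds (1 << 12) * size, so the
  // second step folds bits [23:12] in as well (tmp += 0x21000), leaving
  // offset == 0; the result is Address(tmp, 0) == sp + 0x21009.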
2777   Register base = sp;
2778   if ((offset & (size-1)) && offset >= (1<<8)) {
2779     add(tmp, base, offset & ((1<<12)-1));
2780     base = tmp;
2781     offset &= -1u<<12;
2782   }
2783 
2784   if (offset >= (1<<12) * size) {
2785     add(tmp, base, offset & (((1<<12)-1)<<12));
2786     base = tmp;
2787     offset &= ~(((1<<12)-1)<<12);
2788   }
2789 
2790   return Address(base, offset);
2791 }
2792 
2793 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
2794   assert(offset >= 0, "spill to negative address?");
2795 
2796   Register base = sp;
2797 
2798   // An immediate offset in the range 0 to 255 which is multiplied
2799   // by the current vector or predicate register size in bytes.
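  // For example, with a (hypothetical) 256-bit vector length,
  // sve_reg_size_in_bytes == 32 and offset == 96 satisfy both checks
  // and encode as Address(sp, 3), i.e. sp + 3 * VL.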
2800   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
2801     return Address(base, offset / sve_reg_size_in_bytes);
2802   }
2803 
2804   add(tmp, base, offset);
2805   return Address(tmp);
2806 }
2807 
2808 // Checks whether offset is aligned.
2809 // Returns true if it is, else false.
2810 bool MacroAssembler::merge_alignment_check(Register base,
2811                                            size_t size,
2812                                            int64_t cur_offset,
2813                                            int64_t prev_offset) const {
2814   if (AvoidUnalignedAccesses) {
2815     if (base == sp) {
      // Checks whether the low offset is aligned to a pair of registers.
2817       int64_t pair_mask = size * 2 - 1;
2818       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
2819       return (offset & pair_mask) == 0;
2820     } else { // If base is not sp, we can't guarantee the access is aligned.
2821       return false;
2822     }
2823   } else {
2824     int64_t mask = size - 1;
2825     // Load/store pair instruction only supports element size aligned offset.
2826     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
2827   }
2828 }
2829 
2830 // Checks whether current and previous loads/stores can be merged.
2831 // Returns true if it can be merged, else false.
2832 bool MacroAssembler::ldst_can_merge(Register rt,
2833                                     const Address &adr,
2834                                     size_t cur_size_in_bytes,
2835                                     bool is_store) const {
2836   address prev = pc() - NativeInstruction::instruction_size;
2837   address last = code()->last_insn();
2838 
2839   if (last == NULL || !nativeInstruction_at(last)->is_Imm_LdSt()) {
2840     return false;
2841   }
2842 
2843   if (adr.getMode() != Address::base_plus_offset || prev != last) {
2844     return false;
2845   }
2846 
2847   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
2848   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
2849 
2850   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
2851   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
2852 
2853   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
2854     return false;
2855   }
2856 
2857   int64_t max_offset = 63 * prev_size_in_bytes;
2858   int64_t min_offset = -64 * prev_size_in_bytes;
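  // ldp/stp encode a signed 7-bit immediate scaled by the access size,
  // i.e. offsets in [-64, 63] * size, hence the bounds above.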
2859 
2860   assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
2861 
2862   // Only same base can be merged.
2863   if (adr.base() != prev_ldst->base()) {
2864     return false;
2865   }
2866 
2867   int64_t cur_offset = adr.offset();
2868   int64_t prev_offset = prev_ldst->offset();
2869   size_t diff = abs(cur_offset - prev_offset);
2870   if (diff != prev_size_in_bytes) {
2871     return false;
2872   }
2873 
  // The following cases cannot be merged:
  // ldr x2, [x2, #8]
  // ldr x3, [x2, #16]
  // or:
  // ldr x2, [x3, #8]
  // ldr x2, [x3, #16]
  // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
2881   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
2882     return false;
2883   }
2884 
2885   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
2886   // Offset range must be in ldp/stp instruction's range.
2887   if (low_offset > max_offset || low_offset < min_offset) {
2888     return false;
2889   }
2890 
2891   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
2892     return true;
2893   }
2894 
2895   return false;
2896 }
2897 
2898 // Merge current load/store with previous load/store into ldp/stp.
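// For example (illustrative only), the adjacent pair
//   str x1, [sp, #16]
//   str x2, [sp, #24]
// is rewritten in place as
//   stp x1, x2, [sp, #16]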
2899 void MacroAssembler::merge_ldst(Register rt,
2900                                 const Address &adr,
2901                                 size_t cur_size_in_bytes,
2902                                 bool is_store) {
2903 
  assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store), "cur and prev must be able to be merged.");
2905 
2906   Register rt_low, rt_high;
2907   address prev = pc() - NativeInstruction::instruction_size;
2908   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
2909 
2910   int64_t offset;
2911 
2912   if (adr.offset() < prev_ldst->offset()) {
2913     offset = adr.offset();
2914     rt_low = rt;
2915     rt_high = prev_ldst->target();
2916   } else {
2917     offset = prev_ldst->offset();
2918     rt_low = prev_ldst->target();
2919     rt_high = rt;
2920   }
2921 
2922   Address adr_p = Address(prev_ldst->base(), offset);
2923   // Overwrite previous generated binary.
2924   code_section()->set_end(prev);
2925 
2926   const size_t sz = prev_ldst->size_in_bytes();
2927   assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
2928   if (!is_store) {
2929     BLOCK_COMMENT("merged ldr pair");
2930     if (sz == 8) {
2931       ldp(rt_low, rt_high, adr_p);
2932     } else {
2933       ldpw(rt_low, rt_high, adr_p);
2934     }
2935   } else {
2936     BLOCK_COMMENT("merged str pair");
2937     if (sz == 8) {
2938       stp(rt_low, rt_high, adr_p);
2939     } else {
2940       stpw(rt_low, rt_high, adr_p);
2941     }
2942   }
2943 }
2944 
2945 /**
2946  * Multiply 64 bit by 64 bit first loop.
2947  */
2948 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2949                                            Register y, Register y_idx, Register z,
2950                                            Register carry, Register product,
2951                                            Register idx, Register kdx) {
2952   //
2953   //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
2955   //    huge_128 product = y[idx] * x[xstart] + carry;
2956   //    z[kdx] = (jlong)product;
2957   //    carry  = (jlong)(product >>> 64);
2958   //  }
2959   //  z[xstart] = carry;
2960   //
2961 
2962   Label L_first_loop, L_first_loop_exit;
2963   Label L_one_x, L_one_y, L_multiply;
2964 
2965   subsw(xstart, xstart, 1);
2966   br(Assembler::MI, L_one_x);
2967 
2968   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
2969   ldr(x_xstart, Address(rscratch1));
2970   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
2971 
2972   bind(L_first_loop);
2973   subsw(idx, idx, 1);
2974   br(Assembler::MI, L_first_loop_exit);
2975   subsw(idx, idx, 1);
2976   br(Assembler::MI, L_one_y);
2977   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
2978   ldr(y_idx, Address(rscratch1));
2979   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
2980   bind(L_multiply);
2981 
2982   // AArch64 has a multiply-accumulate instruction that we can't use
2983   // here because it has no way to process carries, so we have to use
2984   // separate add and adc instructions.  Bah.
2985   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
2986   mul(product, x_xstart, y_idx);
2987   adds(product, product, carry);
2988   adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product
2989 
2990   subw(kdx, kdx, 2);
2991   ror(product, product, 32); // back to big-endian
2992   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
2993 
2994   b(L_first_loop);
2995 
2996   bind(L_one_y);
2997   ldrw(y_idx, Address(y,  0));
2998   b(L_multiply);
2999 
3000   bind(L_one_x);
3001   ldrw(x_xstart, Address(x,  0));
3002   b(L_first_loop);
3003 
3004   bind(L_first_loop_exit);
3005 }
3006 
3007 /**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
3009  *
3010  */
3011 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
3012                                              Register carry, Register carry2,
3013                                              Register idx, Register jdx,
3014                                              Register yz_idx1, Register yz_idx2,
3015                                              Register tmp, Register tmp3, Register tmp4,
3016                                              Register tmp6, Register product_hi) {
3017 
3018   //   jlong carry, x[], y[], z[];
3019   //   int kdx = ystart+1;
3020   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
3021   //     huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
3022   //     jlong carry2  = (jlong)(tmp3 >>> 64);
3023   //     huge_128 tmp4 = (y[idx]   * product_hi) + z[kdx+idx] + carry2;
3024   //     carry  = (jlong)(tmp4 >>> 64);
3025   //     z[kdx+idx+1] = (jlong)tmp3;
3026   //     z[kdx+idx] = (jlong)tmp4;
3027   //   }
3028   //   idx += 2;
3029   //   if (idx > 0) {
3030   //     yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
3031   //     z[kdx+idx] = (jlong)yz_idx1;
3032   //     carry  = (jlong)(yz_idx1 >>> 64);
3033   //   }
3034   //
3035 
3036   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
3037 
3038   lsrw(jdx, idx, 2);
3039 
3040   bind(L_third_loop);
3041 
3042   subsw(jdx, jdx, 1);
3043   br(Assembler::MI, L_third_loop_exit);
3044   subw(idx, idx, 4);
3045 
3046   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3047 
3048   ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
3049 
3050   lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3051 
3052   ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
3053   ror(yz_idx2, yz_idx2, 32);
3054 
3055   ldp(rscratch2, rscratch1, Address(tmp6, 0));
3056 
3057   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
3058   umulh(tmp4, product_hi, yz_idx1);
3059 
3060   ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
3061   ror(rscratch2, rscratch2, 32);
3062 
3063   mul(tmp, product_hi, yz_idx2);   //  yz_idx2 * product_hi -> carry2:tmp
3064   umulh(carry2, product_hi, yz_idx2);
3065 
3066   // propagate sum of both multiplications into carry:tmp4:tmp3
3067   adds(tmp3, tmp3, carry);
3068   adc(tmp4, tmp4, zr);
3069   adds(tmp3, tmp3, rscratch1);
3070   adcs(tmp4, tmp4, tmp);
3071   adc(carry, carry2, zr);
3072   adds(tmp4, tmp4, rscratch2);
3073   adc(carry, carry, zr);
3074 
3075   ror(tmp3, tmp3, 32); // convert little-endian to big-endian
3076   ror(tmp4, tmp4, 32);
3077   stp(tmp4, tmp3, Address(tmp6, 0));
3078 
3079   b(L_third_loop);
  bind(L_third_loop_exit);

  andw(idx, idx, 0x3);
3083   cbz(idx, L_post_third_loop_done);
3084 
3085   Label L_check_1;
3086   subsw(idx, idx, 2);
3087   br(Assembler::MI, L_check_1);
3088 
3089   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3090   ldr(yz_idx1, Address(rscratch1, 0));
3091   ror(yz_idx1, yz_idx1, 32);
3092   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
3093   umulh(tmp4, product_hi, yz_idx1);
3094   lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3095   ldr(yz_idx2, Address(rscratch1, 0));
3096   ror(yz_idx2, yz_idx2, 32);
3097 
3098   add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
3099 
3100   ror(tmp3, tmp3, 32);
3101   str(tmp3, Address(rscratch1, 0));
3102 
  bind(L_check_1);

  andw(idx, idx, 0x1);
3106   subsw(idx, idx, 1);
3107   br(Assembler::MI, L_post_third_loop_done);
3108   ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3109   mul(tmp3, tmp4, product_hi);  //  tmp4 * product_hi -> carry2:tmp3
3110   umulh(carry2, tmp4, product_hi);
3111   ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3112 
3113   add2_with_carry(carry2, tmp3, tmp4, carry);
3114 
3115   strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
3116   extr(carry, carry2, tmp3, 32);
3117 
3118   bind(L_post_third_loop_done);
3119 }
3120 
3121 /**
 * Code for BigInteger::multiplyToLen() intrinsic.
3123  *
3124  * r0: x
3125  * r1: xlen
3126  * r2: y
3127  * r3: ylen
 * r4: z
3129  * r5: zlen
3130  * r10: tmp1
3131  * r11: tmp2
3132  * r12: tmp3
3133  * r13: tmp4
3134  * r14: tmp5
3135  * r15: tmp6
3136  * r16: tmp7
3137  *
3138  */
3139 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
3140                                      Register z, Register zlen,
3141                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4,
3142                                      Register tmp5, Register tmp6, Register product_hi) {
3143 
3144   assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
3145 
3146   const Register idx = tmp1;
3147   const Register kdx = tmp2;
3148   const Register xstart = tmp3;
3149 
3150   const Register y_idx = tmp4;
3151   const Register carry = tmp5;
3152   const Register product  = xlen;
3153   const Register x_xstart = zlen;  // reuse register
3154 
3155   // First Loop.
3156   //
3157   //  final static long LONG_MASK = 0xffffffffL;
3158   //  int xstart = xlen - 1;
3159   //  int ystart = ylen - 1;
3160   //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3162   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
3163   //    z[kdx] = (int)product;
3164   //    carry = product >>> 32;
3165   //  }
3166   //  z[xstart] = (int)carry;
3167   //
3168 
3169   movw(idx, ylen);      // idx = ylen;
3170   movw(kdx, zlen);      // kdx = xlen+ylen;
3171   mov(carry, zr);       // carry = 0;
3172 
3173   Label L_done;
3174 
3175   movw(xstart, xlen);
3176   subsw(xstart, xstart, 1);
3177   br(Assembler::MI, L_done);
3178 
3179   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
3180 
3181   Label L_second_loop;
3182   cbzw(kdx, L_second_loop);
3183 
3184   Label L_carry;
3185   subw(kdx, kdx, 1);
3186   cbzw(kdx, L_carry);
3187 
3188   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3189   lsr(carry, carry, 32);
3190   subw(kdx, kdx, 1);
3191 
3192   bind(L_carry);
3193   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
3194 
3195   // Second and third (nested) loops.
3196   //
3197   // for (int i = xstart-1; i >= 0; i--) { // Second loop
3198   //   carry = 0;
3199   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
3200   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
3201   //                    (z[k] & LONG_MASK) + carry;
3202   //     z[k] = (int)product;
3203   //     carry = product >>> 32;
3204   //   }
3205   //   z[i] = (int)carry;
3206   // }
3207   //
3208   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
3209 
3210   const Register jdx = tmp1;
3211 
3212   bind(L_second_loop);
3213   mov(carry, zr);                // carry = 0;
3214   movw(jdx, ylen);               // j = ystart+1
3215 
3216   subsw(xstart, xstart, 1);      // i = xstart-1;
3217   br(Assembler::MI, L_done);
3218 
3219   str(z, Address(pre(sp, -4 * wordSize)));
3220 
3221   Label L_last_x;
3222   lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
3223   subsw(xstart, xstart, 1);       // i = xstart-1;
3224   br(Assembler::MI, L_last_x);
3225 
3226   lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
3227   ldr(product_hi, Address(rscratch1));
3228   ror(product_hi, product_hi, 32);  // convert big-endian to little-endian
3229 
3230   Label L_third_loop_prologue;
3231   bind(L_third_loop_prologue);
3232 
3233   str(ylen, Address(sp, wordSize));
3234   stp(x, xstart, Address(sp, 2 * wordSize));
3235   multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
3236                           tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
3237   ldp(z, ylen, Address(post(sp, 2 * wordSize)));
3238   ldp(x, xlen, Address(post(sp, 2 * wordSize)));   // copy old xstart -> xlen
3239 
3240   addw(tmp3, xlen, 1);
3241   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
3242   subsw(tmp3, tmp3, 1);
3243   br(Assembler::MI, L_done);
3244 
3245   lsr(carry, carry, 32);
3246   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
3247   b(L_second_loop);
3248 
3249   // Next infrequent code is moved outside loops.
3250   bind(L_last_x);
3251   ldrw(product_hi, Address(x,  0));
3252   b(L_third_loop_prologue);
3253 
3254   bind(L_done);
3255 }
3256 
// Code for BigInteger::mulAdd intrinsic
3258 // out     = r0
3259 // in      = r1
3260 // offset  = r2  (already out.length-offset)
3261 // len     = r3
3262 // k       = r4
3263 //
3264 // pseudo code from java implementation:
3265 // carry = 0;
3266 // offset = out.length-offset - 1;
3267 // for (int j=len-1; j >= 0; j--) {
3268 //     product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
3269 //     out[offset--] = (int)product;
3270 //     carry = product >>> 32;
3271 // }
3272 // return (int)carry;
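// Note on the madd below: it computes in[j] * k + carry as a full
// 64-bit value.  Assuming the high half of k is zero (a zero-extended
// 32-bit value, like the ldrw results), the worst case
// (2^32-1)^2 + 2*(2^32-1) == 2^64-1 still fits, so no intermediate
// overflow is possible.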
3273 void MacroAssembler::mul_add(Register out, Register in, Register offset,
3274       Register len, Register k) {
3275     Label LOOP, END;
3276     // pre-loop
    cmp(len, zr); // cmp, not cbz/cbnz: we use the condition twice => fewer branches
3278     csel(out, zr, out, Assembler::EQ);
3279     br(Assembler::EQ, END);
3280     add(in, in, len, LSL, 2); // in[j+1] address
3281     add(offset, out, offset, LSL, 2); // out[offset + 1] address
3282     mov(out, zr); // used to keep carry now
3283     BIND(LOOP);
3284     ldrw(rscratch1, Address(pre(in, -4)));
3285     madd(rscratch1, rscratch1, k, out);
3286     ldrw(rscratch2, Address(pre(offset, -4)));
3287     add(rscratch1, rscratch1, rscratch2);
3288     strw(rscratch1, Address(offset));
3289     lsr(out, rscratch1, 32);
3290     subs(len, len, 1);
3291     br(Assembler::NE, LOOP);
3292     BIND(END);
3293 }
3294 
3295 /**
3296  * Emits code to update CRC-32 with a byte value according to constants in table
3297  *
3298  * @param [in,out]crc   Register containing the crc.
3299  * @param [in]val       Register containing the byte to fold into the CRC.
3300  * @param [in]table     Register containing the table of crc constants.
3301  *
3302  * uint32_t crc;
3303  * val = crc_table[(val ^ crc) & 0xFF];
3304  * crc = val ^ (crc >> 8);
3305  *
3306  */
3307 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
3308   eor(val, val, crc);
3309   andr(val, val, 0xff);
3310   ldrw(val, Address(table, val, Address::lsl(2)));
3311   eor(crc, val, crc, Assembler::LSR, 8);
3312 }
3313 
3314 /**
3315  * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
3316  *
3317  * @param [in,out]crc   Register containing the crc.
 * @param [in]v         Register containing the 32-bit value to fold into the CRC.
3319  * @param [in]table0    Register containing table 0 of crc constants.
3320  * @param [in]table1    Register containing table 1 of crc constants.
3321  * @param [in]table2    Register containing table 2 of crc constants.
3322  * @param [in]table3    Register containing table 3 of crc constants.
3323  *
3324  * uint32_t crc;
3325  *   v = crc ^ v
3326  *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
3327  *
3328  */
3329 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
3330         Register table0, Register table1, Register table2, Register table3,
3331         bool upper) {
3332   eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
3333   uxtb(tmp, v);
3334   ldrw(crc, Address(table3, tmp, Address::lsl(2)));
3335   ubfx(tmp, v, 8, 8);
3336   ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
3337   eor(crc, crc, tmp);
3338   ubfx(tmp, v, 16, 8);
3339   ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
3340   eor(crc, crc, tmp);
3341   ubfx(tmp, v, 24, 8);
3342   ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
3343   eor(crc, crc, tmp);
3344 }
3345 
3346 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
3347         Register len, Register tmp0, Register tmp1, Register tmp2,
3348         Register tmp3) {
3349     Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
3350     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
3351 
3352     mvnw(crc, crc);
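    // CRC-32 is defined over a bit-inverted accumulator: the mvnw above
    // applies the initial one's complement; the matching mvnw at L_exit
    // removes it.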
3353 
3354     subs(len, len, 128);
3355     br(Assembler::GE, CRC_by64_pre);
3356   BIND(CRC_less64);
3357     adds(len, len, 128-32);
3358     br(Assembler::GE, CRC_by32_loop);
3359   BIND(CRC_less32);
3360     adds(len, len, 32-4);
3361     br(Assembler::GE, CRC_by4_loop);
3362     adds(len, len, 4);
3363     br(Assembler::GT, CRC_by1_loop);
3364     b(L_exit);
3365 
3366   BIND(CRC_by32_loop);
3367     ldp(tmp0, tmp1, Address(post(buf, 16)));
3368     subs(len, len, 32);
3369     crc32x(crc, crc, tmp0);
3370     ldr(tmp2, Address(post(buf, 8)));
3371     crc32x(crc, crc, tmp1);
3372     ldr(tmp3, Address(post(buf, 8)));
3373     crc32x(crc, crc, tmp2);
3374     crc32x(crc, crc, tmp3);
3375     br(Assembler::GE, CRC_by32_loop);
3376     cmn(len, (u1)32);
3377     br(Assembler::NE, CRC_less32);
3378     b(L_exit);
3379 
3380   BIND(CRC_by4_loop);
3381     ldrw(tmp0, Address(post(buf, 4)));
3382     subs(len, len, 4);
3383     crc32w(crc, crc, tmp0);
3384     br(Assembler::GE, CRC_by4_loop);
3385     adds(len, len, 4);
3386     br(Assembler::LE, L_exit);
3387   BIND(CRC_by1_loop);
3388     ldrb(tmp0, Address(post(buf, 1)));
3389     subs(len, len, 1);
3390     crc32b(crc, crc, tmp0);
3391     br(Assembler::GT, CRC_by1_loop);
3392     b(L_exit);
3393 
3394   BIND(CRC_by64_pre);
3395     sub(buf, buf, 8);
3396     ldp(tmp0, tmp1, Address(buf, 8));
3397     crc32x(crc, crc, tmp0);
3398     ldr(tmp2, Address(buf, 24));
3399     crc32x(crc, crc, tmp1);
3400     ldr(tmp3, Address(buf, 32));
3401     crc32x(crc, crc, tmp2);
3402     ldr(tmp0, Address(buf, 40));
3403     crc32x(crc, crc, tmp3);
3404     ldr(tmp1, Address(buf, 48));
3405     crc32x(crc, crc, tmp0);
3406     ldr(tmp2, Address(buf, 56));
3407     crc32x(crc, crc, tmp1);
3408     ldr(tmp3, Address(pre(buf, 64)));
3409 
3410     b(CRC_by64_loop);
3411 
3412     align(CodeEntryAlignment);
3413   BIND(CRC_by64_loop);
3414     subs(len, len, 64);
3415     crc32x(crc, crc, tmp2);
3416     ldr(tmp0, Address(buf, 8));
3417     crc32x(crc, crc, tmp3);
3418     ldr(tmp1, Address(buf, 16));
3419     crc32x(crc, crc, tmp0);
3420     ldr(tmp2, Address(buf, 24));
3421     crc32x(crc, crc, tmp1);
3422     ldr(tmp3, Address(buf, 32));
3423     crc32x(crc, crc, tmp2);
3424     ldr(tmp0, Address(buf, 40));
3425     crc32x(crc, crc, tmp3);
3426     ldr(tmp1, Address(buf, 48));
3427     crc32x(crc, crc, tmp0);
3428     ldr(tmp2, Address(buf, 56));
3429     crc32x(crc, crc, tmp1);
3430     ldr(tmp3, Address(pre(buf, 64)));
3431     br(Assembler::GE, CRC_by64_loop);
3432 
3433     // post-loop
3434     crc32x(crc, crc, tmp2);
3435     crc32x(crc, crc, tmp3);
3436 
3437     sub(len, len, 64);
3438     add(buf, buf, 8);
3439     cmn(len, (u1)128);
3440     br(Assembler::NE, CRC_less64);
3441   BIND(L_exit);
3442     mvnw(crc, crc);
3443 }
3444 
3445 /**
3446  * @param crc   register containing existing CRC (32-bit)
3447  * @param buf   register pointing to input byte buffer (byte*)
3448  * @param len   register containing number of bytes
3449  * @param table register that will contain address of CRC table
3450  * @param tmp   scratch register
3451  */
3452 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
3453         Register table0, Register table1, Register table2, Register table3,
3454         Register tmp, Register tmp2, Register tmp3) {
3455   Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
3456   uint64_t offset;
3457 
3458   if (UseCRC32) {
3459       kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
3460       return;
3461   }
3462 
3463     mvnw(crc, crc);
3464 
3465     adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
3466     if (offset) add(table0, table0, offset);
3467     add(table1, table0, 1*256*sizeof(juint));
3468     add(table2, table0, 2*256*sizeof(juint));
3469     add(table3, table0, 3*256*sizeof(juint));
3470 
3471   if (UseNeon) {
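      // Carry-less-multiply fold: each L_fold iteration folds 32 bytes
      // of input into the 32-byte accumulator held in v0/v1, using
      // pmull/pmull2 against constants stored after the four CRC
      // tables; the remaining tail is handled by the table-driven
      // loops below.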
3472       cmp(len, (u1)64);
3473       br(Assembler::LT, L_by16);
3474       eor(v16, T16B, v16, v16);
3475 
3476     Label L_fold;
3477 
3478       add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
3479 
3480       ld1(v0, v1, T2D, post(buf, 32));
3481       ld1r(v4, T2D, post(tmp, 8));
3482       ld1r(v5, T2D, post(tmp, 8));
3483       ld1r(v6, T2D, post(tmp, 8));
3484       ld1r(v7, T2D, post(tmp, 8));
3485       mov(v16, T4S, 0, crc);
3486 
3487       eor(v0, T16B, v0, v16);
3488       sub(len, len, 64);
3489 
3490     BIND(L_fold);
3491       pmull(v22, T8H, v0, v5, T8B);
3492       pmull(v20, T8H, v0, v7, T8B);
3493       pmull(v23, T8H, v0, v4, T8B);
3494       pmull(v21, T8H, v0, v6, T8B);
3495 
3496       pmull2(v18, T8H, v0, v5, T16B);
3497       pmull2(v16, T8H, v0, v7, T16B);
3498       pmull2(v19, T8H, v0, v4, T16B);
3499       pmull2(v17, T8H, v0, v6, T16B);
3500 
3501       uzp1(v24, T8H, v20, v22);
3502       uzp2(v25, T8H, v20, v22);
3503       eor(v20, T16B, v24, v25);
3504 
3505       uzp1(v26, T8H, v16, v18);
3506       uzp2(v27, T8H, v16, v18);
3507       eor(v16, T16B, v26, v27);
3508 
3509       ushll2(v22, T4S, v20, T8H, 8);
3510       ushll(v20, T4S, v20, T4H, 8);
3511 
3512       ushll2(v18, T4S, v16, T8H, 8);
3513       ushll(v16, T4S, v16, T4H, 8);
3514 
3515       eor(v22, T16B, v23, v22);
3516       eor(v18, T16B, v19, v18);
3517       eor(v20, T16B, v21, v20);
3518       eor(v16, T16B, v17, v16);
3519 
3520       uzp1(v17, T2D, v16, v20);
3521       uzp2(v21, T2D, v16, v20);
3522       eor(v17, T16B, v17, v21);
3523 
3524       ushll2(v20, T2D, v17, T4S, 16);
3525       ushll(v16, T2D, v17, T2S, 16);
3526 
3527       eor(v20, T16B, v20, v22);
3528       eor(v16, T16B, v16, v18);
3529 
3530       uzp1(v17, T2D, v20, v16);
3531       uzp2(v21, T2D, v20, v16);
3532       eor(v28, T16B, v17, v21);
3533 
3534       pmull(v22, T8H, v1, v5, T8B);
3535       pmull(v20, T8H, v1, v7, T8B);
3536       pmull(v23, T8H, v1, v4, T8B);
3537       pmull(v21, T8H, v1, v6, T8B);
3538 
3539       pmull2(v18, T8H, v1, v5, T16B);
3540       pmull2(v16, T8H, v1, v7, T16B);
3541       pmull2(v19, T8H, v1, v4, T16B);
3542       pmull2(v17, T8H, v1, v6, T16B);
3543 
3544       ld1(v0, v1, T2D, post(buf, 32));
3545 
3546       uzp1(v24, T8H, v20, v22);
3547       uzp2(v25, T8H, v20, v22);
3548       eor(v20, T16B, v24, v25);
3549 
3550       uzp1(v26, T8H, v16, v18);
3551       uzp2(v27, T8H, v16, v18);
3552       eor(v16, T16B, v26, v27);
3553 
3554       ushll2(v22, T4S, v20, T8H, 8);
3555       ushll(v20, T4S, v20, T4H, 8);
3556 
3557       ushll2(v18, T4S, v16, T8H, 8);
3558       ushll(v16, T4S, v16, T4H, 8);
3559 
3560       eor(v22, T16B, v23, v22);
3561       eor(v18, T16B, v19, v18);
3562       eor(v20, T16B, v21, v20);
3563       eor(v16, T16B, v17, v16);
3564 
3565       uzp1(v17, T2D, v16, v20);
3566       uzp2(v21, T2D, v16, v20);
3567       eor(v16, T16B, v17, v21);
3568 
3569       ushll2(v20, T2D, v16, T4S, 16);
3570       ushll(v16, T2D, v16, T2S, 16);
3571 
3572       eor(v20, T16B, v22, v20);
3573       eor(v16, T16B, v16, v18);
3574 
3575       uzp1(v17, T2D, v20, v16);
3576       uzp2(v21, T2D, v20, v16);
3577       eor(v20, T16B, v17, v21);
3578 
3579       shl(v16, T2D, v28, 1);
3580       shl(v17, T2D, v20, 1);
3581 
3582       eor(v0, T16B, v0, v16);
3583       eor(v1, T16B, v1, v17);
3584 
3585       subs(len, len, 32);
3586       br(Assembler::GE, L_fold);
3587 
3588       mov(crc, 0);
3589       mov(tmp, v0, T1D, 0);
3590       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3591       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3592       mov(tmp, v0, T1D, 1);
3593       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3594       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3595       mov(tmp, v1, T1D, 0);
3596       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3597       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3598       mov(tmp, v1, T1D, 1);
3599       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3600       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3601 
3602       add(len, len, 32);
3603   }
3604 
3605   BIND(L_by16);
3606     subs(len, len, 16);
3607     br(Assembler::GE, L_by16_loop);
3608     adds(len, len, 16-4);
3609     br(Assembler::GE, L_by4_loop);
3610     adds(len, len, 4);
3611     br(Assembler::GT, L_by1_loop);
3612     b(L_exit);
3613 
3614   BIND(L_by4_loop);
3615     ldrw(tmp, Address(post(buf, 4)));
3616     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
3617     subs(len, len, 4);
3618     br(Assembler::GE, L_by4_loop);
3619     adds(len, len, 4);
3620     br(Assembler::LE, L_exit);
3621   BIND(L_by1_loop);
3622     subs(len, len, 1);
3623     ldrb(tmp, Address(post(buf, 1)));
3624     update_byte_crc32(crc, tmp, table0);
3625     br(Assembler::GT, L_by1_loop);
3626     b(L_exit);
3627 
3628     align(CodeEntryAlignment);
3629   BIND(L_by16_loop);
3630     subs(len, len, 16);
3631     ldp(tmp, tmp3, Address(post(buf, 16)));
3632     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3633     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3634     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
3635     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
3636     br(Assembler::GE, L_by16_loop);
3637     adds(len, len, 16-4);
3638     br(Assembler::GE, L_by4_loop);
3639     adds(len, len, 4);
3640     br(Assembler::GT, L_by1_loop);
3641   BIND(L_exit);
3642     mvnw(crc, crc);
3643 }
3644 
3645 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
3646         Register len, Register tmp0, Register tmp1, Register tmp2,
3647         Register tmp3) {
3648     Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
3649     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
3650 
3651     subs(len, len, 128);
3652     br(Assembler::GE, CRC_by64_pre);
3653   BIND(CRC_less64);
3654     adds(len, len, 128-32);
3655     br(Assembler::GE, CRC_by32_loop);
3656   BIND(CRC_less32);
3657     adds(len, len, 32-4);
3658     br(Assembler::GE, CRC_by4_loop);
3659     adds(len, len, 4);
3660     br(Assembler::GT, CRC_by1_loop);
3661     b(L_exit);
3662 
3663   BIND(CRC_by32_loop);
3664     ldp(tmp0, tmp1, Address(post(buf, 16)));
3665     subs(len, len, 32);
3666     crc32cx(crc, crc, tmp0);
3667     ldr(tmp2, Address(post(buf, 8)));
3668     crc32cx(crc, crc, tmp1);
3669     ldr(tmp3, Address(post(buf, 8)));
3670     crc32cx(crc, crc, tmp2);
3671     crc32cx(crc, crc, tmp3);
3672     br(Assembler::GE, CRC_by32_loop);
3673     cmn(len, (u1)32);
3674     br(Assembler::NE, CRC_less32);
3675     b(L_exit);
3676 
3677   BIND(CRC_by4_loop);
3678     ldrw(tmp0, Address(post(buf, 4)));
3679     subs(len, len, 4);
3680     crc32cw(crc, crc, tmp0);
3681     br(Assembler::GE, CRC_by4_loop);
3682     adds(len, len, 4);
3683     br(Assembler::LE, L_exit);
3684   BIND(CRC_by1_loop);
3685     ldrb(tmp0, Address(post(buf, 1)));
3686     subs(len, len, 1);
3687     crc32cb(crc, crc, tmp0);
3688     br(Assembler::GT, CRC_by1_loop);
3689     b(L_exit);
3690 
3691   BIND(CRC_by64_pre);
3692     sub(buf, buf, 8);
3693     ldp(tmp0, tmp1, Address(buf, 8));
3694     crc32cx(crc, crc, tmp0);
3695     ldr(tmp2, Address(buf, 24));
3696     crc32cx(crc, crc, tmp1);
3697     ldr(tmp3, Address(buf, 32));
3698     crc32cx(crc, crc, tmp2);
3699     ldr(tmp0, Address(buf, 40));
3700     crc32cx(crc, crc, tmp3);
3701     ldr(tmp1, Address(buf, 48));
3702     crc32cx(crc, crc, tmp0);
3703     ldr(tmp2, Address(buf, 56));
3704     crc32cx(crc, crc, tmp1);
3705     ldr(tmp3, Address(pre(buf, 64)));
3706 
3707     b(CRC_by64_loop);
3708 
3709     align(CodeEntryAlignment);
3710   BIND(CRC_by64_loop);
3711     subs(len, len, 64);
3712     crc32cx(crc, crc, tmp2);
3713     ldr(tmp0, Address(buf, 8));
3714     crc32cx(crc, crc, tmp3);
3715     ldr(tmp1, Address(buf, 16));
3716     crc32cx(crc, crc, tmp0);
3717     ldr(tmp2, Address(buf, 24));
3718     crc32cx(crc, crc, tmp1);
3719     ldr(tmp3, Address(buf, 32));
3720     crc32cx(crc, crc, tmp2);
3721     ldr(tmp0, Address(buf, 40));
3722     crc32cx(crc, crc, tmp3);
3723     ldr(tmp1, Address(buf, 48));
3724     crc32cx(crc, crc, tmp0);
3725     ldr(tmp2, Address(buf, 56));
3726     crc32cx(crc, crc, tmp1);
3727     ldr(tmp3, Address(pre(buf, 64)));
3728     br(Assembler::GE, CRC_by64_loop);
3729 
3730     // post-loop
3731     crc32cx(crc, crc, tmp2);
3732     crc32cx(crc, crc, tmp3);
3733 
3734     sub(len, len, 64);
3735     add(buf, buf, 8);
3736     cmn(len, (u1)128);
3737     br(Assembler::NE, CRC_less64);
3738   BIND(L_exit);
3739 }
3740 
3741 /**
3742  * @param crc   register containing existing CRC (32-bit)
3743  * @param buf   register pointing to input byte buffer (byte*)
3744  * @param len   register containing number of bytes
3745  * @param table register that will contain address of CRC table
3746  * @param tmp   scratch register
3747  */
3748 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
3749         Register table0, Register table1, Register table2, Register table3,
3750         Register tmp, Register tmp2, Register tmp3) {
3751   kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
3752 }
3753 
3754 
3755 SkipIfEqual::SkipIfEqual(
3756     MacroAssembler* masm, const bool* flag_addr, bool value) {
3757   _masm = masm;
3758   uint64_t offset;
3759   _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
3760   _masm->ldrb(rscratch1, Address(rscratch1, offset));
3761   _masm->cbzw(rscratch1, _label);
3762 }
3763 
3764 SkipIfEqual::~SkipIfEqual() {
3765   _masm->bind(_label);
3766 }
3767 
3768 void MacroAssembler::addptr(const Address &dst, int32_t src) {
3769   Address adr;
3770   switch(dst.getMode()) {
3771   case Address::base_plus_offset:
3772     // This is the expected mode, although we allow all the other
3773     // forms below.
3774     adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
3775     break;
3776   default:
3777     lea(rscratch2, dst);
3778     adr = Address(rscratch2);
3779     break;
3780   }
3781   ldr(rscratch1, adr);
3782   add(rscratch1, rscratch1, src);
3783   str(rscratch1, adr);
3784 }
3785 
3786 void MacroAssembler::cmpptr(Register src1, Address src2) {
3787   uint64_t offset;
3788   adrp(rscratch1, src2, offset);
3789   ldr(rscratch1, Address(rscratch1, offset));
3790   cmp(src1, rscratch1);
3791 }
3792 
3793 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3794   cmp(obj1, obj2);
3795 }
3796 
3797 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
3798   load_method_holder(rresult, rmethod);
3799   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
3800 }
3801 
3802 void MacroAssembler::load_method_holder(Register holder, Register method) {
3803   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
3804   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
3805   ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
3806 }
3807 
3808 void MacroAssembler::load_klass(Register dst, Register src) {
3809   if (UseCompressedClassPointers) {
3810     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3811     decode_klass_not_null(dst);
3812   } else {
3813     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3814   }
3815 }
3816 
3817 // ((OopHandle)result).resolve();
3818 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3819   // OopHandle::resolve is an indirection.
3820   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3821 }
3822 
3823 // ((WeakHandle)result).resolve();
3824 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
3825   assert_different_registers(rresult, rtmp);
3826   Label resolved;
3827 
3828   // A null weak handle resolves to null.
3829   cbz(rresult, resolved);
3830 
3831   // Only 64 bit platforms support GCs that require a tmp register
3832   // Only IN_HEAP loads require a thread_tmp register
3833   // WeakHandle::resolve is an indirection like jweak.
3834   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3835                  rresult, Address(rresult), rtmp, /*tmp_thread*/noreg);
3836   bind(resolved);
3837 }
3838 
3839 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3840   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));                  // ConstMethod*
  ldr(dst, Address(dst, ConstMethod::constants_offset()));             // ConstantPool*
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
  ldr(dst, Address(dst, mirror_offset));                               // OopHandle for the java mirror
3845   resolve_oop_handle(dst, tmp);
3846 }
3847 
3848 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3849   if (UseCompressedClassPointers) {
3850     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3851     if (CompressedKlassPointers::base() == NULL) {
3852       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3853       return;
3854     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3855                && CompressedKlassPointers::shift() == 0) {
3856       // Only the bottom 32 bits matter
3857       cmpw(trial_klass, tmp);
3858       return;
3859     }
3860     decode_klass_not_null(tmp);
3861   } else {
3862     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3863   }
3864   cmp(trial_klass, tmp);
3865 }
3866 
3867 void MacroAssembler::load_prototype_header(Register dst, Register src) {
3868   load_klass(dst, src);
3869   ldr(dst, Address(dst, Klass::prototype_header_offset()));
3870 }
3871 
3872 void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release?  Concurrent GCs assume the
  // klass length is valid if the klass field is not null.
3875   if (UseCompressedClassPointers) {
3876     encode_klass_not_null(src);
3877     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3878   } else {
3879     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3880   }
3881 }
3882 
3883 void MacroAssembler::store_klass_gap(Register dst, Register src) {
3884   if (UseCompressedClassPointers) {
3885     // Store to klass gap in destination
3886     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
3887   }
3888 }
3889 
3890 // Algorithm must match CompressedOops::encode.
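// For a non-null base the encode below is branch-free: subs sets HS
// (carry) iff s >= rheapbase, so the csel keeps the difference for
// in-heap oops and substitutes zr for null before the shift.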
3891 void MacroAssembler::encode_heap_oop(Register d, Register s) {
3892 #ifdef ASSERT
3893   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
3894 #endif
3895   verify_oop(s, "broken oop in encode_heap_oop");
3896   if (CompressedOops::base() == NULL) {
3897     if (CompressedOops::shift() != 0) {
3898       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3899       lsr(d, s, LogMinObjAlignmentInBytes);
3900     } else {
3901       mov(d, s);
3902     }
3903   } else {
3904     subs(d, s, rheapbase);
3905     csel(d, d, zr, Assembler::HS);
3906     lsr(d, d, LogMinObjAlignmentInBytes);
3907 
3908     /*  Old algorithm: is this any worse?
3909     Label nonnull;
3910     cbnz(r, nonnull);
3911     sub(r, r, rheapbase);
3912     bind(nonnull);
3913     lsr(r, r, LogMinObjAlignmentInBytes);
3914     */
3915   }
3916 }
3917 
3918 void MacroAssembler::encode_heap_oop_not_null(Register r) {
3919 #ifdef ASSERT
3920   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
3921   if (CheckCompressedOops) {
3922     Label ok;
3923     cbnz(r, ok);
3924     stop("null oop passed to encode_heap_oop_not_null");
3925     bind(ok);
3926   }
3927 #endif
3928   verify_oop(r, "broken oop in encode_heap_oop_not_null");
3929   if (CompressedOops::base() != NULL) {
3930     sub(r, r, rheapbase);
3931   }
3932   if (CompressedOops::shift() != 0) {
3933     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3934     lsr(r, r, LogMinObjAlignmentInBytes);
3935   }
3936 }
3937 
3938 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
3939 #ifdef ASSERT
3940   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
3941   if (CheckCompressedOops) {
3942     Label ok;
3943     cbnz(src, ok);
3944     stop("null oop passed to encode_heap_oop_not_null2");
3945     bind(ok);
3946   }
3947 #endif
3948   verify_oop(src, "broken oop in encode_heap_oop_not_null2");
3949 
3950   Register data = src;
3951   if (CompressedOops::base() != NULL) {
3952     sub(dst, src, rheapbase);
3953     data = dst;
3954   }
3955   if (CompressedOops::shift() != 0) {
3956     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3957     lsr(dst, data, LogMinObjAlignmentInBytes);
3958     data = dst;
3959   }
3960   if (data == src)
3961     mov(dst, src);
3962 }
3963 
3964 void  MacroAssembler::decode_heap_oop(Register d, Register s) {
3965 #ifdef ASSERT
3966   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
3967 #endif
3968   if (CompressedOops::base() == NULL) {
3969     if (CompressedOops::shift() != 0 || d != s) {
3970       lsl(d, s, CompressedOops::shift());
3971     }
3972   } else {
3973     Label done;
3974     if (d != s)
3975       mov(d, s);
3976     cbz(s, done);
3977     add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
3978     bind(done);
3979   }
3980   verify_oop(d, "broken oop in decode_heap_oop");
3981 }
3982 
3983 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
3984   assert (UseCompressedOops, "should only be used for compressed headers");
3985   assert (Universe::heap() != NULL, "java heap should be initialized");
3986   // Cannot assert, unverified entry point counts instructions (see .ad file)
3987   // vtableStubs also counts instructions in pd_code_size_limit.
3988   // Also do not verify_oop as this is called by verify_oop.
3989   if (CompressedOops::shift() != 0) {
3990     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
3991     if (CompressedOops::base() != NULL) {
3992       add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
3993     } else {
3994       add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
3995     }
3996   } else {
3997     assert (CompressedOops::base() == NULL, "sanity");
3998   }
3999 }
4000 
4001 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
4002   assert (UseCompressedOops, "should only be used for compressed headers");
4003   assert (Universe::heap() != NULL, "java heap should be initialized");
4004   // Cannot assert, unverified entry point counts instructions (see .ad file)
4005   // vtableStubs also counts instructions in pd_code_size_limit.
4006   // Also do not verify_oop as this is called by verify_oop.
4007   if (CompressedOops::shift() != 0) {
4008     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
4009     if (CompressedOops::base() != NULL) {
4010       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4011     } else {
4012       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
4013     }
4014   } else {
4015     assert (CompressedOops::base() == NULL, "sanity");
4016     if (dst != src) {
4017       mov(dst, src);
4018     }
4019   }
4020 }
4021 
4022 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
4023 
4024 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
4025   assert(UseCompressedClassPointers, "not using compressed class pointers");
4026   assert(Metaspace::initialized(), "metaspace not initialized yet");
4027 
4028   if (_klass_decode_mode != KlassDecodeNone) {
4029     return _klass_decode_mode;
4030   }
4031 
4032   assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
4033          || 0 == CompressedKlassPointers::shift(), "decode alg wrong");
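  // Pick one of three decode strategies (cached in _klass_decode_mode):
  //   KlassDecodeZero - base == NULL, so decoding is just a shift;
  //   KlassDecodeXor  - base is a valid logical immediate disjoint from
  //                     the klass range, so decoding is eor (+ shift);
  //   KlassDecodeMovk - the shifted base only has bits in [47:32], so
  //                     decoding can patch them in with a single movk.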
4034 
4035   if (CompressedKlassPointers::base() == NULL) {
4036     return (_klass_decode_mode = KlassDecodeZero);
4037   }
4038 
4039   if (operand_valid_for_logical_immediate(
4040         /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
4041     const uint64_t range_mask =
4042       (1ULL << log2i(CompressedKlassPointers::range())) - 1;
4043     if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
4044       return (_klass_decode_mode = KlassDecodeXor);
4045     }
4046   }
4047 
4048   const uint64_t shifted_base =
4049     (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4050   guarantee((shifted_base & 0xffff0000ffffffff) == 0,
4051             "compressed class base bad alignment");
4052 
4053   return (_klass_decode_mode = KlassDecodeMovk);
4054 }
4055 
4056 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
4057   switch (klass_decode_mode()) {
4058   case KlassDecodeZero:
4059     if (CompressedKlassPointers::shift() != 0) {
4060       lsr(dst, src, LogKlassAlignmentInBytes);
4061     } else {
4062       if (dst != src) mov(dst, src);
4063     }
4064     break;
4065 
4066   case KlassDecodeXor:
4067     if (CompressedKlassPointers::shift() != 0) {
4068       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4069       lsr(dst, dst, LogKlassAlignmentInBytes);
4070     } else {
4071       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4072     }
4073     break;
4074 
4075   case KlassDecodeMovk:
4076     if (CompressedKlassPointers::shift() != 0) {
4077       ubfx(dst, src, LogKlassAlignmentInBytes, 32);
4078     } else {
4079       movw(dst, src);
4080     }
4081     break;
4082 
4083   case KlassDecodeNone:
4084     ShouldNotReachHere();
4085     break;
4086   }
4087 }
4088 
4089 void MacroAssembler::encode_klass_not_null(Register r) {
4090   encode_klass_not_null(r, r);
4091 }
4092 
4093 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4094   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4095 
4096   switch (klass_decode_mode()) {
4097   case KlassDecodeZero:
4098     if (CompressedKlassPointers::shift() != 0) {
4099       lsl(dst, src, LogKlassAlignmentInBytes);
4100     } else {
4101       if (dst != src) mov(dst, src);
4102     }
4103     break;
4104 
4105   case KlassDecodeXor:
4106     if (CompressedKlassPointers::shift() != 0) {
4107       lsl(dst, src, LogKlassAlignmentInBytes);
4108       eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
4109     } else {
4110       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
4111     }
4112     break;
4113 
4114   case KlassDecodeMovk: {
4115     const uint64_t shifted_base =
4116       (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
4117 
4118     if (dst != src) movw(dst, src);
4119     movk(dst, shifted_base >> 32, 32);
4120 
4121     if (CompressedKlassPointers::shift() != 0) {
4122       lsl(dst, dst, LogKlassAlignmentInBytes);
4123     }
4124 
4125     break;
4126   }
4127 
4128   case KlassDecodeNone:
4129     ShouldNotReachHere();
4130     break;
4131   }
4132 }
4133 
4134 void  MacroAssembler::decode_klass_not_null(Register r) {
4135   decode_klass_not_null(r, r);
4136 }
4137 
4138 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
4139 #ifdef ASSERT
4140   {
4141     ThreadInVMfromUnknown tiv;
4142     assert (UseCompressedOops, "should only be used for compressed oops");
4143     assert (Universe::heap() != NULL, "java heap should be initialized");
4144     assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
4145     assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
4146   }
4147 #endif
4148   int oop_index = oop_recorder()->find_index(obj);
4149   InstructionMark im(this);
4150   RelocationHolder rspec = oop_Relocation::spec(oop_index);
4151   code_section()->relocate(inst_mark(), rspec);
  movz(dst, 0xDEAD, 16);   // placeholder value,
  movk(dst, 0xBEEF);       // patched later via the oop relocation
4154 }
4155 
4156 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
4157   assert (UseCompressedClassPointers, "should only be used for compressed headers");
4158   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
4159   int index = oop_recorder()->find_index(k);
4160   assert(! Universe::heap()->is_in(k), "should not be an oop");
4161 
4162   InstructionMark im(this);
4163   RelocationHolder rspec = metadata_Relocation::spec(index);
4164   code_section()->relocate(inst_mark(), rspec);
4165   narrowKlass nk = CompressedKlassPointers::encode(k);
4166   movz(dst, (nk >> 16), 16);
4167   movk(dst, nk & 0xffff);
4168 }
4169 
4170 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4171                                     Register dst, Address src,
4172                                     Register tmp1, Register thread_tmp) {
4173   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4174   decorators = AccessInternal::decorator_fixup(decorators);
4175   bool as_raw = (decorators & AS_RAW) != 0;
4176   if (as_raw) {
4177     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4178   } else {
4179     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4180   }
4181 }
4182 
4183 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4184                                      Address dst, Register src,
4185                                      Register tmp1, Register thread_tmp) {
4186   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4187   decorators = AccessInternal::decorator_fixup(decorators);
4188   bool as_raw = (decorators & AS_RAW) != 0;
4189   if (as_raw) {
4190     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4191   } else {
4192     bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4193   }
4194 }
4195 
4196 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4197                                    Register thread_tmp, DecoratorSet decorators) {
4198   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4199 }
4200 
4201 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4202                                             Register thread_tmp, DecoratorSet decorators) {
4203   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
4204 }
4205 
4206 void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
4207                                     Register thread_tmp, DecoratorSet decorators) {
4208   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4209 }
4210 
4211 // Used for storing NULLs.
4212 void MacroAssembler::store_heap_oop_null(Address dst) {
4213   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg);
4214 }
4215 
4216 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
4217   assert(oop_recorder() != NULL, "this assembler needs a Recorder");
4218   int index = oop_recorder()->allocate_metadata_index(obj);
4219   RelocationHolder rspec = metadata_Relocation::spec(index);
4220   return Address((address)obj, rspec);
4221 }
4222 
// Move an oop into a register.  immediate is true if we want
// immediate instructions and nmethod entry barriers are not enabled,
// i.e. we are not going to patch this instruction while the code is
// being executed by another thread.
4227 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
4228   int oop_index;
4229   if (obj == NULL) {
4230     oop_index = oop_recorder()->allocate_oop_index(obj);
4231   } else {
4232 #ifdef ASSERT
4233     {
4234       ThreadInVMfromUnknown tiv;
4235       assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
4236     }
4237 #endif
4238     oop_index = oop_recorder()->find_index(obj);
4239   }
4240   RelocationHolder rspec = oop_Relocation::spec(oop_index);
4241 
  // nmethod entry barriers necessitate using the constant pool. They have to be
  // ordered with respect to oop accesses.
  // Using immediate literals would necessitate ISBs.
4245   if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate) {
4246     address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
4247     ldr_constant(dst, Address(dummy, rspec));
4248   } else
4249     mov(dst, Address((address)obj, rspec));
4250 
4251 }
4252 
4253 // Move a metadata address into a register.
4254 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
4255   int oop_index;
4256   if (obj == NULL) {
4257     oop_index = oop_recorder()->allocate_metadata_index(obj);
4258   } else {
4259     oop_index = oop_recorder()->find_index(obj);
4260   }
4261   RelocationHolder rspec = metadata_Relocation::spec(oop_index);
4262   mov(dst, Address((address)obj, rspec));
4263 }
4264 
4265 Address MacroAssembler::constant_oop_address(jobject obj) {
4266 #ifdef ASSERT
4267   {
4268     ThreadInVMfromUnknown tiv;
4269     assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
4270     assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
4271   }
4272 #endif
4273   int oop_index = oop_recorder()->find_index(obj);
4274   return Address((address)obj, oop_Relocation::spec(oop_index));
4275 }
4276 
4277 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4278 void MacroAssembler::tlab_allocate(Register obj,
4279                                    Register var_size_in_bytes,
4280                                    int con_size_in_bytes,
4281                                    Register t1,
4282                                    Register t2,
4283                                    Label& slow_case) {
4284   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4285   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
4286 }
4287 
4288 // Defines obj, preserves var_size_in_bytes
4289 void MacroAssembler::eden_allocate(Register obj,
4290                                    Register var_size_in_bytes,
4291                                    int con_size_in_bytes,
4292                                    Register t1,
4293                                    Label& slow_case) {
4294   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4295   bs->eden_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
4296 }
4297 
4298 void MacroAssembler::verify_tlab() {
4299 #ifdef ASSERT
4300   if (UseTLAB && VerifyOops) {
4301     Label next, ok;
4302 
4303     stp(rscratch2, rscratch1, Address(pre(sp, -16)));
4304 
4305     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4306     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
4307     cmp(rscratch2, rscratch1);
4308     br(Assembler::HS, next);
4309     STOP("assert(top >= start)");
4310     should_not_reach_here();
4311 
4312     bind(next);
4313     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
4314     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
4315     cmp(rscratch2, rscratch1);
4316     br(Assembler::HS, ok);
4317     STOP("assert(top <= end)");
4318     should_not_reach_here();
4319 
4320     bind(ok);
4321     ldp(rscratch2, rscratch1, Address(post(sp, 16)));
4322   }
4323 #endif
4324 }
4325 
4326 // Writes to successive stack pages until the given size is reached, to
4327 // check for stack overflow + shadow pages.  This clobbers tmp.
4328 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
4329   assert_different_registers(tmp, size, rscratch1);
4330   mov(tmp, sp);
4331   // Bang stack for total size given plus shadow page size.
4332   // Bang one page at a time because large size can bang beyond yellow and
4333   // red zones.
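       // For example, with 4K pages and size = 12K the loop below stores at
       // tmp-4K, tmp-8K and tmp-12K before falling through to the
       // shadow-page loop.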
4334   Label loop;
4335   mov(rscratch1, os::vm_page_size());
4336   bind(loop);
4337   lea(tmp, Address(tmp, -os::vm_page_size()));
4338   subsw(size, size, rscratch1);
4339   str(size, Address(tmp));
4340   br(Assembler::GT, loop);
4341 
4342   // Bang down shadow pages too.
4343   // At this point, (tmp-0) is the last address touched, so don't
4344   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
4345   // was post-decremented.)  Skip this address by starting at i=1, and
4346   // touch a few more pages below.  N.B.  It is important to touch all
4347   // the way down to and including i=StackShadowPages.
4348   for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
4349     // this could be any sized move but it can double as a debugging crumb,
4350     // so the bigger the better.
4351     lea(tmp, Address(tmp, -os::vm_page_size()));
4352     str(size, Address(tmp));
4353   }
4354 }
4355 
4356 // Move the address of the polling page into dest.
4357 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
4358   ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
4359 }
4360 
4361 // Read the polling page.  The address of the polling page must
4362 // already be in r.
4363 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
4364   address mark;
4365   {
4366     InstructionMark im(this);
4367     code_section()->relocate(inst_mark(), rtype);
4368     ldrw(zr, Address(r, 0));
4369     mark = inst_mark();
4370   }
4371   verify_cross_modify_fence_not_required();
4372   return mark;
4373 }
4374 
4375 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
4376   relocInfo::relocType rtype = dest.rspec().reloc()->type();
4377   uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
4378   uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
4379   uint64_t dest_page = (uint64_t)dest.target() >> 12;
4380   int64_t offset_low = dest_page - low_page;
4381   int64_t offset_high = dest_page - high_page;
4382 
4383   assert(is_valid_AArch64_address(dest.target()), "bad address");
4384   assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
4385 
4386   InstructionMark im(this);
4387   code_section()->relocate(inst_mark(), dest.rspec());
4388   // 8143067: Ensure that the adrp can reach the dest from anywhere within
4389   // the code cache so that if it is relocated we know it will still reach
4390   if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
4391     _adrp(reg1, dest.target());
4392   } else {
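         // adrp can only reach +/- 4GB.  Build a substitute address that keeps
         // the target's low 32 bits but borrows bits 32..47 from pc, so the
         // adrp is always in range, then overwrite bits 32..47 with the
         // target's own via movk (addresses here fit in 48 bits).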
4393     uint64_t target = (uint64_t)dest.target();
4394     uint64_t adrp_target
4395       = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
4396 
4397     _adrp(reg1, (address)adrp_target);
4398     movk(reg1, target >> 32, 32);
4399   }
4400   byte_offset = (uint64_t)dest.target() & 0xfff;
4401 }
4402 
4403 void MacroAssembler::load_byte_map_base(Register reg) {
4404   CardTable::CardValue* byte_map_base =
4405     ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
4406 
4407   // Strictly speaking the byte_map_base isn't an address at all, and it might
4408   // even be negative. It is thus materialised as a constant.
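       // A movz/movk sequence can materialise any 64-bit value, whereas an
       // adrp-based form would require a valid, reachable address.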
4409   mov(reg, (uint64_t)byte_map_base);
4410 }
4411 
4412 void MacroAssembler::build_frame(int framesize) {
4413   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
4414   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
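       // Below (1 << 9) + 2 * wordSize, stp can reach the FP/LR slot directly:
       // its scaled signed 7-bit offset reaches up to 504 bytes, covering
       // framesize - 2 * wordSize here.  Above that, FP/LR are pushed first and
       // sp is lowered separately; sub's unsigned 12-bit immediate then covers
       // frames up to (1 << 12) + 2 * wordSize, beyond which the amount must
       // be materialised in rscratch1.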
4415   if (framesize < ((1 << 9) + 2 * wordSize)) {
4416     sub(sp, sp, framesize);
4417     stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
4418     if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
4419   } else {
4420     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
4421     if (PreserveFramePointer) mov(rfp, sp);
4422     if (framesize < ((1 << 12) + 2 * wordSize))
4423       sub(sp, sp, framesize - 2 * wordSize);
4424     else {
4425       mov(rscratch1, framesize - 2 * wordSize);
4426       sub(sp, sp, rscratch1);
4427     }
4428   }
4429   verify_cross_modify_fence_not_required();
4430 }
4431 
4432 void MacroAssembler::remove_frame(int framesize) {
4433   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
4434   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
4435   if (framesize < ((1 << 9) + 2 * wordSize)) {
4436     ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
4437     add(sp, sp, framesize);
4438   } else {
4439     if (framesize < ((1 << 12) + 2 * wordSize))
4440       add(sp, sp, framesize - 2 * wordSize);
4441     else {
4442       mov(rscratch1, framesize - 2 * wordSize);
4443       add(sp, sp, rscratch1);
4444     }
4445     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
4446   }
4447 }
4448 
4449 
4450 // This method checks whether the provided byte array contains a byte with the highest bit set.
4451 address MacroAssembler::has_negatives(Register ary1, Register len, Register result) {
4452     // The simple and most common case, a small aligned array that is not at
4453     // the end of a memory page, is handled here.  All other cases are in the stub.
4454     Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
4455     const uint64_t UPPER_BIT_MASK=0x8080808080808080;
4456     assert_different_registers(ary1, len, result);
4457 
4458     cmpw(len, 0);
4459     br(LE, SET_RESULT);
4460     cmpw(len, 4 * wordSize);
4461     br(GE, STUB_LONG); // size > 32 then go to stub
4462 
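         // Page-crossing check: shifting the address left by
         // (64 - log2(page_size)) keeps only the in-page offset, now in the
         // top bits.  Adding the read size shifted the same way carries out
         // (CS) exactly when the 32-byte read would run up to or past the end
         // of the page; e.g. with 4K pages, when (ary1 & 0xfff) + 32 > 0xfff.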
4463     int shift = 64 - exact_log2(os::vm_page_size());
4464     lsl(rscratch1, ary1, shift);
4465     mov(rscratch2, (size_t)(4 * wordSize) << shift);
4466     adds(rscratch2, rscratch1, rscratch2);  // At end of page?
4467     br(CS, STUB); // at the end of page then go to stub
4468     subs(len, len, wordSize);
4469     br(LT, END);
4470 
4471   BIND(LOOP);
4472     ldr(rscratch1, Address(post(ary1, wordSize)));
4473     tst(rscratch1, UPPER_BIT_MASK);
4474     br(NE, SET_RESULT);
4475     subs(len, len, wordSize);
4476     br(GE, LOOP);
4477     cmpw(len, -wordSize);
4478     br(EQ, SET_RESULT);
4479 
4480   BIND(END);
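         // len is the negative count of bytes still unchecked (-7..-1).  The
         // bytes beyond the array load into the most-significant end of the
         // word (little-endian), so shifting left by (-len * 8) bits discards
         // exactly those stale bytes before the sign-bit test.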
4481     ldr(result, Address(ary1));
4482     sub(len, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
4483     lslv(result, result, len);
4484     tst(result, UPPER_BIT_MASK);
4485     b(SET_RESULT);
4486 
4487   BIND(STUB);
4488     RuntimeAddress has_neg = RuntimeAddress(StubRoutines::aarch64::has_negatives());
4489     assert(has_neg.target() != NULL, "has_negatives stub has not been generated");
4490     address tpc1 = trampoline_call(has_neg);
4491     if (tpc1 == NULL) {
4492       DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
4493       postcond(pc() == badAddress);
4494       return NULL;
4495     }
4496     b(DONE);
4497 
4498   BIND(STUB_LONG);
4499     RuntimeAddress has_neg_long = RuntimeAddress(StubRoutines::aarch64::has_negatives_long());
4500     assert(has_neg_long.target() != NULL, "has_negatives stub has not been generated");
4501     address tpc2 = trampoline_call(has_neg_long);
4502     if (tpc2 == NULL) {
4503       DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
4504       postcond(pc() == badAddress);
4505       return NULL;
4506     }
4507     b(DONE);
4508 
4509   BIND(SET_RESULT);
4510     cset(result, NE); // set true or false
4511 
4512   BIND(DONE);
4513   postcond(pc() != badAddress);
4514   return pc();
4515 }
4516 
4517 // Clobbers: rscratch1, rscratch2, rflags
4518 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
4519 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
4520                                       Register tmp4, Register tmp5, Register result,
4521                                       Register cnt1, int elem_size) {
4522   Label DONE, SAME;
4523   Register tmp1 = rscratch1;
4524   Register tmp2 = rscratch2;
4525   Register cnt2 = tmp2;  // cnt2 only used in array length compare
4526   int elem_per_word = wordSize/elem_size;
4527   int log_elem_size = exact_log2(elem_size);
4528   int length_offset = arrayOopDesc::length_offset_in_bytes();
4529   int base_offset
4530     = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
4531   int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
4532 
4533   assert(elem_size == 1 || elem_size == 2, "must be char or byte");
4534   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
4535 
4536 #ifndef PRODUCT
4537   {
4538     const char kind = (elem_size == 2) ? 'U' : 'L';
4539     char comment[64];
4540     snprintf(comment, sizeof comment, "array_equals%c{", kind);
4541     BLOCK_COMMENT(comment);
4542   }
4543 #endif
4544 
4545   // if (a1 == a2)
4546   //     return true;
4547   cmpoop(a1, a2); // May have read barriers for a1 and a2.
4548   br(EQ, SAME);
4549 
4550   if (UseSimpleArrayEquals) {
4551     Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
4552     // if (a1 == null || a2 == null)
4553     //     return false;
4554     // if either pointer is null then (a1 & a2) == 0; the converse can only
4555     // fail for two non-null pointers that share no set bits, which is very
4556     // rare, so testing a1 & a2 saves one branch in most cases
4557     tst(a1, a2);
4558     mov(result, false);
4559     br(EQ, A_MIGHT_BE_NULL);
4560     // if (a1.length != a2.length)
4561     //      return false;
4562     bind(A_IS_NOT_NULL);
4563     ldrw(cnt1, Address(a1, length_offset));
4564     ldrw(cnt2, Address(a2, length_offset));
4565     eorw(tmp5, cnt1, cnt2);
4566     cbnzw(tmp5, DONE);
4567     lea(a1, Address(a1, base_offset));
4568     lea(a2, Address(a2, base_offset));
4569     // Check for short strings, i.e. smaller than wordSize.
4570     subs(cnt1, cnt1, elem_per_word);
4571     br(Assembler::LT, SHORT);
4572     // Main 8 byte comparison loop.
4573     bind(NEXT_WORD); {
4574       ldr(tmp1, Address(post(a1, wordSize)));
4575       ldr(tmp2, Address(post(a2, wordSize)));
4576       subs(cnt1, cnt1, elem_per_word);
4577       eor(tmp5, tmp1, tmp2);
4578       cbnz(tmp5, DONE);
4579     } br(GT, NEXT_WORD);
4580     // Last longword.  In the case where length == 4 we compare the
4581     // same longword twice, but that's still faster than another
4582     // conditional branch.
4583     // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
4584     // length == 4.
4585     if (log_elem_size > 0)
4586       lsl(cnt1, cnt1, log_elem_size);
4587     ldr(tmp3, Address(a1, cnt1));
4588     ldr(tmp4, Address(a2, cnt1));
4589     eor(tmp5, tmp3, tmp4);
4590     cbnz(tmp5, DONE);
4591     b(SAME);
4592     bind(A_MIGHT_BE_NULL);
4593     // in case both a1 and a2 are not-null, proceed with loads
4594     cbz(a1, DONE);
4595     cbz(a2, DONE);
4596     b(A_IS_NOT_NULL);
4597     bind(SHORT);
4598 
4599     tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
4600     {
4601       ldrw(tmp1, Address(post(a1, 4)));
4602       ldrw(tmp2, Address(post(a2, 4)));
4603       eorw(tmp5, tmp1, tmp2);
4604       cbnzw(tmp5, DONE);
4605     }
4606     bind(TAIL03);
4607     tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
4608     {
4609       ldrh(tmp3, Address(post(a1, 2)));
4610       ldrh(tmp4, Address(post(a2, 2)));
4611       eorw(tmp5, tmp3, tmp4);
4612       cbnzw(tmp5, DONE);
4613     }
4614     bind(TAIL01);
4615     if (elem_size == 1) { // Only needed when comparing byte arrays.
4616       tbz(cnt1, 0, SAME); // 0-1 bytes left.
4617       {
4618         ldrb(tmp1, a1);
4619         ldrb(tmp2, a2);
4620         eorw(tmp5, tmp1, tmp2);
4621         cbnzw(tmp5, DONE);
4622       }
4623     }
4624   } else {
4625     Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
4626         CSET_EQ, LAST_CHECK;
4627     mov(result, false);
4628     cbz(a1, DONE);
4629     ldrw(cnt1, Address(a1, length_offset));
4630     cbz(a2, DONE);
4631     ldrw(cnt2, Address(a2, length_offset));
4632     // on most CPUs a2 is (surprisingly) still tied up by the ldrw above, so
4633     // it is faster to take another branch here before comparing the lengths
4634     cmp(cnt1, (u1)elem_per_word);
4635     br(LE, SHORT); // short or same
4636     ldr(tmp3, Address(pre(a1, base_offset)));
4637     subs(zr, cnt1, stubBytesThreshold);
4638     br(GE, STUB);
4639     ldr(tmp4, Address(pre(a2, base_offset)));
4640     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
4641     cmp(cnt2, cnt1);
4642     br(NE, DONE);
4643 
4644     // Main 16 byte comparison loop with 2 exits
4645     bind(NEXT_DWORD); {
4646       ldr(tmp1, Address(pre(a1, wordSize)));
4647       ldr(tmp2, Address(pre(a2, wordSize)));
4648       subs(cnt1, cnt1, 2 * elem_per_word);
4649       br(LE, TAIL);
4650       eor(tmp4, tmp3, tmp4);
4651       cbnz(tmp4, DONE);
4652       ldr(tmp3, Address(pre(a1, wordSize)));
4653       ldr(tmp4, Address(pre(a2, wordSize)));
4654       cmp(cnt1, (u1)elem_per_word);
4655       br(LE, TAIL2);
4656       cmp(tmp1, tmp2);
4657     } br(EQ, NEXT_DWORD);
4658     b(DONE);
4659 
4660     bind(TAIL);
4661     eor(tmp4, tmp3, tmp4);
4662     eor(tmp2, tmp1, tmp2);
4663     lslv(tmp2, tmp2, tmp5);
4664     orr(tmp5, tmp4, tmp2);
4665     cmp(tmp5, zr);
4666     b(CSET_EQ);
4667 
4668     bind(TAIL2);
4669     eor(tmp2, tmp1, tmp2);
4670     cbnz(tmp2, DONE);
4671     b(LAST_CHECK);
4672 
4673     bind(STUB);
4674     ldr(tmp4, Address(pre(a2, base_offset)));
4675     cmp(cnt2, cnt1);
4676     br(NE, DONE);
4677     if (elem_size == 2) { // convert to byte counter
4678       lsl(cnt1, cnt1, 1);
4679     }
4680     eor(tmp5, tmp3, tmp4);
4681     cbnz(tmp5, DONE);
4682     RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
4683     assert(stub.target() != NULL, "array_equals_long stub has not been generated");
4684     address tpc = trampoline_call(stub);
4685     if (tpc == NULL) {
4686       DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
4687       postcond(pc() == badAddress);
4688       return NULL;
4689     }
4690     b(DONE);
4691 
4692     // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2)
4693     // so, if a2 == null => return false(0), else return true, so we can return a2
4694     mov(result, a2);
4695     b(DONE);
4696     bind(SHORT);
4697     cmp(cnt2, cnt1);
4698     br(NE, DONE);
4699     cbz(cnt1, SAME);
4700     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
4701     ldr(tmp3, Address(a1, base_offset));
4702     ldr(tmp4, Address(a2, base_offset));
4703     bind(LAST_CHECK);
4704     eor(tmp4, tmp3, tmp4);
4705     lslv(tmp5, tmp4, tmp5);
4706     cmp(tmp5, zr);
4707     bind(CSET_EQ);
4708     cset(result, EQ);
4709     b(DONE);
4710   }
4711 
4712   bind(SAME);
4713   mov(result, true);
4714   // That's it.
4715   bind(DONE);
4716 
4717   BLOCK_COMMENT("} array_equals");
4718   postcond(pc() != badAddress);
4719   return pc();
4720 }
4721 
4722 // Compare Strings
4723 
4724 // For Strings we're passed the address of the first characters in a1
4725 // and a2 and the length in cnt1.
4726 // elem_size is the element size in bytes: either 1 or 2.
4727 // There are two implementations.  For arrays >= 8 bytes, all
4728 // comparisons (including the final one, which may overlap) are
4729 // performed 8 bytes at a time.  For strings < 8 bytes, we compare a
4730 // halfword, then a short, and then a byte.
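     // For example, with a length of 11 bytes the main loop compares bytes
     // 0..7 and the final overlapping load compares bytes 3..10.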
4731 
4732 void MacroAssembler::string_equals(Register a1, Register a2,
4733                                    Register result, Register cnt1, int elem_size)
4734 {
4735   Label SAME, DONE, SHORT, NEXT_WORD;
4736   Register tmp1 = rscratch1;
4737   Register tmp2 = rscratch2;
4738   Register cnt2 = tmp2;  // cnt2 only used in array length compare
4739 
4740   assert(elem_size == 1 || elem_size == 2, "must be 2 or 1 byte");
4741   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
4742 
4743 #ifndef PRODUCT
4744   {
4745     const char kind = (elem_size == 2) ? 'U' : 'L';
4746     char comment[64];
4747     snprintf(comment, sizeof comment, "{string_equals%c", kind);
4748     BLOCK_COMMENT(comment);
4749   }
4750 #endif
4751 
4752   mov(result, false);
4753 
4754   // Check for short strings, i.e. smaller than wordSize.
4755   subs(cnt1, cnt1, wordSize);
4756   br(Assembler::LT, SHORT);
4757   // Main 8 byte comparison loop.
4758   bind(NEXT_WORD); {
4759     ldr(tmp1, Address(post(a1, wordSize)));
4760     ldr(tmp2, Address(post(a2, wordSize)));
4761     subs(cnt1, cnt1, wordSize);
4762     eor(tmp1, tmp1, tmp2);
4763     cbnz(tmp1, DONE);
4764   } br(GT, NEXT_WORD);
4765   // Last longword.  In the case where length == 4 we compare the
4766   // same longword twice, but that's still faster than another
4767   // conditional branch.
4768   // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
4769   // length == 4.
4770   ldr(tmp1, Address(a1, cnt1));
4771   ldr(tmp2, Address(a2, cnt1));
4772   eor(tmp2, tmp1, tmp2);
4773   cbnz(tmp2, DONE);
4774   b(SAME);
4775 
4776   bind(SHORT);
4777   Label TAIL03, TAIL01;
4778 
4779   tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
4780   {
4781     ldrw(tmp1, Address(post(a1, 4)));
4782     ldrw(tmp2, Address(post(a2, 4)));
4783     eorw(tmp1, tmp1, tmp2);
4784     cbnzw(tmp1, DONE);
4785   }
4786   bind(TAIL03);
4787   tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
4788   {
4789     ldrh(tmp1, Address(post(a1, 2)));
4790     ldrh(tmp2, Address(post(a2, 2)));
4791     eorw(tmp1, tmp1, tmp2);
4792     cbnzw(tmp1, DONE);
4793   }
4794   bind(TAIL01);
4795   if (elem_size == 1) { // Only needed when comparing 1-byte elements
4796     tbz(cnt1, 0, SAME); // 0-1 bytes left.
4797     {
4798       ldrb(tmp1, a1);
4799       ldrb(tmp2, a2);
4800       eorw(tmp1, tmp1, tmp2);
4801       cbnzw(tmp1, DONE);
4802     }
4803   }
4804   // Arrays are equal.
4805   bind(SAME);
4806   mov(result, true);
4807 
4808   // That's it.
4809   bind(DONE);
4810   BLOCK_COMMENT("} string_equals");
4811 }
4812 
4813 
4814 // The size of the blocks erased by the zero_blocks stub.  We must
4815 // handle anything smaller than this ourselves in zero_words().
4816 const int MacroAssembler::zero_words_block_size = 8;
4817 
4818 // zero_words() is used by C2 ClearArray patterns and by
4819 // C1_MacroAssembler.  It is as small as possible, handling small word
4820 // counts locally and delegating anything larger to the zero_blocks
4821 // stub.  It is expanded many times in compiled code, so it is
4822 // important to keep it short.
4823 
4824 // ptr:   Address of a buffer to be zeroed.
4825 // cnt:   Count in HeapWords.
4826 //
4827 // ptr, cnt, rscratch1, and rscratch2 are clobbered.
4828 address MacroAssembler::zero_words(Register ptr, Register cnt)
4829 {
4830   assert(is_power_of_2(zero_words_block_size), "adjust this");
4831 
4832   BLOCK_COMMENT("zero_words {");
4833   assert(ptr == r10 && cnt == r11, "mismatch in register usage");
4836 
4837   subs(rscratch1, cnt, zero_words_block_size);
4838   Label around;
4839   br(LO, around);
4840   {
4841     RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
4842     assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated");
4843     // Make sure this is a C2 compilation. C1 allocates space only for
4844     // trampoline stubs generated by Call LIR ops, and in any case it
4845     // makes sense for a C1 compilation task to proceed as quickly as
4846     // possible.
4847     CompileTask* task;
4848     if (StubRoutines::aarch64::complete()
4849         && Thread::current()->is_Compiler_thread()
4850         && (task = ciEnv::current()->task())
4851         && is_c2_compile(task->comp_level())) {
4852       address tpc = trampoline_call(zero_blocks);
4853       if (tpc == NULL) {
4854         DEBUG_ONLY(reset_labels(around));
4855         return NULL;
4856       }
4857     } else {
4858       far_call(zero_blocks);
4859     }
4860   }
4861   bind(around);
4862 
4863   // We have a few words left to do.  If zero_blocks ran, it has adjusted
4864   // r10 and r11 for us; otherwise cnt was below the block size to begin with.
4865   for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
4866     Label l;
4867     tbz(cnt, exact_log2(i), l);
4868     for (int j = 0; j < i; j += 2) {
4869       stp(zr, zr, post(ptr, 2 * BytesPerWord));
4870     }
4871     bind(l);
4872   }
4873   {
4874     Label l;
4875     tbz(cnt, 0, l);
4876     str(zr, Address(ptr));
4877     bind(l);
4878   }
4879 
4880   BLOCK_COMMENT("} zero_words");
4881   return pc();
4882 }
4883 
4884 // base:         Address of a buffer to be zeroed, 8-byte aligned.
4885 // cnt:          Immediate count in HeapWords.
4886 //
4887 // r10, r11, rscratch1, and rscratch2 are clobbered.
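     // Small constant counts are expanded inline below; anything above
     // BlockZeroingLowLimit / BytesPerWord words is handed to the register
     // variant of zero_words() above, which may call the zero_blocks stub.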
4888 address MacroAssembler::zero_words(Register base, uint64_t cnt)
4889 {
4890   assert(wordSize <= BlockZeroingLowLimit,
4891             "increase BlockZeroingLowLimit");
4892   address result = nullptr;
4893   if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
4894 #ifndef PRODUCT
4895     {
4896       char buf[64];
4897       snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
4898       BLOCK_COMMENT(buf);
4899     }
4900 #endif
4901     if (cnt >= 16) {
4902       uint64_t loops = cnt/16;
4903       if (loops > 1) {
4904         mov(rscratch2, loops - 1);
4905       }
4906       {
4907         Label loop;
4908         bind(loop);
4909         for (int i = 0; i < 16; i += 2) {
4910           stp(zr, zr, Address(base, i * BytesPerWord));
4911         }
4912         add(base, base, 16 * BytesPerWord);
4913         if (loops > 1) {
4914           subs(rscratch2, rscratch2, 1);
4915           br(GE, loop);
4916         }
4917       }
4918     }
4919     cnt %= 16;
4920     int i = cnt & 1;  // store any odd word to start
4921     if (i) str(zr, Address(base));
4922     for (; i < (int)cnt; i += 2) {
4923       stp(zr, zr, Address(base, i * wordSize));
4924     }
4925     BLOCK_COMMENT("} zero_words");
4926     result = pc();
4927   } else {
4928     mov(r10, base); mov(r11, cnt);
4929     result = zero_words(r10, r11);
4930   }
4931   return result;
4932 }
4933 
4934 // Zero blocks of memory by using DC ZVA.
4935 //
4936 // Aligns the base address first sufficiently for DC ZVA, then uses
4937 // DC ZVA repeatedly for every full block.  cnt is the size to be
4938 // zeroed in HeapWords.  Returns the count of words left to be zeroed
4939 // in cnt.
4940 //
4941 // NOTE: This is intended to be used in the zero_blocks() stub.  If
4942 // you want to use it elsewhere, note that cnt must be >= 2*zva_length.
4943 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
4944   Register tmp = rscratch1;
4945   Register tmp2 = rscratch2;
4946   int zva_length = VM_Version::zva_length();
4947   Label initial_table_end, loop_zva;
4948   Label fini;
4949 
4950   // Base must be 16 byte aligned. If not just return and let caller handle it
4951   tst(base, 0x0f);
4952   br(Assembler::NE, fini);
4953   // Align base with ZVA length.
4954   neg(tmp, base);
4955   andr(tmp, tmp, zva_length - 1);
4956 
4957   // tmp: the number of bytes to be filled to align the base with ZVA length.
4958   add(base, base, tmp);
4959   sub(cnt, cnt, tmp, Assembler::ASR, 3);
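       // Computed branch into the stp table below: each stp is 4 bytes of
       // code and zeroes 16 bytes of memory, so stepping back (tmp >> 2)
       // bytes from initial_table_end executes exactly tmp / 16 stores.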
4960   adr(tmp2, initial_table_end);
4961   sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
4962   br(tmp2);
4963 
4964   for (int i = -zva_length + 16; i < 0; i += 16)
4965     stp(zr, zr, Address(base, i));
4966   bind(initial_table_end);
4967 
4968   sub(cnt, cnt, zva_length >> 3);
4969   bind(loop_zva);
4970   dc(Assembler::ZVA, base);
4971   subs(cnt, cnt, zva_length >> 3);
4972   add(base, base, zva_length);
4973   br(Assembler::GE, loop_zva);
4974   add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
4975   bind(fini);
4976 }
4977 
4978 // base:   Address of a buffer to be filled, 8-byte aligned.
4979 // cnt:    Count in 8-byte units.
4980 // value:  Value to be filled with.
4981 // base will point to the end of the buffer after filling.
4982 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
4983 {
4984 //  Algorithm:
4985 //
4986 //    scratch1 = cnt & 7;
4987 //    cnt -= scratch1;
4988 //    p += scratch1;
4989 //    switch (scratch1) {
4990 //      do {
4991 //        cnt -= 8;
4992 //          p[-8] = v;
4993 //        case 7:
4994 //          p[-7] = v;
4995 //        case 6:
4996 //          p[-6] = v;
4997 //          // ...
4998 //        case 1:
4999 //          p[-1] = v;
5000 //        case 0:
5001 //          p += 8;
5002 //      } while (cnt);
5003 //    }
5004 
5005   assert_different_registers(base, cnt, value, rscratch1, rscratch2);
5006 
5007   Label fini, skip, entry, loop;
5008   const int unroll = 8; // Number of stp instructions we'll unroll
5009 
5010   cbz(cnt, fini);
5011   tbz(base, 3, skip);
5012   str(value, Address(post(base, 8)));
5013   sub(cnt, cnt, 1);
5014   bind(skip);
5015 
5016   andr(rscratch1, cnt, (unroll-1) * 2);
5017   sub(cnt, cnt, rscratch1);
5018   add(base, base, rscratch1, Assembler::LSL, 3);
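       // Computed branch into the unrolled stp sequence: each stp is 4 bytes
       // of code and stores two words, so stepping back (rscratch1 << 1)
       // bytes from 'entry' stores exactly rscratch1 words on the first pass.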
5019   adr(rscratch2, entry);
5020   sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
5021   br(rscratch2);
5022 
5023   bind(loop);
5024   add(base, base, unroll * 16);
5025   for (int i = -unroll; i < 0; i++)
5026     stp(value, value, Address(base, i * 16));
5027   bind(entry);
5028   subs(cnt, cnt, unroll * 2);
5029   br(Assembler::GE, loop);
5030 
5031   tbz(cnt, 0, fini);
5032   str(value, Address(post(base, 8)));
5033   bind(fini);
5034 }
5035 
5036 // Intrinsic for
5037 //
5038 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
5039 //     return the number of characters copied.
5040 // - java/lang/StringUTF16.compress
5041 //     return zero (0) if copy fails, otherwise 'len'.
5042 //
5043 // This version always returns the number of characters copied, and does not
5044 // clobber the 'len' register. A successful copy will complete with the post-
5045 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
5046 // post-condition: 0 <= 'res' < 'len'.
5047 //
5048 // NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
5049 //       degrade performance (on Ampere Altra - Neoverse N1), to an extent
5050 //       beyond the acceptable, even though the footprint would be smaller.
5051 //       Using 'umaxv' in the ASCII-case comes with a small penalty but does
5052 //       avoid additional bloat.
5053 //
5054 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags
5055 void MacroAssembler::encode_iso_array(Register src, Register dst,
5056                                       Register len, Register res, bool ascii,
5057                                       FloatRegister vtmp0, FloatRegister vtmp1,
5058                                       FloatRegister vtmp2, FloatRegister vtmp3,
5059                                       FloatRegister vtmp4, FloatRegister vtmp5)
5060 {
5061   Register cnt = res;
5062   Register max = rscratch1;
5063   Register chk = rscratch2;
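       // A char encodes as ISO-8859-1 iff its high byte is zero, and as
       // ASCII iff, in addition, the sign bit of its low byte is clear.  The
       // vector loops below test the merged high bytes against zero and, for
       // ASCII, reduce the sign bits of the low bytes via cmlt/umaxv.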
5064 
5065   prfm(Address(src), PLDL1STRM);
5066   movw(cnt, len);
5067 
5068 #define ASCII(insn) do { if (ascii) { insn; } } while (0)
5069 
5070   Label LOOP_32, DONE_32, FAIL_32;
5071 
5072   BIND(LOOP_32);
5073   {
5074     cmpw(cnt, 32);
5075     br(LT, DONE_32);
5076     ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
5077     // Extract lower bytes.
5078     FloatRegister vlo0 = vtmp4;
5079     FloatRegister vlo1 = vtmp5;
5080     uzp1(vlo0, T16B, vtmp0, vtmp1);
5081     uzp1(vlo1, T16B, vtmp2, vtmp3);
5082     // Merge bits...
5083     orr(vtmp0, T16B, vtmp0, vtmp1);
5084     orr(vtmp2, T16B, vtmp2, vtmp3);
5085     // Extract merged upper bytes.
5086     FloatRegister vhix = vtmp0;
5087     uzp2(vhix, T16B, vtmp0, vtmp2);
5088     // ISO-check on hi-parts (all zero).
5089     //                          ASCII-check on lo-parts (no sign).
5090     FloatRegister vlox = vtmp1; // Merge lower bytes.
5091                                 ASCII(orr(vlox, T16B, vlo0, vlo1));
5092     umov(chk, vhix, D, 1);      ASCII(cmlt(vlox, T16B, vlox));
5093     fmovd(max, vhix);           ASCII(umaxv(vlox, T16B, vlox));
5094     orr(chk, chk, max);         ASCII(umov(max, vlox, B, 0));
5095                                 ASCII(orr(chk, chk, max));
5096     cbnz(chk, FAIL_32);
5097     subw(cnt, cnt, 32);
5098     st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
5099     b(LOOP_32);
5100   }
5101   BIND(FAIL_32);
5102   sub(src, src, 64);
5103   BIND(DONE_32);
5104 
5105   Label LOOP_8, SKIP_8;
5106 
5107   BIND(LOOP_8);
5108   {
5109     cmpw(cnt, 8);
5110     br(LT, SKIP_8);
5111     FloatRegister vhi = vtmp0;
5112     FloatRegister vlo = vtmp1;
5113     ld1(vtmp3, T8H, src);
5114     uzp1(vlo, T16B, vtmp3, vtmp3);
5115     uzp2(vhi, T16B, vtmp3, vtmp3);
5116     // ISO-check on hi-parts (all zero).
5117     //                          ASCII-check on lo-parts (no sign).
5118                                 ASCII(cmlt(vtmp2, T16B, vlo));
5119     fmovd(chk, vhi);            ASCII(umaxv(vtmp2, T16B, vtmp2));
5120                                 ASCII(umov(max, vtmp2, B, 0));
5121                                 ASCII(orr(chk, chk, max));
5122     cbnz(chk, SKIP_8);
5123 
5124     strd(vlo, Address(post(dst, 8)));
5125     subw(cnt, cnt, 8);
5126     add(src, src, 16);
5127     b(LOOP_8);
5128   }
5129   BIND(SKIP_8);
5130 
5131 #undef ASCII
5132 
5133   Label LOOP, DONE;
5134 
5135   cbz(cnt, DONE);
5136   BIND(LOOP);
5137   {
5138     Register chr = rscratch1;
5139     ldrh(chr, Address(post(src, 2)));
5140     tst(chr, ascii ? 0xff80 : 0xff00);
5141     br(NE, DONE);
5142     strb(chr, Address(post(dst, 1)));
5143     subs(cnt, cnt, 1);
5144     br(GT, LOOP);
5145   }
5146   BIND(DONE);
5147   // Return index where we stopped.
5148   subw(res, len, cnt);
5149 }
5150 
5151 // Inflate byte[] array to char[].
5152 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6
5153 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
5154                                            FloatRegister vtmp1, FloatRegister vtmp2,
5155                                            FloatRegister vtmp3, Register tmp4) {
5156   Label big, done, after_init, to_stub;
5157 
5158   assert_different_registers(src, dst, len, tmp4, rscratch1);
5159 
5160   fmovd(vtmp1, 0.0);
5161   lsrw(tmp4, len, 3);
5162   bind(after_init);
5163   cbnzw(tmp4, big);
5164   // Short string: less than 8 bytes.
5165   {
5166     Label loop, tiny;
5167 
5168     cmpw(len, 4);
5169     br(LT, tiny);
5170     // Use SIMD to do 4 bytes.
5171     ldrs(vtmp2, post(src, 4));
5172     zip1(vtmp3, T8B, vtmp2, vtmp1);
5173     subw(len, len, 4);
5174     strd(vtmp3, post(dst, 8));
5175 
5176     cbzw(len, done);
5177 
5178     // Do the remaining bytes by steam.
5179     bind(loop);
5180     ldrb(tmp4, post(src, 1));
5181     strh(tmp4, post(dst, 2));
5182     subw(len, len, 1);
5183 
5184     bind(tiny);
5185     cbnz(len, loop);
5186 
5187     b(done);
5188   }
5189 
5190   if (SoftwarePrefetchHintDistance >= 0) {
5191     bind(to_stub);
5192       RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
5193       assert(stub.target() != NULL, "large_byte_array_inflate stub has not been generated");
5194       address tpc = trampoline_call(stub);
5195       if (tpc == NULL) {
5196         DEBUG_ONLY(reset_labels(big, done));
5197         postcond(pc() == badAddress);
5198         return NULL;
5199       }
5200       b(after_init);
5201   }
5202 
5203   // Unpack the bytes 8 at a time.
5204   bind(big);
5205   {
5206     Label loop, around, loop_last, loop_start;
5207 
5208     if (SoftwarePrefetchHintDistance >= 0) {
5209       const int large_loop_threshold = (64 + 16)/8;
5210       ldrd(vtmp2, post(src, 8));
5211       andw(len, len, 7);
5212       cmp(tmp4, (u1)large_loop_threshold);
5213       br(GE, to_stub);
5214       b(loop_start);
5215 
5216       bind(loop);
5217       ldrd(vtmp2, post(src, 8));
5218       bind(loop_start);
5219       subs(tmp4, tmp4, 1);
5220       br(EQ, loop_last);
5221       zip1(vtmp2, T16B, vtmp2, vtmp1);
5222       ldrd(vtmp3, post(src, 8));
5223       st1(vtmp2, T8H, post(dst, 16));
5224       subs(tmp4, tmp4, 1);
5225       zip1(vtmp3, T16B, vtmp3, vtmp1);
5226       st1(vtmp3, T8H, post(dst, 16));
5227       br(NE, loop);
5228       b(around);
5229       bind(loop_last);
5230       zip1(vtmp2, T16B, vtmp2, vtmp1);
5231       st1(vtmp2, T8H, post(dst, 16));
5232       bind(around);
5233       cbz(len, done);
5234     } else {
5235       andw(len, len, 7);
5236       bind(loop);
5237       ldrd(vtmp2, post(src, 8));
5238       sub(tmp4, tmp4, 1);
5239       zip1(vtmp3, T16B, vtmp2, vtmp1);
5240       st1(vtmp3, T8H, post(dst, 16));
5241       cbnz(tmp4, loop);
5242     }
5243   }
5244 
5245   // Do the tail of up to 8 bytes.
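       // src is moved to the end of the input so the ldrd below re-reads the
       // last 8 source bytes (possibly overlapping bytes already inflated);
       // the final strq likewise stores the last 16 output bytes.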
5246   add(src, src, len);
5247   ldrd(vtmp3, Address(src, -8));
5248   add(dst, dst, len, ext::uxtw, 1);
5249   zip1(vtmp3, T16B, vtmp3, vtmp1);
5250   strq(vtmp3, Address(dst, -16));
5251 
5252   bind(done);
5253   postcond(pc() != badAddress);
5254   return pc();
5255 }
5256 
5257 // Compress char[] array to byte[].
5258 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
5259                                          Register res,
5260                                          FloatRegister tmp0, FloatRegister tmp1,
5261                                          FloatRegister tmp2, FloatRegister tmp3,
5262                                          FloatRegister tmp4, FloatRegister tmp5) {
5263   encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
5264   // Adjust result: res == len ? len : 0
5265   cmp(len, res);
5266   csel(res, res, zr, EQ);
5267 }
5268 
5269 // get_thread() can be called anywhere inside generated code so we
5270 // need to save whatever non-callee save context might get clobbered
5271 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5272 // the call setup code.
5273 //
5274 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5275 // On other systems, the helper is an ordinary C function, so any caller-saved register may be clobbered.
5276 //
5277 void MacroAssembler::get_thread(Register dst) {
5278   RegSet saved_regs =
5279     LINUX_ONLY(RegSet::range(r0, r1)  + lr - dst)
5280     NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
5281 
5282   push(saved_regs, sp);
5283 
5284   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5285   blr(lr);
5286   if (dst != c_rarg0) {
5287     mov(dst, c_rarg0);
5288   }
5289 
5290   pop(saved_regs, sp);
5291 }
5292 
5293 void MacroAssembler::cache_wb(Address line) {
5294   assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
5295   assert(line.index() == noreg, "index should be noreg");
5296   assert(line.offset() == 0, "offset should be 0");
5297   // would like to assert this
5298   // assert(line._ext.shift == 0, "shift should be zero");
5299   if (VM_Version::features() & VM_Version::CPU_DCPOP) {
5300     // writeback using clear virtual address to point of persistence
5301     dc(Assembler::CVAP, line.base());
5302   } else {
5303     // no need to generate anything as Unsafe.writebackMemory should
5304     // never invoke this stub
5305   }
5306 }
5307 
5308 void MacroAssembler::cache_wbsync(bool is_pre) {
5309   // we only need a barrier post sync
5310   if (!is_pre) {
5311     membar(Assembler::AnyAny);
5312   }
5313 }
5314 
5315 void MacroAssembler::verify_sve_vector_length() {
5316   // Make sure that native code does not change SVE vector length.
5317   if (!UseSVE) return;
5318   Label verify_ok;
5319   movw(rscratch1, zr);
5320   sve_inc(rscratch1, B);
5321   subsw(zr, rscratch1, VM_Version::get_initial_sve_vector_length());
5322   br(EQ, verify_ok);
5323   stop("Error: SVE vector length has changed since jvm startup");
5324   bind(verify_ok);
5325 }
5326 
5327 void MacroAssembler::verify_ptrue() {
5328   Label verify_ok;
5329   if (!UseSVE) {
5330     return;
5331   }
5332   sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
5333   sve_dec(rscratch1, B);
5334   cbz(rscratch1, verify_ok);
5335   stop("Error: the preserved predicate register (p7) elements are not all true");
5336   bind(verify_ok);
5337 }
5338 
5339 void MacroAssembler::safepoint_isb() {
5340   isb();
5341 #ifndef PRODUCT
5342   if (VerifyCrossModifyFence) {
5343     // Clear the thread state.
5344     strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
5345   }
5346 #endif
5347 }
5348 
5349 #ifndef PRODUCT
5350 void MacroAssembler::verify_cross_modify_fence_not_required() {
5351   if (VerifyCrossModifyFence) {
5352     // Check if thread needs a cross modify fence.
5353     ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
5354     Label fence_not_required;
5355     cbz(rscratch1, fence_not_required);
5356     // If it does then fail.
5357     lea(rscratch1, CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure));
5358     mov(c_rarg0, rthread);
5359     blr(rscratch1);
5360     bind(fence_not_required);
5361   }
5362 }
5363 #endif
5364 
5365 void MacroAssembler::spin_wait() {
5366   for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
5367     switch (VM_Version::spin_wait_desc().inst()) {
5368       case SpinWait::NOP:
5369         nop();
5370         break;
5371       case SpinWait::ISB:
5372         isb();
5373         break;
5374       case SpinWait::YIELD:
5375         yield();
5376         break;
5377       default:
5378         ShouldNotReachHere();
5379     }
5380   }
5381 }