/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "interpreter/interpreter.hpp"

#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

// #include "gc_interface/collectedHeap.inline.hpp"
// #include "interpreter/interpreter.hpp"
// #include "memory/cardTableModRefBS.hpp"
// #include "prims/methodHandles.hpp"
// #include "runtime/biasedLocking.hpp"
// #include "runtime/interfaceSupport.hpp"
// #include "runtime/objectMonitor.hpp"
// #include "runtime/os.hpp"
// #include "runtime/sharedRuntime.hpp"
// #include "runtime/stubRoutines.hpp"

#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef COMPILER2
#include "opto/node.hpp"
#include "opto/compile.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
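  // Branch-family offsets below are encoded in units of 32-bit instruction
  // words, so the byte distance to the target is scaled down by 4 first.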
  long offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = target - branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      u_int64_t dest = (u_int64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;
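      // For example (a sketch): with the adrp at 0x10000 and the target at
      // 0x25008, adr_page - pc_page == 0x25 - 0x10 == 0x15 pages, and
      // offset_lo == 0x008 is fixed up in the following ldr/str, add or
      // movk, if any.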

      // We handle 4 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 3 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str, add or movk instruction. Otherwise we could accidentally end
      // up treating a type 4 relocation as a type 1, 2 or 3 just because it happened
      // to be followed by a random unrelated ldr/str, add or movk instruction.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                    21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                   Instruction_aarch64::extract(insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
        // movk #imm16<<32
        Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32);
        long dest = ((long)target & 0xffffffffL) | ((long)branch & 0xffff00000000L);
        long pc_page = (long)branch >> 12;
        long adr_page = (long)dest >> 12;
        offset = adr_page - pc_page;
        instructions = 2;
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    u_int64_t dest = (u_int64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}
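
// A worked example (sketch): patching an unconditional branch stores
// (target - branch) >> 2 into bits 25..0 of the instruction, i.e. a
// signed word offset with a +/-128M byte range.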

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits).  We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    narrowOop n = oopDesc::encode_heap_oop((oop)o);
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}
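
// Illustration (not emitted code): for a narrow oop 0x12345678 the pair
// above is patched with imm16 halves 0x1234 (n >> 16) and 0x5678
// (n & 0xffff); a wide oop uses three 16-bit chunks, matching the
// movz/movk/movk sequence emitted by movptr().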

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  long offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                Instruction_aarch64::extract(insn, 4, 0) ==
                        Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
               Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
          target_page = (target_page & 0xffffffff) |
                         ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
        }
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    u_int32_t *insns = (u_int32_t *)insn_addr;
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    return 0;
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}

void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  dsb(Assembler::SY);
}
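
// Note: a full data synchronization barrier (DSB SY) is used here rather
// than a write to a serialization page as on some other ports, so the
// thread and tmp arguments are unused.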


void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp and sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
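//
// The anchor fields written below live in the JavaFrameAnchor embedded in
// the (thread-local) JavaThread:
//   last_Java_sp -- stack pointer of the last Java frame
//   last_Java_fp -- frame pointer (optional)
//   last_Java_pc -- return pc (optional)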
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  if (last_java_pc != NULL) {
    adr(scratch, last_java_pc);
  } else {
    // FIXME: This is almost never correct.  We should delete all
    // cases of set_last_Java_frame with last_java_pc=NULL and use the
    // correct return address instead.
    adr(scratch, pc());
  }

  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
  }
}

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 4Gb (see the assertion above).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}

void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far jump not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 4Gb (see the assertion above).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
}
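
// The two code shapes produced by far_call/far_jump above are, roughly:
//
//   near:  bl/b   entry                  // one relocatable instruction
//
//   far:   adrp   tmp, entry (page)
//          add    tmp, tmp, #page_offset
//          blr/br tmp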

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);
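
  // Reminder: the 64-bit biased mark word layout assumed here is
  //   [ JavaThread*:54 | epoch:2 | unused:1 | age:4 | biased_lock:1 | lock:2 ]
  // and biased_lock_pattern is the low three bits, 0b101.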

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    ldr(swap_reg, mark_addr);
  }
  andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::NE, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orr(tmp_reg, tmp_reg, rthread);
  eor(tmp_reg, swap_reg, tmp_reg);
  andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    Label around;
    cbnz(tmp_reg, around);
    atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1, rscratch2);
    b(done);
    bind(around);
  } else {
    cbz(tmp_reg, done);
  }

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cbnz(rscratch1, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
  cbnz(rscratch1, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  {
    Label here;
    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
    // interpreter runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
                  tmp_reg, rscratch1, rscratch2);
    }
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
                  tmp_reg, rscratch1, rscratch2);
    }
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here, nope;
    load_prototype_header(tmp_reg, obj_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
    bind(here);

    // Fall through to the normal CAS-based lock, because no matter what
    // the result of the above CAS, some thread must have succeeded in
    // removing the bias bit from the object's header.
    if (counters != NULL) {
      atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
                  rscratch1, rscratch2);
    }
    bind(nope);
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(temp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::EQ, done);
}


// added to make this compile

REGISTER_DEFINITION(Register, noreg);

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with a PAC signature during throw_pending_exception
  // if it was tail-call optimized by the compiler; since lr is not callee-saved,
  // reload it with the proper value.
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline.  If the code cache is small
// trampolines won't be emitted.

address MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  unsigned int start_offset = offset();
#ifdef COMPILER2
  // We need a trampoline if branches are far.
  if (far_branches()) {
    // We don't want to emit a trampoline if C2 is generating dummy
    // code during its branch shortening phase.
    CompileTask* task = ciEnv::current()->task();
    bool in_scratch_emit_size =
      ((task != NULL) && is_c2_compile(task->comp_level())
       && Compile::current()->in_scratch_emit_size());
    if (!in_scratch_emit_size) {
      address stub = emit_trampoline_stub(start_offset, entry.target());
      if (stub == NULL) {
        return NULL; // CodeCache is full
      }
    }
  }
#endif

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
#ifdef COMPILER2
  if (!far_branches()) {
    bl(entry.target());
  } else {
    bl(pc());
  }
#else
  bl(entry.target());
#endif
  // just need to return a non-null address
  return pc();
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)
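//
// The emitted stub is therefore, roughly:
//
//   +0x00  ldr  rscratch1, +0x08   // load the 8-byte literal below
//   +0x04  br   rscratch1
//   +0x08  <dest>                  // 64-bit target address, patchable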

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
#ifdef COMPILER2
  address stub = start_a_stub(Compile::MAX_stubs_size/2);
  if (stub == NULL) {
    return NULL;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call target from the pc-relative literal below
  // - branch to it
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub;
#else
  ShouldNotReachHere();
  return NULL;
#endif
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}
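
// For reference, this computes x = (x & 0xff) ? 1 : 0.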

address MacroAssembler::ic_call(address entry) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // unsigned long offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }


RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  ldr(tmp, ExternalAddress((address) delayed_value_addr));

  if (offset != 0)
    add(tmp, tmp, offset);

  return RegisterOrConstant(tmp);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");

  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size() * wordSize;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);
  if (HeapWordsPerLong > 1) {
    // Round up to align_object_offset boundary
    // see code for instanceKlass::start_of_itable!
    round_to(scan_temp, BytesPerLong);
  }

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmp(intf_klass, method_result);

    if (peel) {
      br(Assembler::EQ, found_method);
    } else {
      br(Assembler::NE, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel)  break;

    bind(search);

    // Check that the previous entry is non-null.  A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    cbz(method_result, L_no_such_interface);
    add(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  if (return_method) {
    // Got a hit.
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result,
        form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
  }
}
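
// In effect (a sketch) the lookup above computes:
//   method_result = *(recv_klass + vtable_start + vtable_index * 8
//                     + vtableEntry::method_offset_in_bytes())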

void MacroAssembler::check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                        RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    // The super_check_offset is a 32-bit value; the ldrw zero-extends it,
    // which is what we want here.
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    cmp(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// Scans count pointer-sized words at [addr] for an occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
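
// On exit the condition flags hold the last comparison: EQ if value was
// found, NE otherwise. Callers pre-set NE for the zero-count case (see the
// cmp(sp, zr) in check_klass_subtype_slow_path) and branch on the flags.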

// Scans count 4-byte words at [addr] for an occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  RegSet pushed_registers;
  if (!IS_A_TEMP(r2))    pushed_registers += r2;
  if (!IS_A_TEMP(r5))    pushed_registers += r5;

  if (super_klass != r0 || UseCompressedOops) {
    if (!IS_A_TEMP(r0))   pushed_registers += r0;
  }

  push(pushed_registers, sp);

  // Get super_klass value into r0 (even if it was in r5 or r2).
  if (super_klass != r0) {
    mov(r0, super_klass);
  }

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.  (A 32-bit ldrw zero-extends, which is what we want.)
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success.  Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}


void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  mov(r0, reg);
  movptr(rscratch1, (uintptr_t)(address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop_addr {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  // addr may contain sp so we will have to adjust it based on the
  // pushes that we just did.
  if (addr.uses(sp)) {
    lea(r0, addr);
    ldr(r0, Address(r0, 4 * wordSize));
  } else {
    ldr(r0, addr);
  }
  movptr(rscratch1, (uintptr_t)(address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop_addr");
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

1343   // number_of_arguments is unused here: the caller has already placed
1344   // all arguments (including the thread, when passed) in registers.
1345   mov(rscratch1, entry_point);
1346   blr(rscratch1);
1347   if (retaddr)
1348     bind(*retaddr);
1349 
1350   ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
1351   maybe_isb();
1352 }
1353 
1354 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1355   call_VM_leaf_base(entry_point, number_of_arguments);
1356 }
1357 
1358 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1359   pass_arg0(this, arg_0);
1360   call_VM_leaf_base(entry_point, 1);
1361 }
1362 
1363 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1364   pass_arg0(this, arg_0);
1365   pass_arg1(this, arg_1);
1366   call_VM_leaf_base(entry_point, 2);
1367 }
1368 
1369 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1370                                   Register arg_1, Register arg_2) {
1371   pass_arg0(this, arg_0);
1372   pass_arg1(this, arg_1);
1373   pass_arg2(this, arg_2);
1374   call_VM_leaf_base(entry_point, 3);
1375 }
1376 
1377 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1378   pass_arg0(this, arg_0);
1379   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1380 }
1381 
1382 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1383 
1384   assert(arg_0 != c_rarg1, "smashed arg");
1385   pass_arg1(this, arg_1);
1386   pass_arg0(this, arg_0);
1387   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1388 }
1389 
1390 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1391   assert(arg_0 != c_rarg2, "smashed arg");
1392   assert(arg_1 != c_rarg2, "smashed arg");
1393   pass_arg2(this, arg_2);
1394   assert(arg_0 != c_rarg1, "smashed arg");
1395   pass_arg1(this, arg_1);
1396   pass_arg0(this, arg_0);
1397   MacroAssembler::call_VM_leaf_base(entry_point, 3);
1398 }
1399 
1400 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1401   assert(arg_0 != c_rarg3, "smashed arg");
1402   assert(arg_1 != c_rarg3, "smashed arg");
1403   assert(arg_2 != c_rarg3, "smashed arg");
1404   pass_arg3(this, arg_3);
1405   assert(arg_0 != c_rarg2, "smashed arg");
1406   assert(arg_1 != c_rarg2, "smashed arg");
1407   pass_arg2(this, arg_2);
1408   assert(arg_0 != c_rarg1, "smashed arg");
1409   pass_arg1(this, arg_1);
1410   pass_arg0(this, arg_0);
1411   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1412 }
1413 
1414 void MacroAssembler::null_check(Register reg, int offset) {
1415   if (needs_explicit_null_check(offset)) {
1416     // provoke OS NULL exception if reg = NULL by
1417     // accessing M[reg] w/o changing any registers
1418     // NOTE: this is plenty to provoke a segv
1419     ldr(zr, Address(reg));
1420   } else {
1421     // nothing to do, (later) access of M[reg + offset]
1422     // will provoke OS NULL exception if reg = NULL
1423   }
1424 }
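
// (Explanatory note, not in the original comments: needs_explicit_null_check()
// is true when offset is too large for a fault at reg + offset to be
// guaranteed to land in the protected page at address zero, so we must
// touch M[reg] itself to get the trap.)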
1425 
1426 // MacroAssembler protected routines needed to implement
1427 // public methods
1428 
1429 void MacroAssembler::mov(Register r, Address dest) {
1430   code_section()->relocate(pc(), dest.rspec());
1431   u_int64_t imm64 = (u_int64_t)dest.target();
1432   movptr(r, imm64);
1433 }
1434 
1435 // Move a constant pointer into r.  In AArch64 mode the virtual
1436 // address space is 48 bits in size, so we only need three
1437 // instructions to create a patchable instruction sequence that can
1438 // reach anywhere.
1439 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1440 #ifndef PRODUCT
1441   {
1442     char buffer[64];
1443     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1444     block_comment(buffer);
1445   }
1446 #endif
1447   assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
1448   movz(r, imm64 & 0xffff);
1449   imm64 >>= 16;
1450   movk(r, imm64 & 0xffff, 16);
1451   imm64 >>= 16;
1452   movk(r, imm64 & 0xffff, 32);
1453 }
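
// For example, movptr(r0, 0x123456789abc) expands to
//   movz r0, #0x9abc              // bits 15..0
//   movk r0, #0x5678, lsl #16     // bits 31..16
//   movk r0, #0x1234, lsl #32     // bits 47..32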
1454 
1455 // Macro to mov replicated immediate to vector register.
1456 //  Vd will get the following values for different arrangements in T
1457 //   imm32 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
1458 //   imm32 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
1459 //   imm32 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
1460 //   imm32 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
1461 //   imm32 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
1462 //   imm32 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
1463 //   T1D/T2D: invalid
1464 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
1465   assert(T != T1D && T != T2D, "invalid arrangement");
1466   if (T == T8B || T == T16B) {
1467     assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
1468     movi(Vd, T, imm32 & 0xff, 0);
1469     return;
1470   }
1471   u_int32_t nimm32 = ~imm32;
1472   if (T == T4H || T == T8H) {
1473     assert((imm32  & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
1474     imm32 &= 0xffff;
1475     nimm32 &= 0xffff;
1476   }
1477   u_int32_t x = imm32;
1478   int movi_cnt = 0;
1479   int movn_cnt = 0;
1480   while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
1481   x = nimm32;
1482   while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
1483   if (movn_cnt < movi_cnt) imm32 = nimm32;
1484   unsigned lsl = 0;
1485   while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
1486   if (movn_cnt < movi_cnt)
1487     mvni(Vd, T, imm32 & 0xff, lsl);
1488   else
1489     movi(Vd, T, imm32 & 0xff, lsl);
1490   imm32 >>= 8; lsl += 8;
1491   while (imm32) {
1492     while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
1493     if (movn_cnt < movi_cnt)
1494       bici(Vd, T, imm32 & 0xff, lsl);
1495     else
1496       orri(Vd, T, imm32 & 0xff, lsl);
1497     lsl += 8; imm32 >>= 8;
1498   }
1499 }
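
// Worked examples for the routine above (illustrative only):
//   mov(v0, T4S, 0x00ff0000) has a single nonzero byte and so emits
//     movi v0.4s, #0xff, lsl #16
//   mov(v0, T4S, 0xffffff00) is cheaper via the inverted value and emits
//     mvni v0.4s, #0xff             // each lane = ~0x000000ff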
1500 
1501 void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
1502 {
1503 #ifndef PRODUCT
1504   {
1505     char buffer[64];
1506     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1507     block_comment(buffer);
1508   }
1509 #endif
1510   if (operand_valid_for_logical_immediate(false, imm64)) {
1511     orr(dst, zr, imm64);
1512   } else {
1513     // we can use a combination of MOVZ or MOVN with
1514     // MOVK to build up the constant
1515     u_int64_t imm_h[4];
1516     int zero_count = 0;
1517     int neg_count = 0;
1518     int i;
1519     for (i = 0; i < 4; i++) {
1520       imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
1521       if (imm_h[i] == 0) {
1522         zero_count++;
1523       } else if (imm_h[i] == 0xffffL) {
1524         neg_count++;
1525       }
1526     }
1527     if (zero_count == 4) {
1528       // one MOVZ will do
1529       movz(dst, 0);
1530     } else if (neg_count == 4) {
1531       // one MOVN will do
1532       movn(dst, 0);
1533     } else if (zero_count == 3) {
1534       for (i = 0; i < 4; i++) {
1535         if (imm_h[i] != 0L) {
1536           movz(dst, (u_int32_t)imm_h[i], (i << 4));
1537           break;
1538         }
1539       }
1540     } else if (neg_count == 3) {
1541       // one MOVN will do
1542       for (int i = 0; i < 4; i++) {
1543         if (imm_h[i] != 0xffffL) {
1544           movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
1545           break;
1546         }
1547       }
1548     } else if (zero_count == 2) {
1549       // one MOVZ and one MOVK will do
1550       for (i = 0; i < 3; i++) {
1551         if (imm_h[i] != 0L) {
1552           movz(dst, (u_int32_t)imm_h[i], (i << 4));
1553           i++;
1554           break;
1555         }
1556       }
1557       for (;i < 4; i++) {
1558         if (imm_h[i] != 0L) {
1559           movk(dst, (u_int32_t)imm_h[i], (i << 4));
1560         }
1561       }
1562     } else if (neg_count == 2) {
1563       // one MOVN and one MOVK will do
1564       for (i = 0; i < 4; i++) {
1565         if (imm_h[i] != 0xffffL) {
1566           movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
1567           i++;
1568           break;
1569         }
1570       }
1571       for (;i < 4; i++) {
1572         if (imm_h[i] != 0xffffL) {
1573           movk(dst, (u_int32_t)imm_h[i], (i << 4));
1574         }
1575       }
1576     } else if (zero_count == 1) {
1577       // one MOVZ and two MOVKs will do
1578       for (i = 0; i < 4; i++) {
1579         if (imm_h[i] != 0L) {
1580           movz(dst, (u_int32_t)imm_h[i], (i << 4));
1581           i++;
1582           break;
1583         }
1584       }
1585       for (;i < 4; i++) {
1586         if (imm_h[i] != 0x0L) {
1587           movk(dst, (u_int32_t)imm_h[i], (i << 4));
1588         }
1589       }
1590     } else if (neg_count == 1) {
1591       // one MOVN and two MOVKs will do
1592       for (i = 0; i < 4; i++) {
1593         if (imm_h[i] != 0xffffL) {
1594           movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
1595           i++;
1596           break;
1597         }
1598       }
1599       for (;i < 4; i++) {
1600         if (imm_h[i] != 0xffffL) {
1601           movk(dst, (u_int32_t)imm_h[i], (i << 4));
1602         }
1603       }
1604     } else {
1605       // use a MOVZ and 3 MOVKs (makes it easier to debug)
1606       movz(dst, (u_int32_t)imm_h[0], 0);
1607       for (i = 1; i < 4; i++) {
1608         movk(dst, (u_int32_t)imm_h[i], (i << 4));
1609       }
1610     }
1611   }
1612 }
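
// Worked example (illustrative only): imm64 == 0x00000000deadbeef gives
// imm_h == { 0xbeef, 0xdead, 0, 0 }, i.e. zero_count == 2, so we emit
//   movz dst, #0xbeef             // halfword 0
//   movk dst, #0xdead, lsl #16    // halfword 1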
1613 
1614 void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
1615 {
1616 #ifndef PRODUCT
1617     {
1618       char buffer[64];
1619       snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
1620       block_comment(buffer);
1621     }
1622 #endif
1623   if (operand_valid_for_logical_immediate(true, imm32)) {
1624     orrw(dst, zr, imm32);
1625   } else {
1626     // we can use MOVZ, MOVN or two calls to MOVK to build up the
1627     // constant
1628     u_int32_t imm_h[2];
1629     imm_h[0] = imm32 & 0xffff;
1630     imm_h[1] = ((imm32 >> 16) & 0xffff);
1631     if (imm_h[0] == 0) {
1632       movzw(dst, imm_h[1], 16);
1633     } else if (imm_h[0] == 0xffff) {
1634       movnw(dst, imm_h[1] ^ 0xffff, 16);
1635     } else if (imm_h[1] == 0) {
1636       movzw(dst, imm_h[0], 0);
1637     } else if (imm_h[1] == 0xffff) {
1638       movnw(dst, imm_h[0] ^ 0xffff, 0);
1639     } else {
1640       // use a MOVZ and MOVK (makes it easier to debug)
1641       movzw(dst, imm_h[0], 0);
1642       movkw(dst, imm_h[1], 16);
1643     }
1644   }
1645 }
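
// Worked example (illustrative only): imm32 == 0xffff1234 has
// imm_h == { 0x1234, 0xffff }, so a single MOVN suffices:
//   movn wdst, #0xedcb            // wdst = ~0x0000edcb = 0xffff1234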
1646 
1647 // Form an address from base + offset in Rd.  Rd may or may
1648 // not actually be used: you must use the Address that is returned.
1649 // It is up to you to ensure that the shift provided matches the size
1650 // of your data.
1651 Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
1652   if (Address::offset_ok_for_immed(byte_offset, shift))
1653     // It fits; no need for any heroics
1654     return Address(base, byte_offset);
1655 
1656   // Don't do anything clever with negative or misaligned offsets
1657   unsigned mask = (1 << shift) - 1;
1658   if (byte_offset < 0 || byte_offset & mask) {
1659     mov(Rd, byte_offset);
1660     add(Rd, base, Rd);
1661     return Address(Rd);
1662   }
1663 
1664   // See if we can do this with two 12-bit offsets
1665   {
1666     unsigned long word_offset = byte_offset >> shift;
1667     unsigned long masked_offset = word_offset & 0xfff000;
1668     if (Address::offset_ok_for_immed(word_offset - masked_offset)
1669         && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
1670       add(Rd, base, masked_offset << shift);
1671       word_offset -= masked_offset;
1672       return Address(Rd, word_offset << shift);
1673     }
1674   }
1675 
1676   // Do it the hard way
1677   mov(Rd, byte_offset);
1678   add(Rd, base, Rd);
1679   return Address(Rd);
1680 }
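
// Worked example (illustrative only): form_address(r9, r10, 0x9008, 3).
// The scaled offset 0x9008 >> 3 == 0x1201 exceeds 12 bits, so the
// two-chunk path emits
//   add r9, r10, #0x8000          // (0x1201 & 0xfff000) << 3
// and returns Address(r9, 0x1008), whose scaled offset (0x201) fits.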
1681 
1682 void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
1683   if (UseLSE) {
1684     mov(tmp, 1);
1685     ldadd(Assembler::word, tmp, zr, counter_addr);
1686     return;
1687   }
1688   Label retry_load;
1689   if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
1690     prfm(Address(counter_addr), PSTL1STRM);
1691   bind(retry_load);
1692   // flush and load exclusive from the memory location
1693   ldxrw(tmp, counter_addr);
1694   addw(tmp, tmp, 1);
1695   // if we store+flush with no intervening write tmp will be zero
1696   stxrw(tmp2, tmp, counter_addr);
1697   cbnzw(tmp2, retry_load);
1698 }
1699 
1700 
1701 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
1702                                     bool want_remainder, Register scratch)
1703 {
1704   // Full implementation of Java idiv and irem.  The function
1705   // returns the (pc) offset of the div instruction - may be needed
1706   // for implicit exceptions.
1707   //
1708   // constraint : ra/rb =/= scratch
1709   //         normal case
1710   //
1711   // input : ra: dividend
1712   //         rb: divisor
1713   //
1714   // result: either
1715   //         quotient  (= ra idiv rb)
1716   //         remainder (= ra irem rb)
1717 
1718   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1719 
1720   int idivl_offset = offset();
1721   if (! want_remainder) {
1722     sdivw(result, ra, rb);
1723   } else {
1724     sdivw(scratch, ra, rb);
1725     Assembler::msubw(result, scratch, rb, ra);
1726   }
1727 
1728   return idivl_offset;
1729 }
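
// Example of the Java semantics implemented above: -7 / 2 == -3 (sdiv
// rounds toward zero) and -7 % 2 == -1, which msubw reconstructs as
// rem = ra - (ra / rb) * rb.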
1730 
1731 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
1732                                     bool want_remainder, Register scratch)
1733 {
1734   // Full implementation of Java ldiv and lrem.  The function
1735   // returns the (pc) offset of the div instruction - may be needed
1736   // for implicit exceptions.
1737   //
1738   // constraint : ra/rb =/= scratch
1739   //         normal case
1740   //
1741   // input : ra: dividend
1742   //         rb: divisor
1743   //
1744   // result: either
1745   //         quotient  (= ra idiv rb)
1746   //         remainder (= ra irem rb)
1747 
1748   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1749 
1750   int idivq_offset = offset();
1751   if (! want_remainder) {
1752     sdiv(result, ra, rb);
1753   } else {
1754     sdiv(scratch, ra, rb);
1755     Assembler::msub(result, scratch, rb, ra);
1756   }
1757 
1758   return idivq_offset;
1759 }
1760 
1761 // MacroAssembler routines found actually to be needed
1762 
1763 void MacroAssembler::push(Register src)
1764 {
1765   str(src, Address(pre(esp, -1 * wordSize)));
1766 }
1767 
1768 void MacroAssembler::pop(Register dst)
1769 {
1770   ldr(dst, Address(post(esp, 1 * wordSize)));
1771 }
1772 
1773 // Note: load_unsigned_short used to be called load_unsigned_word.
1774 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1775   int off = offset();
1776   ldrh(dst, src);
1777   return off;
1778 }
1779 
1780 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
1781   int off = offset();
1782   ldrb(dst, src);
1783   return off;
1784 }
1785 
1786 int MacroAssembler::load_signed_short(Register dst, Address src) {
1787   int off = offset();
1788   ldrsh(dst, src);
1789   return off;
1790 }
1791 
1792 int MacroAssembler::load_signed_byte(Register dst, Address src) {
1793   int off = offset();
1794   ldrsb(dst, src);
1795   return off;
1796 }
1797 
1798 int MacroAssembler::load_signed_short32(Register dst, Address src) {
1799   int off = offset();
1800   ldrshw(dst, src);
1801   return off;
1802 }
1803 
1804 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
1805   int off = offset();
1806   ldrsbw(dst, src);
1807   return off;
1808 }
1809 
1810 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
1811   switch (size_in_bytes) {
1812   case  8:  ldr(dst, src); break;
1813   case  4:  ldrw(dst, src); break;
1814   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
1815   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
1816   default:  ShouldNotReachHere();
1817   }
1818 }
1819 
1820 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
1821   switch (size_in_bytes) {
1822   case  8:  str(src, dst); break;
1823   case  4:  strw(src, dst); break;
1824   case  2:  strh(src, dst); break;
1825   case  1:  strb(src, dst); break;
1826   default:  ShouldNotReachHere();
1827   }
1828 }
1829 
1830 void MacroAssembler::decrementw(Register reg, int value)
1831 {
1832   if (value < 0)  { incrementw(reg, -value);      return; }
1833   if (value == 0) {                               return; }
1834   if (value < (1 << 12)) { subw(reg, reg, value); return; }
1835   /* else */ {
1836     guarantee(reg != rscratch2, "invalid dst for register decrement");
1837     movw(rscratch2, (unsigned)value);
1838     subw(reg, reg, rscratch2);
1839   }
1840 }
1841 
1842 void MacroAssembler::decrement(Register reg, int value)
1843 {
1844   if (value < 0)  { increment(reg, -value);      return; }
1845   if (value == 0) {                              return; }
1846   if (value < (1 << 12)) { sub(reg, reg, value); return; }
1847   /* else */ {
1848     assert(reg != rscratch2, "invalid dst for register decrement");
1849     mov(rscratch2, (unsigned long)value);
1850     sub(reg, reg, rscratch2);
1851   }
1852 }
1853 
1854 void MacroAssembler::decrementw(Address dst, int value)
1855 {
1856   assert(!dst.uses(rscratch1), "invalid dst for address decrement");
1857   ldrw(rscratch1, dst);
1858   decrementw(rscratch1, value);
1859   strw(rscratch1, dst);
1860 }
1861 
1862 void MacroAssembler::decrement(Address dst, int value)
1863 {
1864   assert(!dst.uses(rscratch1), "invalid address for decrement");
1865   ldr(rscratch1, dst);
1866   decrement(rscratch1, value);
1867   str(rscratch1, dst);
1868 }
1869 
1870 void MacroAssembler::incrementw(Register reg, int value)
1871 {
1872   if (value < 0)  { decrementw(reg, -value);      return; }
1873   if (value == 0) {                               return; }
1874   if (value < (1 << 12)) { addw(reg, reg, value); return; }
1875   /* else */ {
1876     assert(reg != rscratch2, "invalid dst for register increment");
1877     movw(rscratch2, (unsigned)value);
1878     addw(reg, reg, rscratch2);
1879   }
1880 }
1881 
1882 void MacroAssembler::increment(Register reg, int value)
1883 {
1884   if (value < 0)  { decrement(reg, -value);      return; }
1885   if (value == 0) {                              return; }
1886   if (value < (1 << 12)) { add(reg, reg, value); return; }
1887   /* else */ {
1888     assert(reg != rscratch2, "invalid dst for register increment");
1889     movw(rscratch2, (unsigned)value);
1890     add(reg, reg, rscratch2);
1891   }
1892 }
1893 
1894 void MacroAssembler::incrementw(Address dst, int value)
1895 {
1896   assert(!dst.uses(rscratch1), "invalid dst for address increment");
1897   ldrw(rscratch1, dst);
1898   incrementw(rscratch1, value);
1899   strw(rscratch1, dst);
1900 }
1901 
1902 void MacroAssembler::increment(Address dst, int value)
1903 {
1904   assert(!dst.uses(rscratch1), "invalid dst for address increment");
1905   ldr(rscratch1, dst);
1906   increment(rscratch1, value);
1907   str(rscratch1, dst);
1908 }
1909 
1910 
1911 void MacroAssembler::pusha() {
1912   push(0x7fffffff, sp);
1913 }
1914 
1915 void MacroAssembler::popa() {
1916   pop(0x7fffffff, sp);
1917 }
1918 
1919 // Push lots of registers in the bit set supplied.  Don't push sp.
1920 // Return the number of words pushed
1921 int MacroAssembler::push(unsigned int bitset, Register stack) {
1922   int words_pushed = 0;
1923 
1924   // Scan bitset to accumulate register pairs
1925   unsigned char regs[32];
1926   int count = 0;
1927   for (int reg = 0; reg <= 30; reg++) {
1928     if (1 & bitset)
1929       regs[count++] = reg;
1930     bitset >>= 1;
1931   }
1932   regs[count++] = zr->encoding_nocheck();
1933   count &= ~1;  // Only push an even number of regs
1934 
1935   if (count) {
1936     stp(as_Register(regs[0]), as_Register(regs[1]),
1937        Address(pre(stack, -count * wordSize)));
1938     words_pushed += 2;
1939   }
1940   for (int i = 2; i < count; i += 2) {
1941     stp(as_Register(regs[i]), as_Register(regs[i+1]),
1942        Address(stack, i * wordSize));
1943     words_pushed += 2;
1944   }
1945 
1946   assert(words_pushed == count, "oops, pushed != count");
1947 
1948   return count;
1949 }
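
// Worked example (illustrative only): push(0b1011, sp), i.e. {r0, r1, r3},
// pads the odd count with zr and emits
//   stp r0, r1, [sp, #-32]!
//   stp r3, zr, [sp, #16]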
1950 
1951 int MacroAssembler::pop(unsigned int bitset, Register stack) {
1952   int words_pushed = 0;
1953 
1954   // Scan bitset to accumulate register pairs
1955   unsigned char regs[32];
1956   int count = 0;
1957   for (int reg = 0; reg <= 30; reg++) {
1958     if (1 & bitset)
1959       regs[count++] = reg;
1960     bitset >>= 1;
1961   }
1962   regs[count++] = zr->encoding_nocheck();
1963   count &= ~1;
1964 
1965   for (int i = 2; i < count; i += 2) {
1966     ldp(as_Register(regs[i]), as_Register(regs[i+1]),
1967        Address(stack, i * wordSize));
1968     words_pushed += 2;
1969   }
1970   if (count) {
1971     ldp(as_Register(regs[0]), as_Register(regs[1]),
1972        Address(post(stack, count * wordSize)));
1973     words_pushed += 2;
1974   }
1975 
1976   assert(words_pushed == count, "oops, pushed != count");
1977 
1978   return count;
1979 }
1980 #ifdef ASSERT
1981 void MacroAssembler::verify_heapbase(const char* msg) {
1982 #if 0
1983   assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
1984   assert (Universe::heap() != NULL, "java heap should be initialized");
1985   if (CheckCompressedOops) {
1986     Label ok;
1987     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
1988     cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
1989     br(Assembler::EQ, ok);
1990     stop(msg);
1991     bind(ok);
1992     pop(1 << rscratch1->encoding(), sp);
1993   }
1994 #endif
1995 }
1996 #endif
1997 
1998 void MacroAssembler::stop(const char* msg) {
1999   address ip = pc();
2000   pusha();
2001   movptr(c_rarg0, (uintptr_t)(address)msg);
2002   movptr(c_rarg1, (uintptr_t)(address)ip);
2003   mov(c_rarg2, sp);
2004   mov(c_rarg3, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
2005   blr(c_rarg3);
2006   hlt(0);
2007 }
2008 
2009 void MacroAssembler::warn(const char* msg) {
2010   pusha();
2011   mov(c_rarg0, (address)msg);
2012   mov(lr, CAST_FROM_FN_PTR(address, warning));
2013   blr(lr);
2014   popa();
2015 }
2016 
2017 // If a constant does not fit in an immediate field, generate some
2018 // number of MOV instructions and then perform the operation.
2019 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
2020                                            add_sub_imm_insn insn1,
2021                                            add_sub_reg_insn insn2) {
2022   assert(Rd != zr, "Rd = zr and not setting flags?");
2023   if (operand_valid_for_add_sub_immediate((int)imm)) {
2024     (this->*insn1)(Rd, Rn, imm);
2025   } else {
2026     if (uabs(imm) < (1 << 24)) {
2027        (this->*insn1)(Rd, Rn, imm & -(1 << 12));
2028        (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
2029     } else {
2030        assert_different_registers(Rd, Rn);
2031        mov(Rd, (uint64_t)imm);
2032        (this->*insn2)(Rd, Rn, Rd, LSL, 0);
2033     }
2034   }
2035 }
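
// Worked example (illustrative only): an add with imm == 0x123456 does not
// fit a single ADD immediate, but uabs(imm) < (1 << 24), so it splits into
//   add Rd, Rn, #0x123000         // imm & -(1 << 12)
//   add Rd, Rd, #0x456            // imm & ((1 << 12) - 1)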
2036 
2037 // Separate version which sets the flags. Optimisations are more restricted
2038 // because we must set the flags correctly.
2039 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
2040                                            add_sub_imm_insn insn1,
2041                                            add_sub_reg_insn insn2) {
2042   if (operand_valid_for_add_sub_immediate((int)imm)) {
2043     (this->*insn1)(Rd, Rn, imm);
2044   } else {
2045     assert_different_registers(Rd, Rn);
2046     assert(Rd != zr, "overflow in immediate operand");
2047     mov(Rd, (uint64_t)imm);
2048     (this->*insn2)(Rd, Rn, Rd, LSL, 0);
2049   }
2050 }
2051 
2052 
2053 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
2054   if (increment.is_register()) {
2055     add(Rd, Rn, increment.as_register());
2056   } else {
2057     add(Rd, Rn, increment.as_constant());
2058   }
2059 }
2060 
2061 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
2062   if (increment.is_register()) {
2063     addw(Rd, Rn, increment.as_register());
2064   } else {
2065     addw(Rd, Rn, increment.as_constant());
2066   }
2067 }
2068 
2069 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
2070   if (decrement.is_register()) {
2071     sub(Rd, Rn, decrement.as_register());
2072   } else {
2073     sub(Rd, Rn, decrement.as_constant());
2074   }
2075 }
2076 
2077 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
2078   if (decrement.is_register()) {
2079     subw(Rd, Rn, decrement.as_register());
2080   } else {
2081     subw(Rd, Rn, decrement.as_constant());
2082   }
2083 }
2084 
2085 void MacroAssembler::reinit_heapbase()
2086 {
2087   if (UseCompressedOops) {
2088     if (Universe::is_fully_initialized()) {
2089       mov(rheapbase, Universe::narrow_ptrs_base());
2090     } else {
2091       lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
2092       ldr(rheapbase, Address(rheapbase));
2093     }
2094   }
2095 }
2096 
2097 // this simulates the behaviour of the x86 cmpxchg instruction using a
2098 // load linked/store conditional pair. we use the acquire/release
2099 // versions of these instructions so that we flush pending writes as
2100 // per Java semantics.
2101 
2102 // n.b. the x86 version assumes the old value to be compared against is
2103 // in rax and updates rax with the value located in memory if the
2104 // cmpxchg fails. we supply a register for the old value explicitly
2105 
2106 // the aarch64 load linked/store conditional instructions do not
2107 // accept an offset. so, unlike x86, we must provide a plain register
2108 // to identify the memory word to be compared/exchanged rather than a
2109 // register+offset Address.
2110 
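// For example (hypothetical usage, register names invented): with raddr
// pointing at the lock word,
//   cmpxchgptr(roldv, rnewv, raddr, rtmp, done, &slow);
// branches to done when [raddr] matched roldv and was replaced by rnewv;
// otherwise the value found in memory is left in roldv and control
// transfers to slow.
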
2111 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
2112                                 Label &succeed, Label *fail) {
2113   // oldv holds comparison value
2114   // newv holds value to write in exchange
2115   // addr identifies memory word to compare against/update
2116   if (UseLSE) {
2117     mov(tmp, oldv);
2118     casal(Assembler::xword, oldv, newv, addr);
2119     cmp(tmp, oldv);
2120     br(Assembler::EQ, succeed);
2121     membar(AnyAny);
2122   } else {
2123     Label retry_load, nope;
2124     if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
2125       prfm(Address(addr), PSTL1STRM);
2126     bind(retry_load);
2127     // flush and load exclusive from the memory location
2128     // and fail if it is not what we expect
2129     ldaxr(tmp, addr);
2130     cmp(tmp, oldv);
2131     br(Assembler::NE, nope);
2132     // if we store+flush with no intervening write tmp will be zero
2133     stlxr(tmp, newv, addr);
2134     cbzw(tmp, succeed);
2135     // retry so we only ever return after a load fails to compare;
2136     // this ensures we don't return a stale value after a failed write.
2137     b(retry_load);
2138     // if the memory word differs we return it in oldv and signal a fail
2139     bind(nope);
2140     membar(AnyAny);
2141     mov(oldv, tmp);
2142   }
2143   if (fail)
2144     b(*fail);
2145 }
2146 
2147 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
2148                                 Label &succeed, Label *fail) {
2149   // oldv holds comparison value
2150   // newv holds value to write in exchange
2151   // addr identifies memory word to compare against/update
2152   // tmp returns 0/1 for success/failure
2153   if (UseLSE) {
2154     mov(tmp, oldv);
2155     casal(Assembler::word, oldv, newv, addr);
2156     cmp(tmp, oldv);
2157     br(Assembler::EQ, succeed);
2158     membar(AnyAny);
2159   } else {
2160     Label retry_load, nope;
2161     if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
2162       prfm(Address(addr), PSTL1STRM);
2163     bind(retry_load);
2164     // flush and load exclusive from the memory location
2165     // and fail if it is not what we expect
2166     ldaxrw(tmp, addr);
2167     cmp(tmp, oldv);
2168     br(Assembler::NE, nope);
2169     // if we store+flush with no intervening write tmp will be zero
2170     stlxrw(tmp, newv, addr);
2171     cbzw(tmp, succeed);
2172     // retry so we only ever return after a load fails to compare;
2173     // this ensures we don't return a stale value after a failed write.
2174     b(retry_load);
2175     // if the memory word differs we return it in oldv and signal a fail
2176     bind(nope);
2177     membar(AnyAny);
2178     mov(oldv, tmp);
2179   }
2180   if (fail)
2181     b(*fail);
2182 }
2183 
2184 // A generic CAS; success or failure is in the EQ flag.
2185 void MacroAssembler::cmpxchg(Register addr, Register expected,
2186                              Register new_val,
2187                              enum operand_size size,
2188                              bool acquire, bool release,
2189                              Register tmp) {
2190   if (UseLSE) {
2191     mov(tmp, expected);
2192     lse_cas(tmp, new_val, addr, size, acquire, release, /*not_pair*/ true);
2193     cmp(tmp, expected);
2194   } else {
2195     BLOCK_COMMENT("cmpxchg {");
2196     Label retry_load, done;
2197     if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
2198       prfm(Address(addr), PSTL1STRM);
2199     bind(retry_load);
2200     load_exclusive(tmp, addr, size, acquire);
2201     if (size == xword)
2202       cmp(tmp, expected);
2203     else
2204       cmpw(tmp, expected);
2205     br(Assembler::NE, done);
2206     store_exclusive(tmp, new_val, addr, size, release);
2207     cbnzw(tmp, retry_load);
2208     bind(done);
2209     BLOCK_COMMENT("} cmpxchg");
2210   }
2211 }
2212 
2213 static bool different(Register a, RegisterOrConstant b, Register c) {
2214   if (b.is_constant())
2215     return a != c;
2216   else
2217     return a != b.as_register() && a != c && b.as_register() != c;
2218 }
2219 
2220 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
2221 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
2222   if (UseLSE) {                                                         \
2223     prev = prev->is_valid() ? prev : zr;                                \
2224     if (incr.is_register()) {                                           \
2225       AOP(sz, incr.as_register(), prev, addr);                          \
2226     } else {                                                            \
2227       mov(rscratch2, incr.as_constant());                               \
2228       AOP(sz, rscratch2, prev, addr);                                   \
2229     }                                                                   \
2230     return;                                                             \
2231   }                                                                     \
2232   Register result = rscratch2;                                          \
2233   if (prev->is_valid())                                                 \
2234     result = different(prev, incr, addr) ? prev : rscratch2;            \
2235                                                                         \
2236   Label retry_load;                                                     \
2237   if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))         \
2238     prfm(Address(addr), PSTL1STRM);                                     \
2239   bind(retry_load);                                                     \
2240   LDXR(result, addr);                                                   \
2241   OP(rscratch1, result, incr);                                          \
2242   STXR(rscratch2, rscratch1, addr);                                     \
2243   cbnzw(rscratch2, retry_load);                                         \
2244   if (prev->is_valid() && prev != result) {                             \
2245     IOP(prev, rscratch1, incr);                                         \
2246   }                                                                     \
2247 }
2248 
2249 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
2250 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
2251 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
2252 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
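
// The instantiations above define atomic_add, atomic_addw, atomic_addal and
// atomic_addalw.  Under LSE each is essentially a single LDADD(AL);
// otherwise it is an ldxr/OP/stxr retry loop, and the trailing IOP (sub)
// rebuilds the previous value when prev had to double as the loop register.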
2253 
2254 #undef ATOMIC_OP
2255 
2256 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
2257 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
2258   if (UseLSE) {                                                         \
2259     prev = prev->is_valid() ? prev : zr;                                \
2260     AOP(sz, newv, prev, addr);                                          \
2261     return;                                                             \
2262   }                                                                     \
2263   Register result = rscratch2;                                          \
2264   if (prev->is_valid())                                                 \
2265     result = different(prev, newv, addr) ? prev : rscratch2;            \
2266                                                                         \
2267   Label retry_load;                                                     \
2268   if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))         \
2269     prfm(Address(addr), PSTL1STRM);                                     \
2270   bind(retry_load);                                                     \
2271   LDXR(result, addr);                                                   \
2272   STXR(rscratch1, newv, addr);                                          \
2273   cbnzw(rscratch1, retry_load);                                         \
2274   if (prev->is_valid() && prev != result)                               \
2275     mov(prev, result);                                                  \
2276 }
2277 
2278 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
2279 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
2280 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
2281 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
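
// Likewise atomic_xchg{,w,al,alw} swap in newv and return the previous
// value: a single SWP/SWPAL under LSE, or an ldxr/stxr loop otherwise.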
2282 
2283 #undef ATOMIC_XCHG
2284 
2285 void MacroAssembler::incr_allocated_bytes(Register thread,
2286                                           Register var_size_in_bytes,
2287                                           int con_size_in_bytes,
2288                                           Register t1) {
2289   if (!thread->is_valid()) {
2290     thread = rthread;
2291   }
2292   assert(t1->is_valid(), "need temp reg");
2293 
2294   ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
2295   if (var_size_in_bytes->is_valid()) {
2296     add(t1, t1, var_size_in_bytes);
2297   } else {
2298     add(t1, t1, con_size_in_bytes);
2299   }
2300   str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
2301 }
2302 
2303 #ifndef PRODUCT
2304 extern "C" void findpc(intptr_t x);
2305 #endif
2306 
2307 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
2308 {
2309   // In order to get locks to work, we need to fake an in_VM state
2310   if (ShowMessageBoxOnError) {
2311     JavaThread* thread = JavaThread::current();
2312     JavaThreadState saved_state = thread->thread_state();
2313     thread->set_thread_state(_thread_in_vm);
2314 #ifndef PRODUCT
2315     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
2316       ttyLocker ttyl;
2317       BytecodeCounter::print();
2318     }
2319 #endif
2320     if (os::message_box(msg, "Execution stopped, print registers?")) {
2321       ttyLocker ttyl;
2322       tty->print_cr(" pc = 0x%016lx", pc);
2323 #ifndef PRODUCT
2324       tty->cr();
2325       findpc(pc);
2326       tty->cr();
2327 #endif
2328       tty->print_cr(" r0 = 0x%016lx", regs[0]);
2329       tty->print_cr(" r1 = 0x%016lx", regs[1]);
2330       tty->print_cr(" r2 = 0x%016lx", regs[2]);
2331       tty->print_cr(" r3 = 0x%016lx", regs[3]);
2332       tty->print_cr(" r4 = 0x%016lx", regs[4]);
2333       tty->print_cr(" r5 = 0x%016lx", regs[5]);
2334       tty->print_cr(" r6 = 0x%016lx", regs[6]);
2335       tty->print_cr(" r7 = 0x%016lx", regs[7]);
2336       tty->print_cr(" r8 = 0x%016lx", regs[8]);
2337       tty->print_cr(" r9 = 0x%016lx", regs[9]);
2338       tty->print_cr("r10 = 0x%016lx", regs[10]);
2339       tty->print_cr("r11 = 0x%016lx", regs[11]);
2340       tty->print_cr("r12 = 0x%016lx", regs[12]);
2341       tty->print_cr("r13 = 0x%016lx", regs[13]);
2342       tty->print_cr("r14 = 0x%016lx", regs[14]);
2343       tty->print_cr("r15 = 0x%016lx", regs[15]);
2344       tty->print_cr("r16 = 0x%016lx", regs[16]);
2345       tty->print_cr("r17 = 0x%016lx", regs[17]);
2346       tty->print_cr("r18 = 0x%016lx", regs[18]);
2347       tty->print_cr("r19 = 0x%016lx", regs[19]);
2348       tty->print_cr("r20 = 0x%016lx", regs[20]);
2349       tty->print_cr("r21 = 0x%016lx", regs[21]);
2350       tty->print_cr("r22 = 0x%016lx", regs[22]);
2351       tty->print_cr("r23 = 0x%016lx", regs[23]);
2352       tty->print_cr("r24 = 0x%016lx", regs[24]);
2353       tty->print_cr("r25 = 0x%016lx", regs[25]);
2354       tty->print_cr("r26 = 0x%016lx", regs[26]);
2355       tty->print_cr("r27 = 0x%016lx", regs[27]);
2356       tty->print_cr("r28 = 0x%016lx", regs[28]);
2357       tty->print_cr("r30 = 0x%016lx", regs[30]);
2358       tty->print_cr("r31 = 0x%016lx", regs[31]);
2359       BREAKPOINT;
2360     }
2361     ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
2362   } else {
2363     ttyLocker ttyl;
2364     ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
2365                     msg);
2366     assert(false, err_msg("DEBUG MESSAGE: %s", msg));
2367   }
2368 }
2369 
2370 void MacroAssembler::push_call_clobbered_registers() {
2371   push(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2), sp);
2372 
2373   // Push v0-v7, v16-v31.
2374   for (int i = 30; i >= 0; i -= 2) {
2375     if (i <= v7->encoding() || i >= v16->encoding()) {
2376         stpd(as_FloatRegister(i), as_FloatRegister(i+1),
2377              Address(pre(sp, -2 * wordSize)));
2378     }
2379   }
2380 }
2381 
2382 void MacroAssembler::pop_call_clobbered_registers() {
2383 
2384   for (int i = 0; i < 32; i += 2) {
2385     if (i <= v7->encoding() || i >= v16->encoding()) {
2386       ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
2387            Address(post(sp, 2 * wordSize)));
2388     }
2389   }
2390 
2391   pop(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2), sp);
2392 }
2393 
2394 void MacroAssembler::push_CPU_state(bool save_vectors) {
2395   push(0x3fffffff, sp);         // integer registers except lr & sp
2396 
2397   if (!save_vectors) {
2398     for (int i = 30; i >= 0; i -= 2)
2399       stpd(as_FloatRegister(i), as_FloatRegister(i+1),
2400            Address(pre(sp, -2 * wordSize)));
2401   } else {
2402     for (int i = 30; i >= 0; i -= 2)
2403       stpq(as_FloatRegister(i), as_FloatRegister(i+1),
2404            Address(pre(sp, -4 * wordSize)));
2405   }
2406 }
2407 
2408 void MacroAssembler::pop_CPU_state(bool restore_vectors) {
2409   if (!restore_vectors) {
2410     for (int i = 0; i < 32; i += 2)
2411       ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
2412            Address(post(sp, 2 * wordSize)));
2413   } else {
2414     for (int i = 0; i < 32; i += 2)
2415       ldpq(as_FloatRegister(i), as_FloatRegister(i+1),
2416            Address(post(sp, 4 * wordSize)));
2417   }
2418 
2419   pop(0x3fffffff, sp);         // integer registers except lr & sp
2420 }
2421 
2422 /**
2423  * Helpers for multiply_to_len().
2424  */
2425 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
2426                                      Register src1, Register src2) {
2427   adds(dest_lo, dest_lo, src1);
2428   adc(dest_hi, dest_hi, zr);
2429   adds(dest_lo, dest_lo, src2);
2430   adc(final_dest_hi, dest_hi, zr);
2431 }
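
// In effect the routine above is a 128-bit accumulate:
//   final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2
// with each adds/adc pair propagating the carry out of the low word.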
2432 
2433 // Generate an address from (r + r1 extend offset).  "size" is the
2434 // size of the operand.  The result may be in rscratch2.
2435 Address MacroAssembler::offsetted_address(Register r, Register r1,
2436                                           Address::extend ext, int offset, int size) {
2437   if (offset || (ext.shift() % size != 0)) {
2438     lea(rscratch2, Address(r, r1, ext));
2439     return Address(rscratch2, offset);
2440   } else {
2441     return Address(r, r1, ext);
2442   }
2443 }
2444 
2445 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
2446 {
2447   assert(offset >= 0, "spill to negative address?");
2448   // Offset reachable ?
2449   //   Not aligned - 9 bits signed offset
2450   //   Aligned - 12 bits unsigned offset shifted
2451   Register base = sp;
2452   if ((offset & (size-1)) && offset >= (1<<8)) {
2453     add(tmp, base, offset & ((1<<12)-1));
2454     base = tmp;
2455     offset &= -1u<<12;
2456   }
2457 
2458   if (offset >= (1<<12) * size) {
2459     add(tmp, base, offset & (((1<<12)-1)<<12));
2460     base = tmp;
2461     offset &= ~(((1<<12)-1)<<12);
2462   }
2463 
2464   return Address(base, offset);
2465 }
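
// Worked example (illustrative only): spill_address(8, 0x9008, tmp) is
// 8-byte aligned but too big for a scaled 12-bit offset, so it emits
//   add tmp, sp, #0x9000          // offset & (0xfff << 12)
// and returns Address(tmp, 8).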
2466 
2467 /**
2468  * Multiply 64 bit by 64 bit first loop.
2469  */
2470 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2471                                            Register y, Register y_idx, Register z,
2472                                            Register carry, Register product,
2473                                            Register idx, Register kdx) {
2474   //
2475   //  jlong carry, x[], y[], z[];
2476   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
2477   //    huge_128 product = y[idx] * x[xstart] + carry;
2478   //    z[kdx] = (jlong)product;
2479   //    carry  = (jlong)(product >>> 64);
2480   //  }
2481   //  z[xstart] = carry;
2482   //
2483 
2484   Label L_first_loop, L_first_loop_exit;
2485   Label L_one_x, L_one_y, L_multiply;
2486 
2487   subsw(xstart, xstart, 1);
2488   br(Assembler::MI, L_one_x);
2489 
2490   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
2491   ldr(x_xstart, Address(rscratch1));
2492   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
2493 
2494   bind(L_first_loop);
2495   subsw(idx, idx, 1);
2496   br(Assembler::MI, L_first_loop_exit);
2497   subsw(idx, idx, 1);
2498   br(Assembler::MI, L_one_y);
2499   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
2500   ldr(y_idx, Address(rscratch1));
2501   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
2502   bind(L_multiply);
2503 
2504   // AArch64 has a multiply-accumulate instruction that we can't use
2505   // here because it has no way to process carries, so we have to use
2506   // separate add and adc instructions.  Bah.
2507   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
2508   mul(product, x_xstart, y_idx);
2509   adds(product, product, carry);
2510   adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product
2511 
2512   subw(kdx, kdx, 2);
2513   ror(product, product, 32); // back to big-endian
2514   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
2515 
2516   b(L_first_loop);
2517 
2518   bind(L_one_y);
2519   ldrw(y_idx, Address(y,  0));
2520   b(L_multiply);
2521 
2522   bind(L_one_x);
2523   ldrw(x_xstart, Address(x,  0));
2524   b(L_first_loop);
2525 
2526   bind(L_first_loop_exit);
2527 }
2528 
2529 /**
2530  * Multiply 128 bit by 128 bit. Unrolled inner loop.
2531  *
2532  */
2533 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
2534                                              Register carry, Register carry2,
2535                                              Register idx, Register jdx,
2536                                              Register yz_idx1, Register yz_idx2,
2537                                              Register tmp, Register tmp3, Register tmp4,
2538                                              Register tmp6, Register product_hi) {
2539 
2540   //   jlong carry, x[], y[], z[];
2541   //   int kdx = ystart+1;
2542   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
2543   //     huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
2544   //     jlong carry2  = (jlong)(tmp3 >>> 64);
2545   //     huge_128 tmp4 = (y[idx]   * product_hi) + z[kdx+idx] + carry2;
2546   //     carry  = (jlong)(tmp4 >>> 64);
2547   //     z[kdx+idx+1] = (jlong)tmp3;
2548   //     z[kdx+idx] = (jlong)tmp4;
2549   //   }
2550   //   idx += 2;
2551   //   if (idx > 0) {
2552   //     yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
2553   //     z[kdx+idx] = (jlong)yz_idx1;
2554   //     carry  = (jlong)(yz_idx1 >>> 64);
2555   //   }
2556   //
2557 
2558   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
2559 
2560   lsrw(jdx, idx, 2);
2561 
2562   bind(L_third_loop);
2563 
2564   subsw(jdx, jdx, 1);
2565   br(Assembler::MI, L_third_loop_exit);
2566   subw(idx, idx, 4);
2567 
2568   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
2569 
2570   ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
2571 
2572   lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
2573 
2574   ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
2575   ror(yz_idx2, yz_idx2, 32);
2576 
2577   ldp(rscratch2, rscratch1, Address(tmp6, 0));
2578 
2579   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
2580   umulh(tmp4, product_hi, yz_idx1);
2581 
2582   ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
2583   ror(rscratch2, rscratch2, 32);
2584 
2585   mul(tmp, product_hi, yz_idx2);   //  yz_idx2 * product_hi -> carry2:tmp
2586   umulh(carry2, product_hi, yz_idx2);
2587 
2588   // propagate sum of both multiplications into carry:tmp4:tmp3
2589   adds(tmp3, tmp3, carry);
2590   adc(tmp4, tmp4, zr);
2591   adds(tmp3, tmp3, rscratch1);
2592   adcs(tmp4, tmp4, tmp);
2593   adc(carry, carry2, zr);
2594   adds(tmp4, tmp4, rscratch2);
2595   adc(carry, carry, zr);
2596 
2597   ror(tmp3, tmp3, 32); // convert little-endian to big-endian
2598   ror(tmp4, tmp4, 32);
2599   stp(tmp4, tmp3, Address(tmp6, 0));
2600 
2601   b(L_third_loop);
2602   bind (L_third_loop_exit);
2603 
2604   andw (idx, idx, 0x3);
2605   cbz(idx, L_post_third_loop_done);
2606 
2607   Label L_check_1;
2608   subsw(idx, idx, 2);
2609   br(Assembler::MI, L_check_1);
2610 
2611   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
2612   ldr(yz_idx1, Address(rscratch1, 0));
2613   ror(yz_idx1, yz_idx1, 32);
2614   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
2615   umulh(tmp4, product_hi, yz_idx1);
2616   lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
2617   ldr(yz_idx2, Address(rscratch1, 0));
2618   ror(yz_idx2, yz_idx2, 32);
2619 
2620   add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
2621 
2622   ror(tmp3, tmp3, 32);
2623   str(tmp3, Address(rscratch1, 0));
2624 
2625   bind (L_check_1);
2626 
2627   andw (idx, idx, 0x1);
2628   subsw(idx, idx, 1);
2629   br(Assembler::MI, L_post_third_loop_done);
2630   ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
2631   mul(tmp3, tmp4, product_hi);  //  tmp4 * product_hi -> carry2:tmp3
2632   umulh(carry2, tmp4, product_hi);
2633   ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
2634 
2635   add2_with_carry(carry2, tmp3, tmp4, carry);
2636 
2637   strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
2638   extr(carry, carry2, tmp3, 32);
2639 
2640   bind(L_post_third_loop_done);
2641 }
2642 
2643 /**
2644  * Code for BigInteger::multiplyToLen() intrinsic.
2645  *
2646  * r0: x
2647  * r1: xlen
2648  * r2: y
2649  * r3: ylen
2650  * r4:  z
2651  * r5: zlen
2652  * r10: tmp1
2653  * r11: tmp2
2654  * r12: tmp3
2655  * r13: tmp4
2656  * r14: tmp5
2657  * r15: tmp6
2658  * r16: tmp7
2659  *
2660  */
2661 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
2662                                      Register z, Register zlen,
2663                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4,
2664                                      Register tmp5, Register tmp6, Register product_hi) {
2665 
2666   assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
2667 
2668   const Register idx = tmp1;
2669   const Register kdx = tmp2;
2670   const Register xstart = tmp3;
2671 
2672   const Register y_idx = tmp4;
2673   const Register carry = tmp5;
2674   const Register product  = xlen;
2675   const Register x_xstart = zlen;  // reuse register
2676 
2677   // First Loop.
2678   //
2679   //  final static long LONG_MASK = 0xffffffffL;
2680   //  int xstart = xlen - 1;
2681   //  int ystart = ylen - 1;
2682   //  long carry = 0;
2683   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
2684   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
2685   //    z[kdx] = (int)product;
2686   //    carry = product >>> 32;
2687   //  }
2688   //  z[xstart] = (int)carry;
2689   //
2690 
2691   movw(idx, ylen);      // idx = ylen;
2692   movw(kdx, zlen);      // kdx = xlen+ylen;
2693   mov(carry, zr);       // carry = 0;
2694 
2695   Label L_done;
2696 
2697   movw(xstart, xlen);
2698   subsw(xstart, xstart, 1);
2699   br(Assembler::MI, L_done);
2700 
2701   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
2702 
2703   Label L_second_loop;
2704   cbzw(kdx, L_second_loop);
2705 
2706   Label L_carry;
2707   subw(kdx, kdx, 1);
2708   cbzw(kdx, L_carry);
2709 
2710   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
2711   lsr(carry, carry, 32);
2712   subw(kdx, kdx, 1);
2713 
2714   bind(L_carry);
2715   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
2716 
2717   // Second and third (nested) loops.
2718   //
2719   // for (int i = xstart-1; i >= 0; i--) { // Second loop
2720   //   carry = 0;
2721   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
2722   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
2723   //                    (z[k] & LONG_MASK) + carry;
2724   //     z[k] = (int)product;
2725   //     carry = product >>> 32;
2726   //   }
2727   //   z[i] = (int)carry;
2728   // }
2729   //
2730   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
2731 
2732   const Register jdx = tmp1;
2733 
2734   bind(L_second_loop);
2735   mov(carry, zr);                // carry = 0;
2736   movw(jdx, ylen);               // j = ystart+1
2737 
2738   subsw(xstart, xstart, 1);      // i = xstart-1;
2739   br(Assembler::MI, L_done);
2740 
2741   str(z, Address(pre(sp, -4 * wordSize)));
2742 
2743   Label L_last_x;
2744   lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
2745   subsw(xstart, xstart, 1);       // i = xstart-1;
2746   br(Assembler::MI, L_last_x);
2747 
2748   lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
2749   ldr(product_hi, Address(rscratch1));
2750   ror(product_hi, product_hi, 32);  // convert big-endian to little-endian
2751 
2752   Label L_third_loop_prologue;
2753   bind(L_third_loop_prologue);
2754 
2755   str(ylen, Address(sp, wordSize));
2756   stp(x, xstart, Address(sp, 2 * wordSize));
2757   multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
2758                           tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
2759   ldp(z, ylen, Address(post(sp, 2 * wordSize)));
2760   ldp(x, xlen, Address(post(sp, 2 * wordSize)));   // copy old xstart -> xlen
2761 
2762   addw(tmp3, xlen, 1);
2763   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
2764   subsw(tmp3, tmp3, 1);
2765   br(Assembler::MI, L_done);
2766 
2767   lsr(carry, carry, 32);
2768   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
2769   b(L_second_loop);
2770 
2771   // Next infrequent code is moved outside loops.
2772   bind(L_last_x);
2773   ldrw(product_hi, Address(x,  0));
2774   b(L_third_loop_prologue);
2775 
2776   bind(L_done);
2777 }
2778 
2779 /**
2780  * Emits code to update CRC-32 with a byte value according to constants in table
2781  *
2782  * @param [in,out]crc   Register containing the crc.
2783  * @param [in]val       Register containing the byte to fold into the CRC.
2784  * @param [in]table     Register containing the table of crc constants.
2785  *
2786  * uint32_t crc;
2787  * val = crc_table[(val ^ crc) & 0xFF];
2788  * crc = val ^ (crc >> 8);
2789  *
2790  */
2791 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
2792   eor(val, val, crc);
2793   andr(val, val, 0xff);
2794   ldrw(val, Address(table, val, Address::lsl(2)));
2795   eor(crc, val, crc, Assembler::LSR, 8);
2796 }
2797 
2798 /**
2799  * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
2800  *
2801  * @param [in,out]crc   Register containing the crc.
2802  * @param [in]v         Register containing the 32-bit to fold into the CRC.
2803  * @param [in]table0    Register containing table 0 of crc constants.
2804  * @param [in]table1    Register containing table 1 of crc constants.
2805  * @param [in]table2    Register containing table 2 of crc constants.
2806  * @param [in]table3    Register containing table 3 of crc constants.
2807  *
2808  * uint32_t crc;
2809  *   v = crc ^ v
2810  *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
2811  *
2812  */
2813 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
2814         Register table0, Register table1, Register table2, Register table3,
2815         bool upper) {
2816   eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
2817   uxtb(tmp, v);
2818   ldrw(crc, Address(table3, tmp, Address::lsl(2)));
2819   ubfx(tmp, v, 8, 8);
2820   ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
2821   eor(crc, crc, tmp);
2822   ubfx(tmp, v, 16, 8);
2823   ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
2824   eor(crc, crc, tmp);
2825   ubfx(tmp, v, 24, 8);
2826   ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
2827   eor(crc, crc, tmp);
2828 }
2829 
2830 /**
2831  * @param crc   register containing existing CRC (32-bit)
2832  * @param buf   register pointing to input byte buffer (byte*)
2833  * @param len   register containing number of bytes
2834  * @param table0..table3 registers that will receive the addresses of the four CRC tables
2835  * @param tmp, tmp2, tmp3  scratch registers
2836  */
2837 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
2838         Register table0, Register table1, Register table2, Register table3,
2839         Register tmp, Register tmp2, Register tmp3) {
2840   Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
2841   unsigned long offset;
2842 
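         // CRC-32 operates on the one's complement of the running crc:
         // invert it on entry (ornw with zr == bitwise NOT) and again on
         // every exit path.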
2843     ornw(crc, zr, crc);
2844 
2845   if (UseCRC32) {
2846     Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop;
2847 
2848       subs(len, len, 64);
2849       br(Assembler::GE, CRC_by64_loop);
2850       adds(len, len, 64-4);
2851       br(Assembler::GE, CRC_by4_loop);
2852       adds(len, len, 4);
2853       br(Assembler::GT, CRC_by1_loop);
2854       b(L_exit);
2855 
2856     BIND(CRC_by4_loop);
2857       ldrw(tmp, Address(post(buf, 4)));
2858       subs(len, len, 4);
2859       crc32w(crc, crc, tmp);
2860       br(Assembler::GE, CRC_by4_loop);
2861       adds(len, len, 4);
2862       br(Assembler::LE, L_exit);
2863     BIND(CRC_by1_loop);
2864       ldrb(tmp, Address(post(buf, 1)));
2865       subs(len, len, 1);
2866       crc32b(crc, crc, tmp);
2867       br(Assembler::GT, CRC_by1_loop);
2868       b(L_exit);
2869 
2870       align(CodeEntryAlignment);
2871     BIND(CRC_by64_loop);
2872       subs(len, len, 64);
2873       ldp(tmp, tmp3, Address(post(buf, 16)));
2874       crc32x(crc, crc, tmp);
2875       crc32x(crc, crc, tmp3);
2876       ldp(tmp, tmp3, Address(post(buf, 16)));
2877       crc32x(crc, crc, tmp);
2878       crc32x(crc, crc, tmp3);
2879       ldp(tmp, tmp3, Address(post(buf, 16)));
2880       crc32x(crc, crc, tmp);
2881       crc32x(crc, crc, tmp3);
2882       ldp(tmp, tmp3, Address(post(buf, 16)));
2883       crc32x(crc, crc, tmp);
2884       crc32x(crc, crc, tmp3);
2885       br(Assembler::GE, CRC_by64_loop);
2886       adds(len, len, 64-4);
2887       br(Assembler::GE, CRC_by4_loop);
2888       adds(len, len, 4);
2889       br(Assembler::GT, CRC_by1_loop);
2890     BIND(L_exit);
2891       ornw(crc, zr, crc);
2892       return;
2893   }
2894 
2895     adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2896     if (offset) add(table0, table0, offset);
2897     add(table1, table0, 1*256*sizeof(juint));
2898     add(table2, table0, 2*256*sizeof(juint));
2899     add(table3, table0, 3*256*sizeof(juint));
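         // table0 is the ordinary byte-at-a-time CRC table; table1..3 are
         // pre-shifted copies so that a 32-bit word can be folded with four
         // independent lookups ('slicing-by-4'), one per byte.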
2900 
2901   if (UseNeon) {
2902       cmp(len, 64);
2903       br(Assembler::LT, L_by16);
2904       eor(v16, T16B, v16, v16);
2905 
2906     Label L_fold;
2907 
2908       add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
2909 
2910       ld1(v0, v1, T2D, post(buf, 32));
2911       ld1r(v4, T2D, post(tmp, 8));
2912       ld1r(v5, T2D, post(tmp, 8));
2913       ld1r(v6, T2D, post(tmp, 8));
2914       ld1r(v7, T2D, post(tmp, 8));
2915       mov(v16, T4S, 0, crc);
2916 
2917       eor(v0, T16B, v0, v16);
2918       sub(len, len, 64);
2919 
2920     BIND(L_fold);
2921       pmull(v22, T8H, v0, v5, T8B);
2922       pmull(v20, T8H, v0, v7, T8B);
2923       pmull(v23, T8H, v0, v4, T8B);
2924       pmull(v21, T8H, v0, v6, T8B);
2925 
2926       pmull2(v18, T8H, v0, v5, T16B);
2927       pmull2(v16, T8H, v0, v7, T16B);
2928       pmull2(v19, T8H, v0, v4, T16B);
2929       pmull2(v17, T8H, v0, v6, T16B);
2930 
2931       uzp1(v24, v20, v22, T8H);
2932       uzp2(v25, v20, v22, T8H);
2933       eor(v20, T16B, v24, v25);
2934 
2935       uzp1(v26, v16, v18, T8H);
2936       uzp2(v27, v16, v18, T8H);
2937       eor(v16, T16B, v26, v27);
2938 
2939       ushll2(v22, T4S, v20, T8H, 8);
2940       ushll(v20, T4S, v20, T4H, 8);
2941 
2942       ushll2(v18, T4S, v16, T8H, 8);
2943       ushll(v16, T4S, v16, T4H, 8);
2944 
2945       eor(v22, T16B, v23, v22);
2946       eor(v18, T16B, v19, v18);
2947       eor(v20, T16B, v21, v20);
2948       eor(v16, T16B, v17, v16);
2949 
2950       uzp1(v17, v16, v20, T2D);
2951       uzp2(v21, v16, v20, T2D);
2952       eor(v17, T16B, v17, v21);
2953 
2954       ushll2(v20, T2D, v17, T4S, 16);
2955       ushll(v16, T2D, v17, T2S, 16);
2956 
2957       eor(v20, T16B, v20, v22);
2958       eor(v16, T16B, v16, v18);
2959 
2960       uzp1(v17, v20, v16, T2D);
2961       uzp2(v21, v20, v16, T2D);
2962       eor(v28, T16B, v17, v21);
2963 
2964       pmull(v22, T8H, v1, v5, T8B);
2965       pmull(v20, T8H, v1, v7, T8B);
2966       pmull(v23, T8H, v1, v4, T8B);
2967       pmull(v21, T8H, v1, v6, T8B);
2968 
2969       pmull2(v18, T8H, v1, v5, T16B);
2970       pmull2(v16, T8H, v1, v7, T16B);
2971       pmull2(v19, T8H, v1, v4, T16B);
2972       pmull2(v17, T8H, v1, v6, T16B);
2973 
2974       ld1(v0, v1, T2D, post(buf, 32));
2975 
2976       uzp1(v24, v20, v22, T8H);
2977       uzp2(v25, v20, v22, T8H);
2978       eor(v20, T16B, v24, v25);
2979 
2980       uzp1(v26, v16, v18, T8H);
2981       uzp2(v27, v16, v18, T8H);
2982       eor(v16, T16B, v26, v27);
2983 
2984       ushll2(v22, T4S, v20, T8H, 8);
2985       ushll(v20, T4S, v20, T4H, 8);
2986 
2987       ushll2(v18, T4S, v16, T8H, 8);
2988       ushll(v16, T4S, v16, T4H, 8);
2989 
2990       eor(v22, T16B, v23, v22);
2991       eor(v18, T16B, v19, v18);
2992       eor(v20, T16B, v21, v20);
2993       eor(v16, T16B, v17, v16);
2994 
2995       uzp1(v17, v16, v20, T2D);
2996       uzp2(v21, v16, v20, T2D);
2997       eor(v16, T16B, v17, v21);
2998 
2999       ushll2(v20, T2D, v16, T4S, 16);
3000       ushll(v16, T2D, v16, T2S, 16);
3001 
3002       eor(v20, T16B, v22, v20);
3003       eor(v16, T16B, v16, v18);
3004 
3005       uzp1(v17, v20, v16, T2D);
3006       uzp2(v21, v20, v16, T2D);
3007       eor(v20, T16B, v17, v21);
3008 
3009       shl(v16, T2D, v28, 1);
3010       shl(v17, T2D, v20, 1);
3011 
3012       eor(v0, T16B, v0, v16);
3013       eor(v1, T16B, v1, v17);
3014 
3015       subs(len, len, 32);
3016       br(Assembler::GE, L_fold);
3017 
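           // Folding done: reduce the two 128-bit accumulators in v0/v1
           // back to a 32-bit CRC one word at a time via the table routine.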
3018       mov(crc, 0);
3019       mov(tmp, v0, T1D, 0);
3020       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3021       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3022       mov(tmp, v0, T1D, 1);
3023       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3024       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3025       mov(tmp, v1, T1D, 0);
3026       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3027       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3028       mov(tmp, v1, T1D, 1);
3029       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3030       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3031 
3032       add(len, len, 32);
3033   }
3034 
3035   BIND(L_by16);
3036     subs(len, len, 16);
3037     br(Assembler::GE, L_by16_loop);
3038     adds(len, len, 16-4);
3039     br(Assembler::GE, L_by4_loop);
3040     adds(len, len, 4);
3041     br(Assembler::GT, L_by1_loop);
3042     b(L_exit);
3043 
3044   BIND(L_by4_loop);
3045     ldrw(tmp, Address(post(buf, 4)));
3046     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
3047     subs(len, len, 4);
3048     br(Assembler::GE, L_by4_loop);
3049     adds(len, len, 4);
3050     br(Assembler::LE, L_exit);
3051   BIND(L_by1_loop);
3052     subs(len, len, 1);
3053     ldrb(tmp, Address(post(buf, 1)));
3054     update_byte_crc32(crc, tmp, table0);
3055     br(Assembler::GT, L_by1_loop);
3056     b(L_exit);
3057 
3058     align(CodeEntryAlignment);
3059   BIND(L_by16_loop);
3060     subs(len, len, 16);
3061     ldp(tmp, tmp3, Address(post(buf, 16)));
3062     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
3063     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
3064     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
3065     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
3066     br(Assembler::GE, L_by16_loop);
3067     adds(len, len, 16-4);
3068     br(Assembler::GE, L_by4_loop);
3069     adds(len, len, 4);
3070     br(Assembler::GT, L_by1_loop);
3071   BIND(L_exit);
3072     ornw(crc, zr, crc);
3073 }
3074 
3075 SkipIfEqual::SkipIfEqual(
3076     MacroAssembler* masm, const bool* flag_addr, bool value) {
3077   _masm = masm;
3078   unsigned long offset;
3079   _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
3080   _masm->ldrb(rscratch1, Address(rscratch1, offset));
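       // Note: 'value' is not consulted; the guarded code is skipped
       // whenever the flag byte is zero, so this assumes callers pass
       // value == false.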
3081   _masm->cbzw(rscratch1, _label);
3082 }
3083 
3084 SkipIfEqual::~SkipIfEqual() {
3085   _masm->bind(_label);
3086 }
3087 
3088 void MacroAssembler::addptr(const Address &dst, int32_t src) {
3089   Address adr;
3090   switch(dst.getMode()) {
3091   case Address::base_plus_offset:
3092     // This is the expected mode, although we allow all the other
3093     // forms below.
3094     adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
3095     break;
3096   default:
3097     lea(rscratch2, dst);
3098     adr = Address(rscratch2);
3099     break;
3100   }
3101   ldr(rscratch1, adr);
3102   add(rscratch1, rscratch1, src);
3103   str(rscratch1, adr);
3104 }
3105 
3106 void MacroAssembler::cmpptr(Register src1, Address src2) {
3107   unsigned long offset;
3108   adrp(rscratch1, src2, offset);
3109   ldr(rscratch1, Address(rscratch1, offset));
3110   cmp(src1, rscratch1);
3111 }
3112 
3113 void MacroAssembler::store_check(Register obj) {
3114   // Does a store check for the oop in register obj. The content of
3115   // register obj is destroyed afterwards.
3116   store_check_part_1(obj);
3117   store_check_part_2(obj);
3118 }
3119 
3120 void MacroAssembler::store_check(Register obj, Address dst) {
3121   store_check(obj);
3122 }
3123 
3124 
3125 // Split the store check operation so that other instructions can be scheduled in between.
3126 void MacroAssembler::store_check_part_1(Register obj) {
3127   BarrierSet* bs = Universe::heap()->barrier_set();
3128   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
3129   lsr(obj, obj, CardTableModRefBS::card_shift);
3130 }
3131 
3132 void MacroAssembler::store_check_part_2(Register obj) {
3133   BarrierSet* bs = Universe::heap()->barrier_set();
3134   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
3135   CardTableModRefBS* ct = (CardTableModRefBS*)bs;
3136   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
3137 
3138   // The calculation for byte_map_base is as follows:
3139   // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
3140   // So this essentially converts an address to a displacement and
3141   // it will never need to be relocated.
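       //
       // In C terms, part 1 and part 2 together perform:
       //   byte_map_base[(uintptr_t)obj >> card_shift] = 0;  // dirty_card_val()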
3142 
3143   // FIXME: It's not likely that disp will fit into an offset so we
3144   // don't bother to check, but it could save an instruction.
3145   intptr_t disp = (intptr_t) ct->byte_map_base;
3146   load_byte_map_base(rscratch1);
3147 
3148   if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
3149       membar(StoreStore);
3150   }
3151   strb(zr, Address(obj, rscratch1));
3152 }
3153 
3154 void MacroAssembler::load_klass(Register dst, Register src) {
3155   if (UseCompressedClassPointers) {
3156     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3157     decode_klass_not_null(dst);
3158   } else {
3159     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3160   }
3161 }
3162 
3163 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3164   if (UseCompressedClassPointers) {
3165     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3166     if (Universe::narrow_klass_base() == NULL) {
3167       cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
3168       return;
3169     } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
3170                && Universe::narrow_klass_shift() == 0) {
3171       // Only the bottom 32 bits matter
3172       cmpw(trial_klass, tmp);
3173       return;
3174     }
3175     decode_klass_not_null(tmp);
3176   } else {
3177     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3178   }
3179   cmp(trial_klass, tmp);
3180 }
3181 
3182 void MacroAssembler::load_prototype_header(Register dst, Register src) {
3183   load_klass(dst, src);
3184   ldr(dst, Address(dst, Klass::prototype_header_offset()));
3185 }
3186 
3187 void MacroAssembler::store_klass(Register dst, Register src) {
3188   // FIXME: Should this be a store release?  Concurrent GCs assume the
3189   // klass length is valid if the klass field is not null.
3190   if (UseCompressedClassPointers) {
3191     encode_klass_not_null(src);
3192     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3193   } else {
3194     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
3195   }
3196 }
3197 
3198 void MacroAssembler::store_klass_gap(Register dst, Register src) {
3199   if (UseCompressedClassPointers) {
3200     // Store to klass gap in destination
3201     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
3202   }
3203 }
3204 
3205 // Algorithm must match oop.inline.hpp encode_heap_oop.
3206 void MacroAssembler::encode_heap_oop(Register d, Register s) {
3207 #ifdef ASSERT
3208   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
3209 #endif
3210   verify_oop(s, "broken oop in encode_heap_oop");
3211   if (Universe::narrow_oop_base() == NULL) {
3212     if (Universe::narrow_oop_shift() != 0) {
3213       assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3214       lsr(d, s, LogMinObjAlignmentInBytes);
3215     } else {
3216       mov(d, s);
3217     }
3218   } else {
3219     subs(d, s, rheapbase);
3220     csel(d, d, zr, Assembler::HS);
3221     lsr(d, d, LogMinObjAlignmentInBytes);
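         // i.e. d = (s == NULL) ? 0 : (s - heap_base) >> shift;
         // the subs/csel pair maps NULL (below rheapbase) to zero.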
3222 
3223     /*  Old algorithm: is this any worse?
3224     Label nonnull;
3225     cbnz(r, nonnull);
3226     sub(r, r, rheapbase);
3227     bind(nonnull);
3228     lsr(r, r, LogMinObjAlignmentInBytes);
3229     */
3230   }
3231 }
3232 
3233 void MacroAssembler::encode_heap_oop_not_null(Register r) {
3234 #ifdef ASSERT
3235   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
3236   if (CheckCompressedOops) {
3237     Label ok;
3238     cbnz(r, ok);
3239     stop("null oop passed to encode_heap_oop_not_null");
3240     bind(ok);
3241   }
3242 #endif
3243   verify_oop(r, "broken oop in encode_heap_oop_not_null");
3244   if (Universe::narrow_oop_base() != NULL) {
3245     sub(r, r, rheapbase);
3246   }
3247   if (Universe::narrow_oop_shift() != 0) {
3248     assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3249     lsr(r, r, LogMinObjAlignmentInBytes);
3250   }
3251 }
3252 
3253 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
3254 #ifdef ASSERT
3255   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
3256   if (CheckCompressedOops) {
3257     Label ok;
3258     cbnz(src, ok);
3259     stop("null oop passed to encode_heap_oop_not_null2");
3260     bind(ok);
3261   }
3262 #endif
3263   verify_oop(src, "broken oop in encode_heap_oop_not_null2");
3264 
3265   Register data = src;
3266   if (Universe::narrow_oop_base() != NULL) {
3267     sub(dst, src, rheapbase);
3268     data = dst;
3269   }
3270   if (Universe::narrow_oop_shift() != 0) {
3271     assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3272     lsr(dst, data, LogMinObjAlignmentInBytes);
3273     data = dst;
3274   }
3275   if (data == src)
3276     mov(dst, src);
3277 }
3278 
3279 void  MacroAssembler::decode_heap_oop(Register d, Register s) {
3280 #ifdef ASSERT
3281   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
3282 #endif
3283   if (Universe::narrow_oop_base() == NULL) {
3284     if (Universe::narrow_oop_shift() != 0 || d != s) {
3285       lsl(d, s, Universe::narrow_oop_shift());
3286     }
3287   } else {
3288     Label done;
3289     if (d != s)
3290       mov(d, s);
3291     cbz(s, done);
3292     add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
3293     bind(done);
3294   }
3295   verify_oop(d, "broken oop in decode_heap_oop");
3296 }
3297 
3298 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
3299   assert (UseCompressedOops, "should only be used for compressed headers");
3300   assert (Universe::heap() != NULL, "java heap should be initialized");
3301   // Cannot assert, unverified entry point counts instructions (see .ad file)
3302   // vtableStubs also counts instructions in pd_code_size_limit.
3303   // Also do not verify_oop as this is called by verify_oop.
3304   if (Universe::narrow_oop_shift() != 0) {
3305     assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3306     if (Universe::narrow_oop_base() != NULL) {
3307       add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
3308     } else {
3309       add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
3310     }
3311   } else {
3312     assert (Universe::narrow_oop_base() == NULL, "sanity");
3313   }
3314 }
3315 
3316 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
3317   assert (UseCompressedOops, "should only be used for compressed headers");
3318   assert (Universe::heap() != NULL, "java heap should be initialized");
3319   // Cannot assert, unverified entry point counts instructions (see .ad file)
3320   // vtableStubs also counts instructions in pd_code_size_limit.
3321   // Also do not verify_oop as this is called by verify_oop.
3322   if (Universe::narrow_oop_shift() != 0) {
3323     assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3324     if (Universe::narrow_oop_base() != NULL) {
3325       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
3326     } else {
3327       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
3328     }
3329   } else {
3330     assert (Universe::narrow_oop_base() == NULL, "sanity");
3331     if (dst != src) {
3332       mov(dst, src);
3333     }
3334   }
3335 }
3336 
3337 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3338   if (Universe::narrow_klass_base() == NULL) {
3339     if (Universe::narrow_klass_shift() != 0) {
3340       assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3341       lsr(dst, src, LogKlassAlignmentInBytes);
3342     } else {
3343       if (dst != src) mov(dst, src);
3344     }
3345     return;
3346   }
3347 
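       // When the narrow-klass base shares no bits with any shifted klass
       // offset, XOR with the base both encodes and decodes (XOR is its
       // own inverse), avoiding the mov/sub sequence below.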
3348   if (use_XOR_for_compressed_class_base) {
3349     if (Universe::narrow_klass_shift() != 0) {
3350       eor(dst, src, (uint64_t)Universe::narrow_klass_base());
3351       lsr(dst, dst, LogKlassAlignmentInBytes);
3352     } else {
3353       eor(dst, src, (uint64_t)Universe::narrow_klass_base());
3354     }
3355     return;
3356   }
3357 
3358   if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
3359       && Universe::narrow_klass_shift() == 0) {
3360     movw(dst, src);
3361     return;
3362   }
3363 
3364 #ifdef ASSERT
3365   verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
3366 #endif
3367 
3368   Register rbase = dst;
3369   if (dst == src) rbase = rheapbase;
3370   mov(rbase, (uint64_t)Universe::narrow_klass_base());
3371   sub(dst, src, rbase);
3372   if (Universe::narrow_klass_shift() != 0) {
3373     assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3374     lsr(dst, dst, LogKlassAlignmentInBytes);
3375   }
3376   if (dst == src) reinit_heapbase();
3377 }
3378 
3379 void MacroAssembler::encode_klass_not_null(Register r) {
3380   encode_klass_not_null(r, r);
3381 }
3382 
3383 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3384   Register rbase = dst;
3385   assert (UseCompressedClassPointers, "should only be used for compressed headers");
3386 
3387   if (Universe::narrow_klass_base() == NULL) {
3388     if (Universe::narrow_klass_shift() != 0) {
3389       assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3390       lsl(dst, src, LogKlassAlignmentInBytes);
3391     } else {
3392       if (dst != src) mov(dst, src);
3393     }
3394     return;
3395   }
3396 
3397   if (use_XOR_for_compressed_class_base) {
3398     if (Universe::narrow_klass_shift() != 0) {
3399       lsl(dst, src, LogKlassAlignmentInBytes);
3400       eor(dst, dst, (uint64_t)Universe::narrow_klass_base());
3401     } else {
3402       eor(dst, src, (uint64_t)Universe::narrow_klass_base());
3403     }
3404     return;
3405   }
3406 
3407   if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
3408       && Universe::narrow_klass_shift() == 0) {
3409     if (dst != src)
3410       movw(dst, src);
3411     movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32);
3412     return;
3413   }
3414 
3415   // Cannot assert, unverified entry point counts instructions (see .ad file)
3416   // vtableStubs also counts instructions in pd_code_size_limit.
3417   // Also do not verify_oop as this is called by verify_oop.
3418   if (dst == src) rbase = rheapbase;
3419   mov(rbase, (uint64_t)Universe::narrow_klass_base());
3420   if (Universe::narrow_klass_shift() != 0) {
3421     assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3422     add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes);
3423   } else {
3424     add(dst, rbase, src);
3425   }
3426   if (dst == src) reinit_heapbase();
3427 }
3428 
3429 void  MacroAssembler::decode_klass_not_null(Register r) {
3430   decode_klass_not_null(r, r);
3431 }
3432 
3433 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
3434   assert (UseCompressedOops, "should only be used for compressed oops");
3435   assert (Universe::heap() != NULL, "java heap should be initialized");
3436   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
3437 
3438   int oop_index = oop_recorder()->find_index(obj);
3439   assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
3440 
3441   InstructionMark im(this);
3442   RelocationHolder rspec = oop_Relocation::spec(oop_index);
3443   code_section()->relocate(inst_mark(), rspec);
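       // 0xDEADBEEF is just a recognizable placeholder; the relocation
       // above lets the real narrow oop be patched in later.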
3444   movz(dst, 0xDEAD, 16);
3445   movk(dst, 0xBEEF);
3446 }
3447 
3448 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
3449   assert (UseCompressedClassPointers, "should only be used for compressed headers");
3450   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
3451   int index = oop_recorder()->find_index(k);
3452   assert(! Universe::heap()->is_in_reserved(k), "should not be an oop");
3453 
3454   InstructionMark im(this);
3455   RelocationHolder rspec = metadata_Relocation::spec(index);
3456   code_section()->relocate(inst_mark(), rspec);
3457   narrowKlass nk = Klass::encode_klass(k);
3458   movz(dst, (nk >> 16), 16);
3459   movk(dst, nk & 0xffff);
3460 }
3461 
3462 void MacroAssembler::load_heap_oop(Register dst, Address src)
3463 {
3464   if (UseCompressedOops) {
3465     ldrw(dst, src);
3466     decode_heap_oop(dst);
3467   } else {
3468     ldr(dst, src);
3469   }
3470 }
3471 
3472 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src)
3473 {
3474   if (UseCompressedOops) {
3475     ldrw(dst, src);
3476     decode_heap_oop_not_null(dst);
3477   } else {
3478     ldr(dst, src);
3479   }
3480 }
3481 
3482 void MacroAssembler::store_heap_oop(Address dst, Register src) {
3483   if (UseCompressedOops) {
3484     assert(!dst.uses(src), "not enough registers");
3485     encode_heap_oop(src);
3486     strw(src, dst);
3487   } else
3488     str(src, dst);
3489 }
3490 
3491 // Used for storing NULLs.
3492 void MacroAssembler::store_heap_oop_null(Address dst) {
3493   if (UseCompressedOops) {
3494     strw(zr, dst);
3495   } else
3496     str(zr, dst);
3497 }
3498 
3499 #if INCLUDE_ALL_GCS
3500 /*
3501  * g1_write_barrier_pre -- G1GC pre-write barrier: records pre_val, the
3502  * value about to be overwritten (loaded from obj if obj != noreg).
3503  *
3504  * Allocates rscratch1
3505  */
3506 void MacroAssembler::g1_write_barrier_pre(Register obj,
3507                                           Register pre_val,
3508                                           Register thread,
3509                                           Register tmp,
3510                                           bool tosca_live,
3511                                           bool expand_call) {
3512   // If expand_call is true then we expand the call_VM_leaf macro
3513   // directly to skip generating the check by
3514   // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
3515 
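       // SATB (snapshot-at-the-beginning): while concurrent marking is
       // active, the value about to be overwritten is recorded in the
       // thread's SATB queue so marking still traces the object graph as
       // it was when marking started.
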
3516 #ifdef _LP64
3517   assert(thread == rthread, "must be");
3518 #endif // _LP64
3519 
3520   Label done;
3521   Label runtime;
3522 
3523   assert_different_registers(obj, pre_val, tmp, rscratch1);
3524   assert(pre_val != noreg &&  tmp != noreg, "expecting a register");
3525 
3526   Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
3527                                        PtrQueue::byte_offset_of_active()));
3528   Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
3529                                        PtrQueue::byte_offset_of_index()));
3530   Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
3531                                        PtrQueue::byte_offset_of_buf()));
3532 
3533 
3534   // Is marking active?
3535   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
3536     ldrw(tmp, in_progress);
3537   } else {
3538     assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
3539     ldrb(tmp, in_progress);
3540   }
3541   cbzw(tmp, done);
3542 
3543   // Do we need to load the previous value?
3544   if (obj != noreg) {
3545     load_heap_oop(pre_val, Address(obj, 0));
3546   }
3547 
3548   // Is the previous value null?
3549   cbz(pre_val, done);
3550 
3551   // Can we store original value in the thread's buffer?
3552   // Is index == 0?
3553   // (The index field is typed as size_t.)
3554 
3555   ldr(tmp, index);                      // tmp := *index_adr
3556   cbz(tmp, runtime);                    // tmp == 0?
3557                                         // If yes, goto runtime
3558 
3559   sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
3560   str(tmp, index);                      // *index_adr := tmp
3561   ldr(rscratch1, buffer);
3562   add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr
3563 
3564   // Record the previous value
3565   str(pre_val, Address(tmp, 0));
3566   b(done);
3567 
3568   bind(runtime);
3569   // save the live input values
3570   push(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);
3571 
3572   // Calling the runtime using the regular call_VM_leaf mechanism generates
3573   // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
3574   // that checks that *(rfp+frame::interpreter_frame_last_sp) == NULL.
3575   //
3576   // If we are generating the pre-barrier without a frame (e.g. in the
3577   // intrinsified Reference.get() routine) then rfp might be pointing to
3578   // the caller frame and so this check will most likely fail at runtime.
3579   //
3580   // Expanding the call directly bypasses the generation of the check.
3581   // So when we do not have a full interpreter frame on the stack,
3582   // expand_call should be passed true.
3583 
3584   if (expand_call) {
3585     LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
3586     pass_arg1(this, thread);
3587     pass_arg0(this, pre_val);
3588     MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
3589   } else {
3590     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
3591   }
3592 
3593   pop(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);
3594 
3595   bind(done);
3596 }
3597 
3598 /*
3599  * g1_write_barrier_post -- G1GC post-write barrier for store of new_val at
3600  * store_addr
3601  *
3602  * Allocates rscratch1
3603  */
3604 void MacroAssembler::g1_write_barrier_post(Register store_addr,
3605                                            Register new_val,
3606                                            Register thread,
3607                                            Register tmp,
3608                                            Register tmp2) {
3609 #ifdef _LP64
3610   assert(thread == rthread, "must be");
3611 #endif // _LP64
3612   assert_different_registers(store_addr, new_val, thread, tmp, tmp2,
3613                              rscratch1);
3614   assert(store_addr != noreg && new_val != noreg && tmp != noreg
3615          && tmp2 != noreg, "expecting a register");
3616 
3617   Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
3618                                        PtrQueue::byte_offset_of_index()));
3619   Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
3620                                        PtrQueue::byte_offset_of_buf()));
3621 
3622   BarrierSet* bs = Universe::heap()->barrier_set();
3623   CardTableModRefBS* ct = (CardTableModRefBS*)bs;
3624   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
3625 
3626   Label done;
3627   Label runtime;
3628 
3629   // Does store cross heap regions?
3630 
3631   eor(tmp, store_addr, new_val);
3632   lsr(tmp, tmp, HeapRegion::LogOfHRGrainBytes);
3633   cbz(tmp, done);
3634 
3635   // crosses regions, storing NULL?
3636 
3637   cbz(new_val, done);
3638 
3639   // storing region crossing non-NULL, is card already dirty?
3640 
3641   ExternalAddress cardtable((address) ct->byte_map_base);
3642   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
3643   const Register card_addr = tmp;
3644 
3645   lsr(card_addr, store_addr, CardTableModRefBS::card_shift);
3646 
3647   // get the address of the card
3648   load_byte_map_base(tmp2);
3649   add(card_addr, card_addr, tmp2);
3650   ldrb(tmp2, Address(card_addr));
3651   cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
3652   br(Assembler::EQ, done);
3653 
3654   assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
3655 
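       // StoreLoad orders the preceding oop store before the card re-read
       // below, so we cannot race with a concurrent refinement thread that
       // cleans the card before observing our store.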
3656   membar(Assembler::StoreLoad);
3657 
3658   ldrb(tmp2, Address(card_addr));
3659   cbzw(tmp2, done);
3660 
3661   // storing a region crossing, non-NULL oop, card is clean.
3662   // dirty card and log.
3663 
3664   strb(zr, Address(card_addr));
3665 
3666   ldr(rscratch1, queue_index);
3667   cbz(rscratch1, runtime);
3668   sub(rscratch1, rscratch1, wordSize);
3669   str(rscratch1, queue_index);
3670 
3671   ldr(tmp2, buffer);
3672   str(card_addr, Address(tmp2, rscratch1));
3673   b(done);
3674 
3675   bind(runtime);
3676   // save the live input values
3677   push(store_addr->bit(true) | new_val->bit(true), sp);
3678   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
3679   pop(store_addr->bit(true) | new_val->bit(true), sp);
3680 
3681   bind(done);
3682 }
3683 
3684 #endif // INCLUDE_ALL_GCS
3685 
3686 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
3687   assert(oop_recorder() != NULL, "this assembler needs a Recorder");
3688   int index = oop_recorder()->allocate_metadata_index(obj);
3689   RelocationHolder rspec = metadata_Relocation::spec(index);
3690   return Address((address)obj, rspec);
3691 }
3692 
3693 // Move an oop into a register.  immediate is true if we want
3694 // immediate instructions, i.e. we are not going to patch this
3695 // instruction while the code is being executed by another thread.  In
3696 // that case we can use move immediates rather than the constant pool.
3697 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
3698   int oop_index;
3699   if (obj == NULL) {
3700     oop_index = oop_recorder()->allocate_oop_index(obj);
3701   } else {
3702     oop_index = oop_recorder()->find_index(obj);
3703     assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
3704   }
3705   RelocationHolder rspec = oop_Relocation::spec(oop_index);
3706   if (! immediate) {
3707     address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
3708     ldr_constant(dst, Address(dummy, rspec));
3709   } else
3710     mov(dst, Address((address)obj, rspec));
3711 }
3712 
3713 // Move a metadata address into a register.
3714 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
3715   int oop_index;
3716   if (obj == NULL) {
3717     oop_index = oop_recorder()->allocate_metadata_index(obj);
3718   } else {
3719     oop_index = oop_recorder()->find_index(obj);
3720   }
3721   RelocationHolder rspec = metadata_Relocation::spec(oop_index);
3722   mov(dst, Address((address)obj, rspec));
3723 }
3724 
3725 Address MacroAssembler::constant_oop_address(jobject obj) {
3726   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
3727   assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
3728   int oop_index = oop_recorder()->find_index(obj);
3729   return Address((address)obj, oop_Relocation::spec(oop_index));
3730 }
3731 
3732 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
3733 void MacroAssembler::tlab_allocate(Register obj,
3734                                    Register var_size_in_bytes,
3735                                    int con_size_in_bytes,
3736                                    Register t1,
3737                                    Register t2,
3738                                    Label& slow_case) {
3739   assert_different_registers(obj, t2);
3740   assert_different_registers(obj, var_size_in_bytes);
3741   Register end = t2;
3742 
3743   // verify_tlab();
3744 
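       // Roughly:
       //   obj = thread->tlab_top();
       //   end = obj + size;
       //   if (end > thread->tlab_end()) goto slow_case;
       //   thread->set_tlab_top(end);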
3745   ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
3746   if (var_size_in_bytes == noreg) {
3747     lea(end, Address(obj, con_size_in_bytes));
3748   } else {
3749     lea(end, Address(obj, var_size_in_bytes));
3750   }
3751   ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
3752   cmp(end, rscratch1);
3753   br(Assembler::HI, slow_case);
3754 
3755   // update the tlab top pointer
3756   str(end, Address(rthread, JavaThread::tlab_top_offset()));
3757 
3758   // recover var_size_in_bytes if necessary
3759   if (var_size_in_bytes == end) {
3760     sub(var_size_in_bytes, var_size_in_bytes, obj);
3761   }
3762   // verify_tlab();
3763 }
3764 
3765 // Preserves r19, and r3.
3766 Register MacroAssembler::tlab_refill(Label& retry,
3767                                      Label& try_eden,
3768                                      Label& slow_case) {
3769   Register top = r0;
3770   Register t1  = r2;
3771   Register t2  = r4;
3772   assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3);
3773   Label do_refill, discard_tlab;
3774 
3775   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3776     // No allocation in the shared eden.
3777     b(slow_case);
3778   }
3779 
3780   ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
3781   ldr(t1,  Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
3782 
3783   // calculate amount of free space
3784   sub(t1, t1, top);
3785   lsr(t1, t1, LogHeapWordSize);
3786 
3787   // Retain tlab and allocate object in shared space if
3788   // the amount free in the tlab is too large to discard.
3789 
3790   ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
3791   cmp(t1, rscratch1);
3792   br(Assembler::LE, discard_tlab);
3793 
3794   // Retain
3795   // ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
3796   mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
3797   add(rscratch1, rscratch1, t2);
3798   str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
3799 
3800   if (TLABStats) {
3801     // increment number of slow_allocations
3802     addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())),
3803          1, rscratch1);
3804   }
3805   b(try_eden);
3806 
3807   bind(discard_tlab);
3808   if (TLABStats) {
3809     // increment number of refills
3810     addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1,
3811          rscratch1);
3812     // accumulate wastage -- t1 is amount free in tlab
3813     addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1,
3814          rscratch1);
3815   }
3816 
3817   // if tlab is currently allocated (top or end != null) then
3818   // fill [top, end + alignment_reserve) with array object
3819   cbz(top, do_refill);
3820 
3821   // set up the mark word
3822   mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
3823   str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes()));
3824   // set the length to the remaining space
3825   sub(t1, t1, typeArrayOopDesc::header_size(T_INT));
3826   add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
3827   lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint)));
3828   strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes()));
3829   // set klass to intArrayKlass
3830   {
3831     unsigned long offset;
3832     // Dubious reloc: why not an oop reloc?
3833     adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()),
3834          offset);
3835     ldr(t1, Address(rscratch1, offset));
3836   }
3837   // Store klass last.  Concurrent GCs assume the klass length is valid
3838   // if the klass field is not null.
3839   store_klass(top, t1);
3840 
3841   mov(t1, top);
3842   ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
3843   sub(t1, t1, rscratch1);
3844   incr_allocated_bytes(rthread, t1, 0, rscratch1);
3845 
3846   // refill the tlab with an eden allocation
3847   bind(do_refill);
3848   ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
3849   lsl(t1, t1, LogHeapWordSize);
3850   // allocate new tlab, address returned in top
3851   eden_allocate(top, t1, 0, t2, slow_case);
3852 
3853   // Check that t1 was preserved in eden_allocate.
3854 #ifdef ASSERT
3855   if (UseTLAB) {
3856     Label ok;
3857     Register tsize = r4;
3858     assert_different_registers(tsize, rthread, t1);
3859     str(tsize, Address(pre(sp, -16)));
3860     ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
3861     lsl(tsize, tsize, LogHeapWordSize);
3862     cmp(t1, tsize);
3863     br(Assembler::EQ, ok);
3864     STOP("assert(t1 != tlab size)");
3865     should_not_reach_here();
3866 
3867     bind(ok);
3868     ldr(tsize, Address(post(sp, 16)));
3869   }
3870 #endif
3871   str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
3872   str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
3873   add(top, top, t1);
3874   sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
3875   str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
3876   verify_tlab();
3877   b(retry);
3878 
3879   return rthread; // for use by caller
3880 }
3881 
3882 // Defines obj, preserves var_size_in_bytes
3883 void MacroAssembler::eden_allocate(Register obj,
3884                                    Register var_size_in_bytes,
3885                                    int con_size_in_bytes,
3886                                    Register t1,
3887                                    Label& slow_case) {
3888   assert_different_registers(obj, var_size_in_bytes, t1);
3889   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3890     b(slow_case);
3891   } else {
3892     Register end = t1;
3893     Register heap_end = rscratch2;
3894     Label retry;
3895     bind(retry);
3896     {
3897       unsigned long offset;
3898       adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
3899       ldr(heap_end, Address(rscratch1, offset));
3900     }
3901 
3902     ExternalAddress heap_top((address) Universe::heap()->top_addr());
3903 
3904     // Get the current top of the heap
3905     {
3906       unsigned long offset;
3907       adrp(rscratch1, heap_top, offset);
3908       // Use add() here after ADRP, rather than lea().
3909       // lea() does not generate anything if its offset is zero.
3910       // However, relocs expect to find either an ADD or a load/store
3911       // insn after an ADRP.  add() always generates an ADD insn, even
3912       // for add(Rn, Rn, 0).
3913       add(rscratch1, rscratch1, offset);
3914       ldaxr(obj, rscratch1);
3915     }
3916 
3917     // Adjust it by the size of our new object
3918     if (var_size_in_bytes == noreg) {
3919       lea(end, Address(obj, con_size_in_bytes));
3920     } else {
3921       lea(end, Address(obj, var_size_in_bytes));
3922     }
3923 
3924     // if end < obj then we wrapped around high memory
3925     cmp(end, obj);
3926     br(Assembler::LO, slow_case);
3927 
3928     cmp(end, heap_end);
3929     br(Assembler::HI, slow_case);
3930 
3931     // If heap_top hasn't been changed by some other thread, update it.
3932     stlxr(rscratch2, end, rscratch1);
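         // stlxr writes 0 to rscratch2 on success; non-zero means another
         // thread updated heap_top first, so retry.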
3933     cbnzw(rscratch2, retry);
3934   }
3935 }
3936 
3937 void MacroAssembler::verify_tlab() {
3938 #ifdef ASSERT
3939   if (UseTLAB && VerifyOops) {
3940     Label next, ok;
3941 
3942     stp(rscratch2, rscratch1, Address(pre(sp, -16)));
3943 
3944     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
3945     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
3946     cmp(rscratch2, rscratch1);
3947     br(Assembler::HS, next);
3948     STOP("assert(top >= start)");
3949     should_not_reach_here();
3950 
3951     bind(next);
3952     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
3953     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
3954     cmp(rscratch2, rscratch1);
3955     br(Assembler::HS, ok);
3956     STOP("assert(top <= end)");
3957     should_not_reach_here();
3958 
3959     bind(ok);
3960     ldp(rscratch2, rscratch1, Address(post(sp, 16)));
3961   }
3962 #endif
3963 }
3964 
3965 // Writes one word to each successive stack page until the given size is
3966 // reached, to check for stack overflow + shadow pages.  This clobbers tmp.
3967 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
3968   assert_different_registers(tmp, size, rscratch1);
3969   mov(tmp, sp);
3970   // Bang stack for total size given plus shadow page size.
3971   // Bang one page at a time because large size can bang beyond yellow and
3972   // red zones.
3973   Label loop;
3974   mov(rscratch1, os::vm_page_size());
3975   bind(loop);
3976   lea(tmp, Address(tmp, -os::vm_page_size()));
3977   subsw(size, size, rscratch1);
3978   str(size, Address(tmp));
3979   br(Assembler::GT, loop);
3980 
3981   // Bang down shadow pages too.
3982   // The -1 because we already subtracted 1 page.
3983   for (int i = 0; i < StackShadowPages-1; i++) {
3984     // This could be any sized move, but since it can serve as a
3985     // debugging crumb the bigger the better.
3986     lea(tmp, Address(tmp, -os::vm_page_size()));
3987     str(size, Address(tmp));
3988   }
3989 }
3990 
3991 
3992 address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
3993   unsigned long off;
3994   adrp(r, Address(page, rtype), off);
3995   InstructionMark im(this);
3996   code_section()->relocate(inst_mark(), rtype);
3997   ldrw(zr, Address(r, off));
3998   return inst_mark();
3999 }
4000 
4001 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
4002   InstructionMark im(this);
4003   code_section()->relocate(inst_mark(), rtype);
4004   ldrw(zr, Address(r, 0));
4005   return inst_mark();
4006 }
4007 
4008 void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
4009   relocInfo::relocType rtype = dest.rspec().reloc()->type();
4010   unsigned long low_page = (unsigned long)CodeCache::low_bound() >> 12;
4011   unsigned long high_page = (unsigned long)(CodeCache::high_bound()-1) >> 12;
4012   unsigned long dest_page = (unsigned long)dest.target() >> 12;
4013   long offset_low = dest_page - low_page;
4014   long offset_high = dest_page - high_page;
4015 
4016   assert(is_valid_AArch64_address(dest.target()), "bad address");
4017   assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
4018 
4019   InstructionMark im(this);
4020   code_section()->relocate(inst_mark(), dest.rspec());
4021   // 8143067: Ensure that the adrp can reach the dest from anywhere within
4022   // the code cache so that if it is relocated we know it will still reach it.
4023   if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
4024     _adrp(reg1, dest.target());
4025   } else {
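         // Out of ADRP range: form an address that keeps the target's low
         // 32 bits but takes its high bits from pc, so the ADRP offset
         // always fits; the movk then installs the real high 32 bits.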
4026     unsigned long target = (unsigned long)dest.target();
4027     unsigned long adrp_target
4028       = (target & 0xffffffffUL) | ((unsigned long)pc() & 0xffff00000000UL);
4029 
4030     _adrp(reg1, (address)adrp_target);
4031     movk(reg1, target >> 32, 32);
4032   }
4033   byte_offset = (unsigned long)dest.target() & 0xfff;
4034 }
4035 
4036 void MacroAssembler::load_byte_map_base(Register reg) {
4037   jbyte *byte_map_base =
4038     ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base;
4039 
4040   if (is_valid_AArch64_address((address)byte_map_base)) {
4041     // Strictly speaking the byte_map_base isn't an address at all,
4042     // and it might even be negative.
4043     unsigned long offset;
4044     adrp(reg, ExternalAddress((address)byte_map_base), offset);
4045     // We expect offset to be zero with most collectors.
4046     if (offset != 0) {
4047       add(reg, reg, offset);
4048     }
4049   } else {
4050     mov(reg, (uint64_t)byte_map_base);
4051   }
4052 }
4053 
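     // Save rfp/lr and claim a 'framesize'-byte frame.  Three regimes: the
     // scaled stp offset reaches up to 1 << 9 bytes, an immediate sub up to
     // 1 << 12, and anything larger goes through rscratch1.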
4054 void MacroAssembler::build_frame(int framesize) {
4055   if (framesize == 0) {
4056     // Is this even possible?
4057     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
4058   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
4059     sub(sp, sp, framesize);
4060     stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
4061   } else {
4062     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
4063     if (framesize < ((1 << 12) + 2 * wordSize))
4064       sub(sp, sp, framesize - 2 * wordSize);
4065     else {
4066       mov(rscratch1, framesize - 2 * wordSize);
4067       sub(sp, sp, rscratch1);
4068     }
4069   }
4070 }
4071 
4072 void MacroAssembler::remove_frame(int framesize) {
4073   if (framesize == 0) {
4074     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
4075   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
4076     ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
4077     add(sp, sp, framesize);
4078   } else {
4079     if (framesize < ((1 << 12) + 2 * wordSize))
4080       add(sp, sp, framesize - 2 * wordSize);
4081     else {
4082       mov(rscratch1, framesize - 2 * wordSize);
4083       add(sp, sp, rscratch1);
4084     }
4085     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
4086   }
4087 }
4088 
4089 // Search for str1 in str2 and return index or -1
4090 void MacroAssembler::string_indexof(Register str2, Register str1,
4091                                     Register cnt2, Register cnt1,
4092                                     Register tmp1, Register tmp2,
4093                                     Register tmp3, Register tmp4,
4094                                     int icnt1, Register result) {
4095   Label BM, LINEARSEARCH, DONE, NOMATCH, MATCH;
4096 
4097   Register ch1 = rscratch1;
4098   Register ch2 = rscratch2;
4099   Register cnt1tmp = tmp1;
4100   Register cnt2tmp = tmp2;
4101   Register cnt1_neg = cnt1;
4102   Register cnt2_neg = cnt2;
4103   Register result_tmp = tmp4;
4104 
4105   // Note, inline_string_indexOf() generates checks:
4106   // if (substr.count > string.count) return -1;
4107   // if (substr.count == 0) return 0;
4108 
4109 // We have two strings, a source string in str2, cnt2 and a pattern string
4110 // in str1, cnt1. Find the first occurrence of the pattern in the source or return -1.
4111 
4112 // For a larger pattern and source we use a simplified Boyer-Moore algorithm.
4113 // With a small pattern and source we use a linear scan.
4114 
4115   if (icnt1 == -1) {
4116     cmp(cnt1, 256);             // Use Linear Scan if cnt1 < 8 || cnt1 >= 256
4117     ccmp(cnt1, 8, 0b0000, LO);  // Can't handle skip >= 256 because we use
4118     br(LO, LINEARSEARCH);       // a byte array.
4119     cmp(cnt1, cnt2, LSR, 2);    // Source must be 4 * pattern for BM
4120     br(HS, LINEARSEARCH);
4121   }
4122 
4123 // The Boyer-Moore algorithm is based on the description here:
4124 //
4125 // http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
4126 //
4127 // This describes an algorithm with two shift rules: the 'Bad Character'
4128 // rule and the 'Good Suffix' rule.
4129 //
4130 // These rules are essentially heuristics for how far we can shift the
4131 // pattern along the search string.
4132 //
4133 // The implementation here uses the 'Bad Character' rule only because of the
4134 // complexity of initialisation for the 'Good Suffix' rule.
4135 //
4136 // This is also known as the Boyer-Moore-Horspool algorithm:
4137 //
4138 // http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm
4139 //
4140 // #define ASIZE 128
4141 //
4142 //    int bm(unsigned char *x, int m, unsigned char *y, int n) {
4143 //       int i, j;
4144 //       unsigned c;
4145 //       unsigned char bc[ASIZE];
4146 //
4147 //       /* Preprocessing */
4148 //       for (i = 0; i < ASIZE; ++i)
4149 //          bc[i] = 0;
4150 //       for (i = 0; i < m - 1; ) {
4151 //          c = x[i];
4152 //          ++i;
4153 //          if (c < ASIZE) bc[c] = i;
4154 //       }
4155 //
4156 //       /* Searching */
4157 //       j = 0;
4158 //       while (j <= n - m) {
4159 //          c = y[j+m-1];
4160 //          if (x[m-1] == c) {
4161 //            for (i = m - 2; i >= 0 && x[i] == y[i + j]; --i);
4162 //            if (i < 0) return j;
4163 //          }
4164 //          if (c < ASIZE)
4165 //            j = j - bc[c] + m;
4166 //          else j += 1; // Advance by 1 only if char >= ASIZE
4167 //       }
4168 //    }
4169 
4170   if (icnt1 == -1) {
4171     BIND(BM);
4172 
4173     Label ZLOOP, BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP;
4174     Label BMADV, BMMATCH, BMCHECKEND;
4175 
4176     Register cnt1end = tmp2;
4177     Register str2end = cnt2;
4178     Register skipch = tmp2;
4179 
4180     // Restrict ASIZE to 128 to reduce stack space/initialisation.
4181     // The presence of chars >= ASIZE in the pattern string does not affect
4182     // performance, but we must be careful not to initialise them in the stack
4183     // array.
4184     // The presence of chars >= ASIZE in the source string may adversely affect
4185     // performance since we can only advance by one when we encounter one.
4186 
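           // Claim 128 bytes on the stack for the bc[] shift table and
           // zero it, 16 bytes per stp.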
4187       stp(zr, zr, pre(sp, -128));
4188       for (int i = 1; i < 8; i++)
4189           stp(zr, zr, Address(sp, i*16));
4190 
4191       mov(cnt1tmp, 0);
4192       sub(cnt1end, cnt1, 1);
4193     BIND(BCLOOP);
4194       ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
4195       cmp(ch1, 128);
4196       add(cnt1tmp, cnt1tmp, 1);
4197       br(HS, BCSKIP);
4198       strb(cnt1tmp, Address(sp, ch1));
4199     BIND(BCSKIP);
4200       cmp(cnt1tmp, cnt1end);
4201       br(LT, BCLOOP);
4202 
4203       mov(result_tmp, str2);
4204 
4205       sub(cnt2, cnt2, cnt1);
4206       add(str2end, str2, cnt2, LSL, 1);
4207     BIND(BMLOOPSTR2);
4208       sub(cnt1tmp, cnt1, 1);
4209       ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
4210       ldrh(skipch, Address(str2, cnt1tmp, Address::lsl(1)));
4211       cmp(ch1, skipch);
4212       br(NE, BMSKIP);
4213       subs(cnt1tmp, cnt1tmp, 1);
4214       br(LT, BMMATCH);
4215     BIND(BMLOOPSTR1);
4216       ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
4217       ldrh(ch2, Address(str2, cnt1tmp, Address::lsl(1)));
4218       cmp(ch1, ch2);
4219       br(NE, BMSKIP);
4220       subs(cnt1tmp, cnt1tmp, 1);
4221       br(GE, BMLOOPSTR1);
4222     BIND(BMMATCH);
4223       sub(result_tmp, str2, result_tmp);
4224       lsr(result, result_tmp, 1);
4225       add(sp, sp, 128);
4226       b(DONE);
4227     BIND(BMADV);
4228       add(str2, str2, 2);
4229       b(BMCHECKEND);
4230     BIND(BMSKIP);
4231       cmp(skipch, 128);
4232       br(HS, BMADV);
4233       ldrb(ch2, Address(sp, skipch));
4234       add(str2, str2, cnt1, LSL, 1);
4235       sub(str2, str2, ch2, LSL, 1);
4236     BIND(BMCHECKEND);
4237       cmp(str2, str2end);
4238       br(LE, BMLOOPSTR2);
4239       add(sp, sp, 128);
4240       b(NOMATCH);
4241   }
4242 
4243   BIND(LINEARSEARCH);
4244   {
4245     Label DO1, DO2, DO3;
4246 
4247     Register str2tmp = tmp2;
4248     Register first = tmp3;
4249 
4250     if (icnt1 == -1)
4251     {
4252         Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT, LAST_WORD;
4253 
4254         cmp(cnt1, 4);
4255         br(LT, DOSHORT);
4256 
4257         sub(cnt2, cnt2, cnt1);
4258         sub(cnt1, cnt1, 4);
4259         mov(result_tmp, cnt2);
4260 
4261         lea(str1, Address(str1, cnt1, Address::uxtw(1)));
4262         lea(str2, Address(str2, cnt2, Address::uxtw(1)));
4263         sub(cnt1_neg, zr, cnt1, LSL, 1);
4264         sub(cnt2_neg, zr, cnt2, LSL, 1);
4265         ldr(first, Address(str1, cnt1_neg));
4266 
4267       BIND(FIRST_LOOP);
4268         ldr(ch2, Address(str2, cnt2_neg));
4269         cmp(first, ch2);
4270         br(EQ, STR1_LOOP);
4271       BIND(STR2_NEXT);
4272         adds(cnt2_neg, cnt2_neg, 2);
4273         br(LE, FIRST_LOOP);
4274         b(NOMATCH);
4275 
4276       BIND(STR1_LOOP);
4277         adds(cnt1tmp, cnt1_neg, 8);
4278         add(cnt2tmp, cnt2_neg, 8);
4279         br(GE, LAST_WORD);
4280 
4281       BIND(STR1_NEXT);
4282         ldr(ch1, Address(str1, cnt1tmp));
4283         ldr(ch2, Address(str2, cnt2tmp));
4284         cmp(ch1, ch2);
4285         br(NE, STR2_NEXT);
4286         adds(cnt1tmp, cnt1tmp, 8);
4287         add(cnt2tmp, cnt2tmp, 8);
4288         br(LT, STR1_NEXT);
4289 
4290       BIND(LAST_WORD);
4291         ldr(ch1, Address(str1));
4292         sub(str2tmp, str2, cnt1_neg);         // adjust to corresponding
4293         ldr(ch2, Address(str2tmp, cnt2_neg)); // word in str2
4294         cmp(ch1, ch2);
4295         br(NE, STR2_NEXT);
4296         b(MATCH);
4297 
4298       BIND(DOSHORT);
4299         cmp(cnt1, 2);
4300         br(LT, DO1);
4301         br(GT, DO3);
4302     }
4303 
4304     if (icnt1 == 4) {
4305       Label CH1_LOOP;
4306 
4307         ldr(ch1, str1);
4308         sub(cnt2, cnt2, 4);
4309         mov(result_tmp, cnt2);
4310         lea(str2, Address(str2, cnt2, Address::uxtw(1)));
4311         sub(cnt2_neg, zr, cnt2, LSL, 1);
4312 
4313       BIND(CH1_LOOP);
4314         ldr(ch2, Address(str2, cnt2_neg));
4315         cmp(ch1, ch2);
4316         br(EQ, MATCH);
4317         adds(cnt2_neg, cnt2_neg, 2);
4318         br(LE, CH1_LOOP);
4319         b(NOMATCH);
4320     }
4321 
4322     if (icnt1 == -1 || icnt1 == 2) {
4323       Label CH1_LOOP;
4324 
4325       BIND(DO2);
4326         ldrw(ch1, str1);
4327         sub(cnt2, cnt2, 2);
4328         mov(result_tmp, cnt2);
4329         lea(str2, Address(str2, cnt2, Address::uxtw(1)));
4330         sub(cnt2_neg, zr, cnt2, LSL, 1);
4331 
4332       BIND(CH1_LOOP);
4333         ldrw(ch2, Address(str2, cnt2_neg));
4334         cmp(ch1, ch2);
4335         br(EQ, MATCH);
4336         adds(cnt2_neg, cnt2_neg, 2);
4337         br(LE, CH1_LOOP);
4338         b(NOMATCH);
4339     }
4340 
4341     if (icnt1 == -1 || icnt1 == 3) {
4342       Label FIRST_LOOP, STR2_NEXT, STR1_LOOP;
4343 
4344       BIND(DO3);
4345         ldrw(first, str1);
4346         ldrh(ch1, Address(str1, 4));
4347 
4348         sub(cnt2, cnt2, 3);
4349         mov(result_tmp, cnt2);
4350         lea(str2, Address(str2, cnt2, Address::uxtw(1)));
4351         sub(cnt2_neg, zr, cnt2, LSL, 1);
4352 
4353       BIND(FIRST_LOOP);
4354         ldrw(ch2, Address(str2, cnt2_neg));
4355         cmpw(first, ch2);
4356         br(EQ, STR1_LOOP);
4357       BIND(STR2_NEXT);
4358         adds(cnt2_neg, cnt2_neg, 2);
4359         br(LE, FIRST_LOOP);
4360         b(NOMATCH);
4361 
4362       BIND(STR1_LOOP);
4363         add(cnt2tmp, cnt2_neg, 4);
4364         ldrh(ch2, Address(str2, cnt2tmp));
4365         cmp(ch1, ch2);
4366         br(NE, STR2_NEXT);
4367         b(MATCH);
4368     }
4369 
4370     if (icnt1 == -1 || icnt1 == 1) {
4371       Label CH1_LOOP, HAS_ZERO;
4372       Label DO1_SHORT, DO1_LOOP;
4373 
4374       BIND(DO1);
4375         ldrh(ch1, str1);
4376         cmp(cnt2, 4);
4377         br(LT, DO1_SHORT);
4378 
4379         orr(ch1, ch1, ch1, LSL, 16);  // replicate the char into all
4380         orr(ch1, ch1, ch1, LSL, 32);  // four 16-bit lanes
4381 
4382         sub(cnt2, cnt2, 4);
4383         mov(result_tmp, cnt2);
4384         lea(str2, Address(str2, cnt2, Address::uxtw(1)));
4385         sub(cnt2_neg, zr, cnt2, LSL, 1);
4386 
4387         mov(tmp3, 0x0001000100010001);
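      // The loop below detects a matching char with the usual SWAR
      // zero-in-lane trick.  With ch2 = word ^ (ch1 replicated), a C
      // sketch of the test computed by the sub/orr/bics sequence is:
      //
      //   uint64_t t = (ch2 - 0x0001000100010001ULL)
      //                & ~(ch2 | 0x7fff7fff7fff7fffULL);
      //
      // t is non-zero iff some 16-bit lane of ch2 is zero, i.e. iff one
      // of the four loaded chars equals ch1.  At HAS_ZERO, rev + clz
      // turn the lowest marker bit into the byte offset of that char.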
4388       BIND(CH1_LOOP);
4389         ldr(ch2, Address(str2, cnt2_neg));
4390         eor(ch2, ch1, ch2);
4391         sub(tmp1, ch2, tmp3);
4392         orr(tmp2, ch2, 0x7fff7fff7fff7fff);
4393         bics(tmp1, tmp1, tmp2);
4394         br(NE, HAS_ZERO);
4395         adds(cnt2_neg, cnt2_neg, 8);
4396         br(LT, CH1_LOOP);
4397 
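        // The word ending exactly at the end of the string has not been
        // examined yet: set cnt2_neg to 0 and go around once more for it
        // (this may re-test a few chars, which is harmless).  On the
        // second visit cnt2_neg is 8, so we fall through to NOMATCH.
        // Note that 'mov' preserves the flags set by 'cmp'.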
4398         cmp(cnt2_neg, 8);
4399         mov(cnt2_neg, 0);
4400         br(LT, CH1_LOOP);
4401         b(NOMATCH);
4402 
4403       BIND(HAS_ZERO);
4404         rev(tmp1, tmp1);
4405         clz(tmp1, tmp1);
4406         add(cnt2_neg, cnt2_neg, tmp1, LSR, 3);
4407         b(MATCH);
4408 
4409       BIND(DO1_SHORT);
4410         mov(result_tmp, cnt2);
4411         lea(str2, Address(str2, cnt2, Address::uxtw(1)));
4412         sub(cnt2_neg, zr, cnt2, LSL, 1);
4413       BIND(DO1_LOOP);
4414         ldrh(ch2, Address(str2, cnt2_neg));
4415         cmpw(ch1, ch2);
4416         br(EQ, MATCH);
4417         adds(cnt2_neg, cnt2_neg, 2);
4418         br(LT, DO1_LOOP);
4419     }
4420   }
4421   BIND(NOMATCH);
4422     mov(result, -1);
4423     b(DONE);
4424   BIND(MATCH);
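    // Every path arranged result_tmp and cnt2_neg so that the match's
    // char index is result_tmp + cnt2_neg / 2 (cnt2_neg is a negative
    // byte offset, hence the arithmetic shift).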
4425     add(result, result_tmp, cnt2_neg, ASR, 1);
4426   BIND(DONE);
4427 }
4428 
4429 // Compare strings.
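//
// A rough C sketch of the intended result, following Java's
// String.compareTo over UTF-16 chars (names are illustrative only):
//
//   int string_compare(jchar* s1, int l1, jchar* s2, int l2) {
//     int n = MIN2(l1, l2);
//     for (int i = 0; i < n; i++)
//       if (s1[i] != s2[i]) return s1[i] - s2[i];
//     return l1 - l2;
//   }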
4430 void MacroAssembler::string_compare(Register str1, Register str2,
4431                                     Register cnt1, Register cnt2, Register result,
4432                                     Register tmp1) {
4433   Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING,
4434     NEXT_WORD, DIFFERENCE;
4435 
4436   BLOCK_COMMENT("string_compare {");
4437 
4438   // Compute the minimum of the string lengths and save the difference.
4439   subsw(tmp1, cnt1, cnt2);
4440   cselw(cnt2, cnt1, cnt2, Assembler::LE); // min
4441 
4442   // A very short string
4443   cmpw(cnt2, 4);
4444   br(Assembler::LT, SHORT_STRING);
4445 
4446   // Check if the strings start at the same location.
4447   cmp(str1, str2);
4448   br(Assembler::EQ, LENGTH_DIFF);
4449 
4450   // Compare longwords
4451   {
4452     subw(cnt2, cnt2, 4); // The last longword is a special case
4453 
4454     // Move both string pointers to the last longword of their
4455     // strings, negate the remaining count, and convert it to bytes.
4456     lea(str1, Address(str1, cnt2, Address::uxtw(1)));
4457     lea(str2, Address(str2, cnt2, Address::uxtw(1)));
4458     sub(cnt2, zr, cnt2, LSL, 1);
4459 
4460     // Loop, loading longwords and xor-ing them into rscratch2 to detect a difference.
4461     bind(NEXT_WORD);
4462     ldr(result, Address(str1, cnt2));
4463     ldr(cnt1, Address(str2, cnt2));
4464     adds(cnt2, cnt2, wordSize);
4465     eor(rscratch2, result, cnt1);
4466     cbnz(rscratch2, DIFFERENCE);
4467     br(Assembler::LT, NEXT_WORD);
4468 
4469     // Last longword.  In the case where length == 4 we compare the
4470     // same longword twice, but that's still faster than another
4471     // conditional branch.
4472 
4473     ldr(result, Address(str1));
4474     ldr(cnt1, Address(str2));
4475     eor(rscratch2, result, cnt1);
4476     cbz(rscratch2, LENGTH_DIFF);
4477 
4478     // Find the first different characters in the longwords and
4479     // compute their difference.
4480     bind(DIFFERENCE);
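    // rev + clz yield the bit offset, in memory order, of the first
    // differing byte; rounding down to a multiple of 16 gives the bit
    // offset of the first differing char, which lsrv/uxthw extract from
    // each word so the two chars can be subtracted.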
4481     rev(rscratch2, rscratch2);
4482     clz(rscratch2, rscratch2);
4483     andr(rscratch2, rscratch2, -16);
4484     lsrv(result, result, rscratch2);
4485     uxthw(result, result);
4486     lsrv(cnt1, cnt1, rscratch2);
4487     uxthw(cnt1, cnt1);
4488     subw(result, result, cnt1);
4489     b(DONE);
4490   }
4491 
4492   bind(SHORT_STRING);
4493   // Is the minimum length zero?
4494   cbz(cnt2, LENGTH_DIFF);
4495 
4496   bind(SHORT_LOOP);
4497   load_unsigned_short(result, Address(post(str1, 2)));
4498   load_unsigned_short(cnt1, Address(post(str2, 2)));
4499   subw(result, result, cnt1);
4500   cbnz(result, DONE);
4501   sub(cnt2, cnt2, 1);
4502   cbnz(cnt2, SHORT_LOOP);
4503 
4504   // Strings are equal up to min length.  Return the length difference.
4505   bind(LENGTH_DIFF);
4506   mov(result, tmp1);
4507 
4508   // That's it
4509   bind(DONE);
4510 
4511   BLOCK_COMMENT("} string_compare");
4512 }
4513 
4514 
4515 // base:     Address of a buffer to be zeroed, 8 bytes aligned.
4516 // cnt:      Count in HeapWords.
4517 // Zeroes the buffer via block_zero (DC ZVA) when UseBlockZeroing, else via fill_words.
4518 void MacroAssembler::zero_words(Register base, Register cnt)
4519 {
4520   if (UseBlockZeroing) {
4521     block_zero(base, cnt);
4522   } else {
4523     fill_words(base, cnt, zr);
4524   }
4525 }
4526 
4527 // r10 = base:   Address of a buffer to be zeroed, 8 bytes aligned.
4528 // cnt:          Immediate count in HeapWords.
4529 // r11 = tmp:    For use as cnt if we need to call out
4530 #define ShortArraySize (18 * BytesPerLong)
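// The strategy, roughly (cnt is a compile-time constant here):
//
//   cnt <= 18 words:               fully unrolled stp(zr, zr) stores
//   UseBlockZeroing && cnt large:  DC ZVA via block_zero()
//   otherwise:                     peel cnt % 8 words, then zero 8
//                                  words per iteration of a small loop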
4531 void MacroAssembler::zero_words(Register base, u_int64_t cnt)
4532 {
4533   Register tmp = r11;
4534   int i = cnt & 1;  // store any odd word to start
4535   if (i) str(zr, Address(base));
4536 
4537   if (cnt <= ShortArraySize / BytesPerLong) {
4538     for (; i < (int)cnt; i += 2)
4539       stp(zr, zr, Address(base, i * wordSize));
4540   } else if (UseBlockZeroing && cnt >= (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord)) {
4541     mov(tmp, cnt);
4542     block_zero(base, tmp, true);
4543   } else {
4544     const int unroll = 4; // Number of stp(zr, zr) instructions we'll unroll
4545     int remainder = cnt % (2 * unroll);
4546     for (; i < remainder; i += 2)
4547       stp(zr, zr, Address(base, i * wordSize));
4548 
4549     Label loop;
4550     Register cnt_reg = rscratch1;
4551     Register loop_base = rscratch2;
4552     cnt = cnt - remainder;
4553     mov(cnt_reg, cnt);
4554     // adjust base and prebias by -2 * wordSize so we can pre-increment
4555     add(loop_base, base, (remainder - 2) * wordSize);
4556     bind(loop);
4557     sub(cnt_reg, cnt_reg, 2 * unroll);
4558     for (i = 1; i < unroll; i++)
4559       stp(zr, zr, Address(loop_base, 2 * i * wordSize));
4560     stp(zr, zr, Address(pre(loop_base, 2 * unroll * wordSize)));
4561     cbnz(cnt_reg, loop);
4562   }
4563 }
4564 
4565 // base:   Address of a buffer to be filled, 8 bytes aligned.
4566 // cnt:    Count in 8-byte units.
4567 // value:  Value to fill the buffer with.
4568 // base will point to the end of the buffer after filling.
4569 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
4570 {
4571 //  Algorithm:
4572 //
4573 //    scratch1 = cnt & 7;
4574 //    cnt -= scratch1;
4575 //    p += scratch1;
4576 //    switch (scratch1) {
4577 //      do {
4578 //        cnt -= 8;
4579 //          p[-8] = v;
4580 //        case 7:
4581 //          p[-7] = v;
4582 //        case 6:
4583 //          p[-6] = v;
4584 //          // ...
4585 //        case 1:
4586 //          p[-1] = v;
4587 //        case 0:
4588 //          p += 8;
4589 //      } while (cnt);
4590 //    }
4591 
4592   assert_different_registers(base, cnt, value, rscratch1, rscratch2);
4593 
4594   Label fini, skip, entry, loop;
4595   const int unroll = 8; // Number of stp instructions we'll unroll
4596 
4597   cbz(cnt, fini);
4598   tbz(base, 3, skip);
4599   str(value, Address(post(base, 8)));
4600   sub(cnt, cnt, 1);
4601   bind(skip);
4602 
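  // Computed entry into the unrolled loop below, Duff's-device style:
  // rscratch1 holds the even part of the remainder (0..14 words), and
  // base has already been advanced past those words.  Each stp is 4
  // bytes of code and stores 2 words, so branching rscratch1 * 2 bytes
  // back from 'entry' executes exactly the stores the remainder needs
  // on the first pass.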
4603   andr(rscratch1, cnt, (unroll-1) * 2);
4604   sub(cnt, cnt, rscratch1);
4605   add(base, base, rscratch1, Assembler::LSL, 3);
4606   adr(rscratch2, entry);
4607   sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
4608   br(rscratch2);
4609 
4610   bind(loop);
4611   add(base, base, unroll * 16);
4612   for (int i = -unroll; i < 0; i++)
4613     stp(value, value, Address(base, i * 16));
4614   bind(entry);
4615   subs(cnt, cnt, unroll * 2);
4616   br(Assembler::GE, loop);
4617 
4618   tbz(cnt, 0, fini);
4619   str(value, Address(post(base, 8)));
4620   bind(fini);
4621 }
4622 
4623 // Use DC ZVA to do fast zeroing.
4624 // base:   Address of a buffer to be zeroed, 8 bytes aligned.
4625 // cnt:    Count in HeapWords.
4626 // is_large: True when 'cnt' is known to be >= BlockZeroingLowLimit.
4627 void MacroAssembler::block_zero(Register base, Register cnt, bool is_large)
4628 {
4629   Label small;
4630   Label store_pair, loop_store_pair, done;
4631   Label base_aligned;
4632 
4633   assert_different_registers(base, cnt, rscratch1);
4634   guarantee(base == r10 && cnt == r11, "fix register usage");
4635 
4636   Register tmp = rscratch1;
4637   Register tmp2 = rscratch2;
4638   int zva_length = VM_Version::zva_length();
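  // zva_length is the block size, in bytes, that a single DC ZVA
  // instruction zeroes, as advertised by DCZID_EL0 (typically 64).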
4639 
4640   // Ensure the ZVA length is a multiple of 16; the subsequent
4641   // alignment and store operations rely on this.
4642   assert (zva_length % 16 == 0, "Unexpected ZVA Length");
4643 
4644   if (!is_large) cbz(cnt, done);
4645   tbz(base, 3, base_aligned);
4646   str(zr, Address(post(base, 8)));
4647   sub(cnt, cnt, 1);
4648   bind(base_aligned);
4649 
4650   // Ensure count >= zva_length * 2 so that a DC ZVA is still
4651   // worthwhile after the alignment stores above.
4652   if (!is_large || !(BlockZeroingLowLimit >= zva_length * 2)) {
4653     int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);
4654     subs(tmp, cnt, low_limit >> 3);
4655     br(Assembler::LT, small);
4656   }
4657 
4658   far_call(StubRoutines::aarch64::get_zero_longs());
4659 
4660   bind(small);
4661 
4662   const int unroll = 8; // Number of stp instructions we'll unroll
4663   Label small_loop, small_table_end;
4664 
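  // Store any remaining words via the same computed entry into an
  // unrolled stp sequence as in fill_words() above.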
4665   andr(tmp, cnt, (unroll-1) * 2);
4666   sub(cnt, cnt, tmp);
4667   add(base, base, tmp, Assembler::LSL, 3);
4668   adr(tmp2, small_table_end);
4669   sub(tmp2, tmp2, tmp, Assembler::LSL, 1);
4670   br(tmp2);
4671 
4672   bind(small_loop);
4673   add(base, base, unroll * 16);
4674   for (int i = -unroll; i < 0; i++)
4675     stp(zr, zr, Address(base, i * 16));
4676   bind(small_table_end);
4677   subs(cnt, cnt, unroll * 2);
4678   br(Assembler::GE, small_loop);
4679 
4680   tbz(cnt, 0, done);
4681   str(zr, Address(post(base, 8)));
4682 
4683   bind(done);
4684 }
4685 
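// Set result to true iff the two strings, each cnt chars long, hold
// identical chars.  Roughly, in C (illustrative only):
//
//   bool string_equals(jchar* s1, jchar* s2, int cnt) {
//     return memcmp(s1, s2, cnt * sizeof(jchar)) == 0;
//   }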
4686 void MacroAssembler::string_equals(Register str1, Register str2,
4687                                    Register cnt, Register result,
4688                                    Register tmp1) {
4689   Label SAME_CHARS, DONE, SHORT_LOOP, SHORT_STRING,
4690     NEXT_WORD;
4691 
4692   const Register tmp2 = rscratch1;
4693   assert_different_registers(str1, str2, cnt, result, tmp1, tmp2, rscratch2);
4694 
4695   BLOCK_COMMENT("string_equals {");
4696 
4697   // Start by assuming that the strings are not equal.
4698   mov(result, zr);
4699 
4700   // A very short string
4701   cmpw(cnt, 4);
4702   br(Assembler::LT, SHORT_STRING);
4703 
4704   // Check if the strings start at the same location.
4705   cmp(str1, str2);
4706   br(Assembler::EQ, SAME_CHARS);
4707 
4708   // Compare longwords
4709   {
4710     subw(cnt, cnt, 4); // The last longword is a special case
4711 
4712     // Move both string pointers to the last longword of their
4713     // strings, negate the remaining count, and convert it to bytes.
4714     lea(str1, Address(str1, cnt, Address::uxtw(1)));
4715     lea(str2, Address(str2, cnt, Address::uxtw(1)));
4716     sub(cnt, zr, cnt, LSL, 1);
4717 
4718     // Loop, loading longwords and xor-ing them into rscratch2 to detect a difference.
4719     bind(NEXT_WORD);
4720     ldr(tmp1, Address(str1, cnt));
4721     ldr(tmp2, Address(str2, cnt));
4722     adds(cnt, cnt, wordSize);
4723     eor(rscratch2, tmp1, tmp2);
4724     cbnz(rscratch2, DONE);
4725     br(Assembler::LT, NEXT_WORD);
4726 
4727     // Last longword.  In the case where length == 4 we compare the
4728     // same longword twice, but that's still faster than another
4729     // conditional branch.
4730 
4731     ldr(tmp1, Address(str1));
4732     ldr(tmp2, Address(str2));
4733     eor(rscratch2, tmp1, tmp2);
4734     cbz(rscratch2, SAME_CHARS);
4735     b(DONE);
4736   }
4737 
4738   bind(SHORT_STRING);
4739   // Is the length zero?
4740   cbz(cnt, SAME_CHARS);
4741 
4742   bind(SHORT_LOOP);
4743   load_unsigned_short(tmp1, Address(post(str1, 2)));
4744   load_unsigned_short(tmp2, Address(post(str2, 2)));
4745   subw(tmp1, tmp1, tmp2);
4746   cbnz(tmp1, DONE);
4747   sub(cnt, cnt, 1);
4748   cbnz(cnt, SHORT_LOOP);
4749 
4750   // Strings are equal.
4751   bind(SAME_CHARS);
4752   mov(result, true);
4753 
4754   // That's it
4755   bind(DONE);
4756 
4757   BLOCK_COMMENT("} string_equals");
4758 }
4759 
4760 // Compare char[] arrays aligned to 4 bytes
4761 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
4762                                         Register result, Register tmp1)
4763 {
4764   Register cnt1 = rscratch1;
4765   Register cnt2 = rscratch2;
4766   Register tmp2 = rscratch2;
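  // Note that cnt2 and tmp2 both alias rscratch2; this is safe because
  // cnt2 is dead once the length comparison below has been done.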
4767 
4768   Label SAME, DIFFER, NEXT, TAIL03, TAIL01;
4769 
4770   int length_offset  = arrayOopDesc::length_offset_in_bytes();
4771   int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);
4772 
4773   BLOCK_COMMENT("char_arrays_equals  {");
4774 
4775     // different until proven equal
4776     mov(result, false);
4777 
4778     // same array?
4779     cmp(ary1, ary2);
4780     br(Assembler::EQ, SAME);
4781 
4782     // ne if either null
4783     cbz(ary1, DIFFER);
4784     cbz(ary2, DIFFER);
4785 
4786     // lengths ne?
4787     ldrw(cnt1, Address(ary1, length_offset));
4788     ldrw(cnt2, Address(ary2, length_offset));
4789     cmp(cnt1, cnt2);
4790     br(Assembler::NE, DIFFER);
4791 
4792     lea(ary1, Address(ary1, base_offset));
4793     lea(ary2, Address(ary2, base_offset));
4794 
4795     subs(cnt1, cnt1, 4);
4796     br(LT, TAIL03);
4797 
4798   BIND(NEXT);
4799     ldr(tmp1, Address(post(ary1, 8)));
4800     ldr(tmp2, Address(post(ary2, 8)));
4801     subs(cnt1, cnt1, 4);
4802     eor(tmp1, tmp1, tmp2);
4803     cbnz(tmp1, DIFFER);
4804     br(GE, NEXT);
4805 
4806   BIND(TAIL03);  // 0-3 chars left, cnt1 = #chars left - 4
4807     tst(cnt1, 0b10);
4808     br(EQ, TAIL01);
4809     ldrw(tmp1, Address(post(ary1, 4)));
4810     ldrw(tmp2, Address(post(ary2, 4)));
4811     cmp(tmp1, tmp2);
4812     br(NE, DIFFER);
4813   BIND(TAIL01);  // 0-1 chars left
4814     tst(cnt1, 0b01);
4815     br(EQ, SAME);
4816     ldrh(tmp1, ary1);
4817     ldrh(tmp2, ary2);
4818     cmp(tmp1, tmp2);
4819     br(NE, DIFFER);
4820 
4821   BIND(SAME);
4822     mov(result, true);
4823   BIND(DIFFER); // result already set
4824 
4825   BLOCK_COMMENT("} char_arrays_equals");
4826 }
4827 
4828 // encode char[] to byte[] in ISO_8859_1
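//
// result returns the number of leading chars that were encoded; a rough
// scalar equivalent (illustrative only):
//
//   int encode_iso_array(jchar* src, jbyte* dst, int len) {
//     int i = 0;
//     for (; i < len && src[i] <= 0xff; i++)
//       dst[i] = (jbyte)src[i];
//     return i;
//   }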
4829 void MacroAssembler::encode_iso_array(Register src, Register dst,
4830                       Register len, Register result,
4831                       FloatRegister Vtmp1, FloatRegister Vtmp2,
4832                       FloatRegister Vtmp3, FloatRegister Vtmp4)
4833 {
4834     Label DONE, NEXT_32, LOOP_8, NEXT_8, LOOP_1, NEXT_1;
4835     Register tmp1 = rscratch1;
4836 
4837       mov(result, len); // Save initial len
4838 
4839       subs(len, len, 32);
4840       br(LT, LOOP_8);
4841 
4842       // The SIMD 'uqxtn' and 'uqxtn2' instructions below convert chars to
4843       // bytes and set the 'QC' bit in the FPSR if any char cannot fit in a
4844       // byte, so clear the FPSR first and test it after each block.
4845       clear_fpsr();
4846 
4847     BIND(NEXT_32);
4848       ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src);
4849       uqxtn(Vtmp1, T8B, Vtmp1, T8H);  // uqxtn  - write bottom half
4850       uqxtn(Vtmp1, T16B, Vtmp2, T8H); // uqxtn2 - write top half
4851       uqxtn(Vtmp2, T8B, Vtmp3, T8H);
4852       uqxtn(Vtmp2, T16B, Vtmp4, T8H); // uqxtn2
4853       get_fpsr(tmp1);
4854       cbnzw(tmp1, LOOP_8);
4855       st1(Vtmp1, Vtmp2, T16B, post(dst, 32));
4856       subs(len, len, 32);
4857       add(src, src, 64);
4858       br(GE, NEXT_32);
4859 
4860     BIND(LOOP_8);
4861       adds(len, len, 32-8);
4862       br(LT, LOOP_1);
4863       clear_fpsr(); // QC may be set from loop above, clear again
4864     BIND(NEXT_8);
4865       ld1(Vtmp1, T8H, src);
4866       uqxtn(Vtmp1, T8B, Vtmp1, T8H);
4867       get_fpsr(tmp1);
4868       cbnzw(tmp1, LOOP_1);
4869       st1(Vtmp1, T8B, post(dst, 8));
4870       subs(len, len, 8);
4871       add(src, src, 16);
4872       br(GE, NEXT_8);
4873 
4874     BIND(LOOP_1);
4875       adds(len, len, 8);
4876       br(LE, DONE);
4877 
4878     BIND(NEXT_1);
4879       ldrh(tmp1, Address(post(src, 2)));
4880       tst(tmp1, 0xff00);
4881       br(NE, DONE);
4882       strb(tmp1, Address(post(dst, 1)));
4883       subs(len, len, 1);
4884       br(GT, NEXT_1);
4885 
4886     BIND(DONE);
4887       sub(result, result, len); // Return index where we stopped
4888 }