/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciInlineKlass.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "runtime/continuation.hpp"
#include "runtime/globals.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif

// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx  #imm12
//      adr/adrp Rx imm21; add Ry Rx  #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//        other  Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//                strictly should be 64 bit non-FP/SIMD i.e.
//       0101_000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//                 strictly should be 64 bit movz #imm16<<0
//       110___10100 (i.e. requires insn[31:21] == 11010010100)
//
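// Worked example (illustrative): the unconditional branch `b .+8`
// encodes as 0x14000002. Its insn[30:25] field is 0b001010, which the
// dispatch switch in RelocActions::run maps to the unconditional-branch
// case; the decoder then recovers sextract(insn, 25, 0) == 2 words,
// i.e. a target 8 bytes past the branch.
//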
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};
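
// Two concrete RelocActions are defined below: Patcher rewrites an
// instruction sequence in place so that it refers to a new target,
// while AArch64Decoder (further down) recovers the target an existing
// sequence refers to.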

class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    int instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    ptrdiff_t offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
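    // The movk that follows the adrp carries bits 47:32 of the
    // destination. After patching it, hand the enclosing adrp patcher a
    // target whose bits 47:32 match the insn address, so that the
    // page-relative adrp offset stays within range.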
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (address_is != target) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, set byte_offset to that offset
// and return true. If the second instruction is an LDR, the offset may
// be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}
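
// For example (illustrative): if insn2 is `ldr x1, [x0, #16]`, its
// imm12 field is 2 and its size field is 3, so offset_for() sets
// byte_offset to 2 << 3 == 16.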

class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
      ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                 + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                 + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits).  We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
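  // Loads of the polling page are emitted as ldrw to zr and carry no
  // patchable target (see the matching assert in RelocActions::run).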
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp and sp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far jump not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

static bool is_preemptable(address entry_point) {
  return entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)
  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  if (is_preemptable(entry_point)) {
    // skip setting last_pc since we already set it to desired value.
    set_last_Java_frame(last_java_sp, rfp, noreg, rscratch1);
  } else {
    set_last_Java_frame(last_java_sp, rfp, l, rscratch1);
  }

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check that the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

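// The emitted stub thus has this shape (illustrative layout):
//
//   trampoline:  ldr  rscratch1, 1f   // load 64-bit destination
//                br   rscratch1       // jump to it
//   1:           .quad <destination>
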
address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movz; movk; movk; movz; movk; movk; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * (7 + extra_instructions);
  } else {
    return NativeInstruction::instruction_size * (5 + extra_instructions);
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, receiver);
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code which is planned for the code-buffer offset
// `target` (and which moves along with any padding emitted here) ends
// up aligned according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}
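
// For example (illustrative): align(16, offset() + 8) emits nops until
// the position 8 bytes past the current one falls on a 16-byte
// boundary; ic_check() uses this to align the verified entry point that
// follows the inline cache check.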

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
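  // One real nop followed by two movk instructions targeting zr: the
  // movks are architectural no-ops whose immediate fields leave room
  // for the runtime to record call-site metadata later.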
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null.  A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
    ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
    cmp(holder_klass, temp_itbl_klass);
    br(Assembler::EQ, L_holder_found);
    cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //        goto L_resolved_found;  // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //        holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
    ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
    cbz(temp_itbl_klass, L_no_such_interface);
    cmp(resolved_klass, temp_itbl_klass);
    br(Assembler::EQ, L_resolved_found);
    cmp(holder_klass, temp_itbl_klass);
    br(Assembler::NE, L_loop_search_resolved);
    mov(holder_offset, scan_temp);
    b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
1368     - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
1369   ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
1370 }
1371 
1372 // virtual method calling
1373 void MacroAssembler::lookup_virtual_method(Register recv_klass,
1374                                            RegisterOrConstant vtable_index,
1375                                            Register method_result) {
1376   assert(vtableEntry::size() * wordSize == 8,
1377          "adjust the scaling in the code below");
1378   int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());
1379 
1380   if (vtable_index.is_register()) {
1381     lea(method_result, Address(recv_klass,
1382                                vtable_index.as_register(),
1383                                Address::lsl(LogBytesPerWord)));
1384     ldr(method_result, Address(method_result, vtable_offset_in_bytes));
1385   } else {
1386     vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
1387     ldr(method_result,
1388         form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0));
1389   }
1390 }
1391 
1392 void MacroAssembler::check_klass_subtype(Register sub_klass,
1393                            Register super_klass,
1394                            Register temp_reg,
1395                            Label& L_success) {
1396   Label L_failure;
1397   check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, nullptr);
1398   check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr);
1399   bind(L_failure);
1400 }
1401 
1402 
1403 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
1404                                                    Register super_klass,
1405                                                    Register temp_reg,
1406                                                    Label* L_success,
1407                                                    Label* L_failure,
1408                                                    Label* L_slow_path,
1409                                                    Register super_check_offset) {
1410   assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
1411   bool must_load_sco = ! super_check_offset->is_valid();
1412   if (must_load_sco) {
1413     assert(temp_reg != noreg, "supply either a temp or a register offset");
1414   }
1415 
1416   Label L_fallthrough;
1417   int label_nulls = 0;
1418   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
1419   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
1420   if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
1421   assert(label_nulls <= 1, "at most one null in the batch");
1422 
1423   int sco_offset = in_bytes(Klass::super_check_offset_offset());
1424   Address super_check_offset_addr(super_klass, sco_offset);
1425 
1426   // Hacked jmp, which may only be used just before L_fallthrough.
1427 #define final_jmp(label)                                                \
1428   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
1429   else                            b(label)                /*omit semi*/
1430 
1431   // If the pointers are equal, we are done (e.g., String[] elements).
1432   // This self-check enables sharing of secondary supertype arrays among
1433   // non-primary types such as array-of-interface.  Otherwise, each such
1434   // type would need its own customized SSA.
1435   // We move this check to the front of the fast path because many
1436   // type checks are in fact trivially successful in this manner,
1437   // so we get a nicely predicted branch right at the start of the check.
1438   cmp(sub_klass, super_klass);
1439   br(Assembler::EQ, *L_success);
1440 
1441   // Check the supertype display:
1442   if (must_load_sco) {
1443     ldrw(temp_reg, super_check_offset_addr);
1444     super_check_offset = temp_reg;
1445   }
1446 
1447   Address super_check_addr(sub_klass, super_check_offset);
1448   ldr(rscratch1, super_check_addr);
1449   cmp(super_klass, rscratch1); // load displayed supertype
1450   br(Assembler::EQ, *L_success);
1451 
1452   // This check has worked decisively for primary supers.
1453   // Secondary supers are sought in the super_cache ('super_cache_addr').
1454   // (Secondary supers are interfaces and very deeply nested subtypes.)
1455   // This works in the same check above because of a tricky aliasing
1456   // between the super_cache and the primary super display elements.
1457   // (The 'super_check_addr' can address either, as the case requires.)
1458   // Note that the cache is updated below if it does not help us find
1459   // what we need immediately.
1460   // So if it was a primary super, we can just fail immediately.
1461   // Otherwise, it's the slow path for us (no success at this point).
1462 
1463   sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
1464   if (L_failure == &L_fallthrough) {
1465     cbz(rscratch1, *L_slow_path);
1466   } else {
1467     cbnz(rscratch1, *L_failure);
1468     final_jmp(*L_slow_path);
1469   }
1470 
1471   bind(L_fallthrough);
1472 
1473 #undef final_jmp
1474 }
1475 
1476 // These two are taken from x86, but they look generally useful
1477 
1478 // Scans 'count' pointer-sized words at [addr] for an occurrence of
1479 // 'value'. Generic.
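     // Roughly equivalent to (a sketch):
     //   while (count != 0) { if (*addr++ == value) break; count--; }
     // Flags are EQ on a match and NE on an exhausted scan; if count may
     // start at zero, the caller must pre-set the flags (see the
     // cmp(sp, zr) at the call sites).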
1480 void MacroAssembler::repne_scan(Register addr, Register value, Register count,
1481                                 Register scratch) {
1482   Label Lloop, Lexit;
1483   cbz(count, Lexit);
1484   bind(Lloop);
1485   ldr(scratch, post(addr, wordSize));
1486   cmp(value, scratch);
1487   br(EQ, Lexit);
1488   sub(count, count, 1);
1489   cbnz(count, Lloop);
1490   bind(Lexit);
1491 }
1492 
1493 // Scans 'count' 4-byte words at [addr] for an occurrence of
1494 // 'value'. Generic.
1495 void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
1496                                 Register scratch) {
1497   Label Lloop, Lexit;
1498   cbz(count, Lexit);
1499   bind(Lloop);
1500   ldrw(scratch, post(addr, wordSize));
1501   cmpw(value, scratch);
1502   br(EQ, Lexit);
1503   sub(count, count, 1);
1504   cbnz(count, Lloop);
1505   bind(Lexit);
1506 }
1507 
1508 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass,
1509                                                           Register super_klass,
1510                                                           Register temp_reg,
1511                                                           Register temp2_reg,
1512                                                           Label* L_success,
1513                                                           Label* L_failure,
1514                                                           bool set_cond_codes) {
1515   // NB! Callers may assume that, when temp2_reg is a valid register,
1516   // this code sets it to a nonzero value.
1517 
1518   assert_different_registers(sub_klass, super_klass, temp_reg);
1519   if (temp2_reg != noreg)
1520     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1521 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1522 
1523   Label L_fallthrough;
1524   int label_nulls = 0;
1525   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
1526   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
1527   assert(label_nulls <= 1, "at most one null in the batch");
1528 
1529   // a couple of useful fields in sub_klass:
1530   int ss_offset = in_bytes(Klass::secondary_supers_offset());
1531   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1532   Address secondary_supers_addr(sub_klass, ss_offset);
1533   Address super_cache_addr(     sub_klass, sc_offset);
1534 
1535   BLOCK_COMMENT("check_klass_subtype_slow_path");
1536 
1537   // Do a linear scan of the secondary super-klass chain.
1538   // This code is rarely used, so simplicity is a virtue here.
1539   // The repne_scan instruction uses fixed registers, which we must spill.
1540   // Don't worry too much about pre-existing connections with the input regs.
1541 
1542   assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
1543   assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
1544 
1545   RegSet pushed_registers;
1546   if (!IS_A_TEMP(r2))    pushed_registers += r2;
1547   if (!IS_A_TEMP(r5))    pushed_registers += r5;
1548 
1549   if (super_klass != r0) {
1550     if (!IS_A_TEMP(r0))   pushed_registers += r0;
1551   }
1552 
1553   push(pushed_registers, sp);
1554 
1555   // Get super_klass value into r0 (even if it was in r5 or r2).
1556   if (super_klass != r0) {
1557     mov(r0, super_klass);
1558   }
1559 
1560 #ifndef PRODUCT
1561   incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
1562 #endif //PRODUCT
1563 
1564   // We will consult the secondary-super array.
1565   ldr(r5, secondary_supers_addr);
1566   // Load the array length.
1567   ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1568   // Skip to start of data.
1569   add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1570 
1571   cmp(sp, zr); // Clear Z flag; SP is never zero
1572   // Scan R2 words at [R5] for an occurrence of R0.
1573   // Set NZ/Z based on last compare.
1574   repne_scan(r5, r0, r2, rscratch1);
1575 
1576   // Unspill the temp. registers:
1577   pop(pushed_registers, sp);
1578 
1579   br(Assembler::NE, *L_failure);
1580 
1581   // Success.  Cache the super we found and proceed in triumph.
1582 
1583   if (UseSecondarySupersCache) {
1584     str(super_klass, super_cache_addr);
1585   }
1586 
1587   if (L_success != &L_fallthrough) {
1588     b(*L_success);
1589   }
1590 
1591 #undef IS_A_TEMP
1592 
1593   bind(L_fallthrough);
1594 }
1595 
1596 // If Register r is invalid (noreg), take the next register from
1597 // available_regs and add it to regs_to_push.
1598 Register MacroAssembler::allocate_if_noreg(Register r,
1599                                   RegSetIterator<Register> &available_regs,
1600                                   RegSet &regs_to_push) {
1601   if (!r->is_valid()) {
1602     r = *available_regs++;
1603     regs_to_push += r;
1604   }
1605   return r;
1606 }
1607 
1608 // check_klass_subtype_slow_path_table() looks for super_klass in the
1609 // hash table belonging to sub_klass, branching to L_success or
1610 // L_failure as appropriate. This is essentially a shim which
1611 // allocates registers as necessary then calls
1612 // lookup_secondary_supers_table() to do the work. Any of the temp
1613 // regs may be noreg, in which case this logic chooses some free
1614 // registers and pushes and pops them around the call.
1615 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
1616                                                          Register super_klass,
1617                                                          Register temp_reg,
1618                                                          Register temp2_reg,
1619                                                          Register temp3_reg,
1620                                                          Register result_reg,
1621                                                          FloatRegister vtemp,
1622                                                          Label* L_success,
1623                                                          Label* L_failure,
1624                                                          bool set_cond_codes) {
1625   RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
1626 
1627   assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1628 
1629   Label L_fallthrough;
1630   int label_nulls = 0;
1631   if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
1632   if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
1633   assert(label_nulls <= 1, "at most one null in the batch");
1634 
1635   BLOCK_COMMENT("check_klass_subtype_slow_path");
1636 
1637   RegSetIterator<Register> available_regs
1638     = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin();
1639 
1640   RegSet pushed_regs;
1641 
1642   temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
1643   temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
1644   temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
1645   result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
1646 
1647   push(pushed_regs, sp);
1648 
1649   lookup_secondary_supers_table_var(sub_klass,
1650                                     super_klass,
1651                                     temp_reg, temp2_reg, temp3_reg, vtemp, result_reg,
1652                                     nullptr);
1653   cmp(result_reg, zr);
1654 
1655   // Unspill the temp. registers:
1656   pop(pushed_regs, sp);
1657 
1658   // NB! Callers may assume that, when set_cond_codes is true, this
1659   // code sets temp2_reg to a nonzero value.
1660   if (set_cond_codes) {
1661     mov(temp2_reg, 1);
1662   }
1663 
1664   br(Assembler::NE, *L_failure);
1665 
1666   if (L_success != &L_fallthrough) {
1667     b(*L_success);
1668   }
1669 
1670   bind(L_fallthrough);
1671 }
1672 
1673 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1674                                                    Register super_klass,
1675                                                    Register temp_reg,
1676                                                    Register temp2_reg,
1677                                                    Label* L_success,
1678                                                    Label* L_failure,
1679                                                    bool set_cond_codes) {
1680   if (UseSecondarySupersTable) {
1681     check_klass_subtype_slow_path_table
1682       (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg,
1683        /*vtemp*/fnoreg,
1684        L_success, L_failure, set_cond_codes);
1685   } else {
1686     check_klass_subtype_slow_path_linear
1687       (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes);
1688   }
1689 }
1690 
1691 
1692 // Ensure that the inline code and the stub are using the same registers.
1693 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                    \
1694 do {                                                               \
1695   assert(r_super_klass  == r0                                   && \
1696          r_array_base   == r1                                   && \
1697          r_array_length == r2                                   && \
1698          (r_array_index == r3        || r_array_index == noreg) && \
1699          (r_sub_klass   == r4        || r_sub_klass   == noreg) && \
1700          (r_bitmap      == rscratch2 || r_bitmap      == noreg) && \
1701          (result        == r5        || result        == noreg), "registers must match aarch64.ad"); \
1702 } while(0)
1703 
1704 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
1705                                                          Register r_super_klass,
1706                                                          Register temp1,
1707                                                          Register temp2,
1708                                                          Register temp3,
1709                                                          FloatRegister vtemp,
1710                                                          Register result,
1711                                                          u1 super_klass_slot,
1712                                                          bool stub_is_near) {
1713   assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1714 
1715   Label L_fallthrough;
1716 
1717   BLOCK_COMMENT("lookup_secondary_supers_table {");
1718 
1719   const Register
1720     r_array_base   = temp1, // r1
1721     r_array_length = temp2, // r2
1722     r_array_index  = temp3, // r3
1723     r_bitmap       = rscratch2;
1724 
1725   LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1726 
1727   u1 bit = super_klass_slot;
1728 
1729   // Make sure that result is nonzero if the TBZ below misses.
1730   mov(result, 1);
1731 
1732   // We're going to need the bitmap in a vector reg and in a core reg,
1733   // so load both now.
1734   ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1735   if (bit != 0) {
1736     ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1737   }
1738   // First check the bitmap to see if super_klass might be present. If
1739   // the bit is zero, we are certain that super_klass is not one of
1740   // the secondary supers.
1741   tbz(r_bitmap, bit, L_fallthrough);
1742 
1743   // Get the first array index that can contain super_klass into r_array_index.
1744   if (bit != 0) {
1745     shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit);
1746     cnt(vtemp, T8B, vtemp);
1747     addv(vtemp, T8B, vtemp);
1748     fmovd(r_array_index, vtemp);
1749   } else {
1750     mov(r_array_index, (u1)1);
1751   }
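       // In the bit != 0 case above, shld/cnt/addv/fmovd compute (a sketch):
       //   r_array_index = popcount(bitmap & (((uint64_t)2 << bit) - 1))
       // i.e. the number of bits set at or below 'bit': the left shift
       // discards the bits above 'bit' and the vector popcount sums the rest.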
1752   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1753 
1754   // We will consult the secondary-super array.
1755   ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1756 
1757   // The value i in r_array_index is >= 1, so even though r_array_base
1758   // points to the length, we don't need to adjust it to point to the
1759   // data.
1760   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1761   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1762 
1763   ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1764   eor(result, result, r_super_klass);
1765   cbz(result, L_fallthrough); // Found a match
1766 
1767   // Is there another entry to check? Consult the bitmap.
1768   tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough);
1769 
1770   // Linear probe.
1771   if (bit != 0) {
1772     ror(r_bitmap, r_bitmap, bit);
1773   }
1774 
1775   // The slot we just inspected is at secondary_supers[r_array_index - 1].
1776   // The next slot to be inspected, by the stub we're about to call,
1777   // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1778   // have been checked.
1779   Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub());
1780   if (stub_is_near) {
1781     bl(stub);
1782   } else {
1783     address call = trampoline_call(stub);
1784     if (call == nullptr) {
1785       return false; // trampoline allocation failed
1786     }
1787   }
1788 
1789   BLOCK_COMMENT("} lookup_secondary_supers_table");
1790 
1791   bind(L_fallthrough);
1792 
1793   if (VerifySecondarySupers) {
1794     verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1795                                   temp1, temp2, result);      // r1, r2, r5
1796   }
1797   return true;
1798 }
1799 
1800 // At runtime, return 0 in result if r_super_klass is a superclass of
1801 // r_sub_klass, otherwise return nonzero. Use this version of
1802 // lookup_secondary_supers_table() if you don't know ahead of time
1803 // which superclass will be searched for. Used by interpreter and
1804 // runtime stubs. It is larger and has somewhat greater latency than
1805 // the version above, which takes a constant super_klass_slot.
1806 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
1807                                                        Register r_super_klass,
1808                                                        Register temp1,
1809                                                        Register temp2,
1810                                                        Register temp3,
1811                                                        FloatRegister vtemp,
1812                                                        Register result,
1813                                                        Label *L_success) {
1814   assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2);
1815 
1816   Label L_fallthrough;
1817 
1818   BLOCK_COMMENT("lookup_secondary_supers_table {");
1819 
1820   const Register
1821     r_array_index = temp3,
1822     slot          = rscratch1,
1823     r_bitmap      = rscratch2;
1824 
1825   ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
1826 
1827   // Make sure that result is nonzero if the test below misses.
1828   mov(result, 1);
1829 
1830   ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
1831 
1832   // First check the bitmap to see if super_klass might be present. If
1833   // the bit is zero, we are certain that super_klass is not one of
1834   // the secondary supers.
1835 
1836   // The next instruction computes temp2 = (SECONDARY_SUPERS_TABLE_SIZE - 1) - slot,
1837   // i.e. it is equivalent to:
1838   //   mov(temp2, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); sub(temp2, temp2, slot);
1839   eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1));
1840   lslv(temp2, r_bitmap, temp2);
1841   tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough);
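       // In effect (a sketch of the three instructions above):
       //   if (((bitmap >> slot) & 1) == 0) goto L_fallthrough;
       // The left shift by (SECONDARY_SUPERS_TABLE_SIZE - 1 - slot) moves
       // bit 'slot' into bit 63, which tbz then tests.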
1842 
1843   bool must_save_v0 = (vtemp == fnoreg);
1844   if (must_save_v0) {
1845     // temp1 and result are free, so use them to preserve vtemp
1846     vtemp = v0;
1847     mov(temp1,  vtemp, D, 0);
1848     mov(result, vtemp, D, 1);
1849   }
1850 
1851   // Get the first array index that can contain super_klass into r_array_index.
1852   mov(vtemp, D, 0, temp2);
1853   cnt(vtemp, T8B, vtemp);
1854   addv(vtemp, T8B, vtemp);
1855   mov(r_array_index, vtemp, D, 0);
1856 
1857   if (must_save_v0) {
1858     mov(vtemp, D, 0, temp1 );
1859     mov(vtemp, D, 1, result);
1860   }
1861 
1862   // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
1863 
1864   const Register
1865     r_array_base   = temp1,
1866     r_array_length = temp2;
1867 
1868   // The value i in r_array_index is >= 1, so even though r_array_base
1869   // points to the length, we don't need to adjust it to point to the
1870   // data.
1871   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code");
1872   assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code");
1873 
1874   // We will consult the secondary-super array.
1875   ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
1876 
1877   ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1878   eor(result, result, r_super_klass);
1879   cbz(result, L_success ? *L_success : L_fallthrough); // Found a match
1880 
1881   // Is there another entry to check? Consult the bitmap.
1882   rorv(r_bitmap, r_bitmap, slot);
1883   // rol(r_bitmap, r_bitmap, 1);
1884   tbz(r_bitmap, 1, L_fallthrough);
1885 
1886   // The slot we just inspected is at secondary_supers[r_array_index - 1].
1887   // The next slot to be inspected, by the logic we're about to call,
1888   // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap
1889   // have been checked.
1890   lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index,
1891                                           r_bitmap, r_array_length, result, /*is_stub*/false);
1892 
1893   BLOCK_COMMENT("} lookup_secondary_supers_table");
1894 
1895   bind(L_fallthrough);
1896 
1897   if (VerifySecondarySupers) {
1898     verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0
1899                                   temp1, temp2, result);      // r1, r2, r5
1900   }
1901 
1902   if (L_success) {
1903     cbz(result, *L_success);
1904   }
1905 }
1906 
1907 // Called by code generated by check_klass_subtype_slow_path
1908 // above. This is called when there is a collision in the hashed
1909 // lookup in the secondary supers array.
1910 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass,
1911                                                              Register r_array_base,
1912                                                              Register r_array_index,
1913                                                              Register r_bitmap,
1914                                                              Register temp1,
1915                                                              Register result,
1916                                                              bool is_stub) {
1917   assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1);
1918 
1919   const Register
1920     r_array_length = temp1,
1921     r_sub_klass    = noreg; // unused
1922 
1923   if (is_stub) {
1924     LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;
1925   }
1926 
1927   Label L_fallthrough, L_huge;
1928 
1929   // Load the array length.
1930   ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
1931   // And adjust the array base to point to the data.
1932   // NB! Effectively increments current slot index by 1.
1933   assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "");
1934   add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
1935 
1936   // The bitmap is full to bursting.
1937   // Implicit invariant: BITMAP_FULL implies (length > 0)
1938   assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), "");
1939   cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2));
1940   br(GT, L_huge);
1941 
1942   // NB! Our caller has checked bits 0 and 1 in the bitmap. The
1943   // current slot (at secondary_supers[r_array_index]) has not yet
1944   // been inspected, and r_array_index may be out of bounds if we
1945   // wrapped around the end of the array.
1946 
1947   { // This is conventional linear probing, but instead of terminating
1948     // when a null entry is found in the table, we maintain a bitmap
1949     // in which a 0 indicates missing entries.
1950     // As long as the bitmap is not completely full,
1951     // array_length == popcount(bitmap). The array_length check above
1952     // guarantees there are 0s in the bitmap, so the loop eventually
1953     // terminates.
1954     Label L_loop;
1955     bind(L_loop);
1956 
1957     // Check for wraparound.
1958     cmp(r_array_index, r_array_length);
1959     csel(r_array_index, zr, r_array_index, GE);
1960 
1961     ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1962     eor(result, rscratch1, r_super_klass);
1963     cbz(result, L_fallthrough);
1964 
1965     tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1966 
1967     ror(r_bitmap, r_bitmap, 1);
1968     add(r_array_index, r_array_index, 1);
1969     b(L_loop);
1970   }
1971 
1972   { // Degenerate case: more than 64 secondary supers.
1973     // FIXME: We could do something smarter here, maybe a vectorized
1974     // comparison or a binary search, but is that worth any added
1975     // complexity?
1976     bind(L_huge);
1977     cmp(sp, zr); // Clear Z flag; SP is never zero
1978     repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
1979     cset(result, NE); // result == 0 iff we got a match.
1980   }
1981 
1982   bind(L_fallthrough);
1983 }
1984 
1985 // Make sure that the hashed lookup and a linear scan agree.
1986 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
1987                                                    Register r_super_klass,
1988                                                    Register temp1,
1989                                                    Register temp2,
1990                                                    Register result) {
1991   assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
1992 
1993   const Register
1994     r_array_base   = temp1,
1995     r_array_length = temp2,
1996     r_array_index  = noreg, // unused
1997     r_bitmap       = noreg; // unused
1998 
1999   BLOCK_COMMENT("verify_secondary_supers_table {");
2000 
2001   // We will consult the secondary-super array.
2002   ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
2003 
2004   // Load the array length.
2005   ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
2006   // And adjust the array base to point to the data.
2007   add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
2008 
2009   cmp(sp, zr); // Clear Z flag; SP is never zero
2010   // Scan r_array_length words at [r_array_base] for an occurrence of
2011   // r_super_klass. Set NZ/Z based on the last compare.
2012   repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
2013   // rscratch1 == 0 iff we got a match.
2014   cset(rscratch1, NE);
2015 
2016   Label passed;
2017   cmp(result, zr);
2018   cset(result, NE); // normalize result to 0/1 for comparison
2019 
2020   cmp(rscratch1, result);
2021   br(EQ, passed);
2022   {
2023     mov(r0, r_super_klass);         // r0 <- r0
2024     mov(r1, r_sub_klass);           // r1 <- r4
2025     mov(r2, /*expected*/rscratch1); // r2 <- r8
2026     mov(r3, result);                // r3 <- r5
2027     mov(r4, (address)("mismatch")); // r4 <- const
2028     rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2);
2029     should_not_reach_here();
2030   }
2031   bind(passed);
2032 
2033   BLOCK_COMMENT("} verify_secondary_supers_table");
2034 }
2035 
2036 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
2037   assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
2038   assert_different_registers(klass, rthread, scratch);
2039 
2040   Label L_fallthrough, L_tmp;
2041   if (L_fast_path == nullptr) {
2042     L_fast_path = &L_fallthrough;
2043   } else if (L_slow_path == nullptr) {
2044     L_slow_path = &L_fallthrough;
2045   }
2046   // Fast path check: class is fully initialized
2047   lea(scratch, Address(klass, InstanceKlass::init_state_offset()));
2048   ldarb(scratch, scratch);
2049   subs(zr, scratch, InstanceKlass::fully_initialized);
2050   br(Assembler::EQ, *L_fast_path);
2051 
2052   // Fast path check: current thread is initializer thread
2053   ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
2054   cmp(rthread, scratch);
2055 
2056   if (L_slow_path == &L_fallthrough) {
2057     br(Assembler::EQ, *L_fast_path);
2058     bind(*L_slow_path);
2059   } else if (L_fast_path == &L_fallthrough) {
2060     br(Assembler::NE, *L_slow_path);
2061     bind(*L_fast_path);
2062   } else {
2063     Unimplemented();
2064   }
2065 }
2066 
2067 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
2068   if (!VerifyOops || VerifyAdapterSharing) {
2069     // The address of the code string below confuses VerifyAdapterSharing
2070     // because it may differ between otherwise equivalent adapters.
2071     return;
2072   }
2073 
2074   // Pass register number to verify_oop_subroutine
2075   const char* b = nullptr;
2076   {
2077     ResourceMark rm;
2078     stringStream ss;
2079     ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line);
2080     b = code_string(ss.as_string());
2081   }
2082   BLOCK_COMMENT("verify_oop {");
2083 
2084   strip_return_address(); // This might happen within a stack frame.
2085   protect_return_address();
2086   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2087   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2088 
2089   mov(r0, reg);
2090   movptr(rscratch1, (uintptr_t)(address)b);
2091 
2092   // call indirectly to solve generation ordering problem
2093   lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2094   ldr(rscratch2, Address(rscratch2));
2095   blr(rscratch2);
2096 
2097   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2098   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2099   authenticate_return_address();
2100 
2101   BLOCK_COMMENT("} verify_oop");
2102 }
2103 
2104 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
2105   if (!VerifyOops || VerifyAdapterSharing) {
2106     // The address of the code string below confuses VerifyAdapterSharing
2107     // because it may differ between otherwise equivalent adapters.
2108     return;
2109   }
2110 
2111   const char* b = nullptr;
2112   {
2113     ResourceMark rm;
2114     stringStream ss;
2115     ss.print("verify_oop_addr: %s (%s:%d)", s, file, line);
2116     b = code_string(ss.as_string());
2117   }
2118   BLOCK_COMMENT("verify_oop_addr {");
2119 
2120   strip_return_address(); // This might happen within a stack frame.
2121   protect_return_address();
2122   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
2123   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
2124 
2125   // addr may contain sp so we will have to adjust it based on the
2126   // pushes that we just did.
2127   if (addr.uses(sp)) {
2128     lea(r0, addr);
2129     ldr(r0, Address(r0, 4 * wordSize));
2130   } else {
2131     ldr(r0, addr);
2132   }
2133   movptr(rscratch1, (uintptr_t)(address)b);
2134 
2135   // call indirectly to solve generation ordering problem
2136   lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2137   ldr(rscratch2, Address(rscratch2));
2138   blr(rscratch2);
2139 
2140   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
2141   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
2142   authenticate_return_address();
2143 
2144   BLOCK_COMMENT("} verify_oop_addr");
2145 }
2146 
2147 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
2148                                          int extra_slot_offset) {
2149   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
2150   int stackElementSize = Interpreter::stackElementSize;
2151   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
2152 #ifdef ASSERT
2153   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
2154   assert(offset1 - offset == stackElementSize, "correct arithmetic");
2155 #endif
2156   if (arg_slot.is_constant()) {
2157     return Address(esp, arg_slot.as_constant() * stackElementSize
2158                    + offset);
2159   } else {
2160     add(rscratch1, esp, arg_slot.as_register(),
2161         ext::uxtx, exact_log2(stackElementSize));
2162     return Address(rscratch1, offset);
2163   }
2164 }
2165 
2166 void MacroAssembler::call_VM_leaf_base(address entry_point,
2167                                        int number_of_arguments,
2168                                        Label *retaddr) {
2169   Label E, L;
2170 
2171   stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
2172 
2173   mov(rscratch1, entry_point);
2174   blr(rscratch1);
2175   if (retaddr)
2176     bind(*retaddr);
2177 
2178   ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
2179 }
2180 
2181 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2182   call_VM_leaf_base(entry_point, number_of_arguments);
2183 }
2184 
2185 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2186   pass_arg0(this, arg_0);
2187   call_VM_leaf_base(entry_point, 1);
2188 }
2189 
2190 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2191   assert_different_registers(arg_1, c_rarg0);
2192   pass_arg0(this, arg_0);
2193   pass_arg1(this, arg_1);
2194   call_VM_leaf_base(entry_point, 2);
2195 }
2196 
2197 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
2198                                   Register arg_1, Register arg_2) {
2199   assert_different_registers(arg_1, c_rarg0);
2200   assert_different_registers(arg_2, c_rarg0, c_rarg1);
2201   pass_arg0(this, arg_0);
2202   pass_arg1(this, arg_1);
2203   pass_arg2(this, arg_2);
2204   call_VM_leaf_base(entry_point, 3);
2205 }
2206 
2207 void MacroAssembler::super_call_VM_leaf(address entry_point) {
2208   MacroAssembler::call_VM_leaf_base(entry_point, 1);
2209 }
2210 
2211 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2212   pass_arg0(this, arg_0);
2213   MacroAssembler::call_VM_leaf_base(entry_point, 1);
2214 }
2215 
2216 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2217 
2218   assert_different_registers(arg_0, c_rarg1);
2219   pass_arg1(this, arg_1);
2220   pass_arg0(this, arg_0);
2221   MacroAssembler::call_VM_leaf_base(entry_point, 2);
2222 }
2223 
2224 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2225   assert_different_registers(arg_0, c_rarg1, c_rarg2);
2226   assert_different_registers(arg_1, c_rarg2);
2227   pass_arg2(this, arg_2);
2228   pass_arg1(this, arg_1);
2229   pass_arg0(this, arg_0);
2230   MacroAssembler::call_VM_leaf_base(entry_point, 3);
2231 }
2232 
2233 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2234   assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
2235   assert_different_registers(arg_1, c_rarg2, c_rarg3);
2236   assert_different_registers(arg_2, c_rarg3);
2237   pass_arg3(this, arg_3);
2238   pass_arg2(this, arg_2);
2239   pass_arg1(this, arg_1);
2240   pass_arg0(this, arg_0);
2241   MacroAssembler::call_VM_leaf_base(entry_point, 4);
2242 }
2243 
2244 void MacroAssembler::null_check(Register reg, int offset) {
2245   if (needs_explicit_null_check(offset)) {
2246     // provoke an OS null exception if reg is null by
2247     // accessing M[reg] w/o changing any registers
2248     // NOTE: this is plenty to provoke a segv
2249     ldr(zr, Address(reg));
2250   } else {
2251     // nothing to do; a (later) access of M[reg + offset]
2252     // will provoke an OS null exception if reg is null
2253   }
2254 }
2255 
2256 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
2257   assert_different_registers(markword, rscratch2);
2258   mov(rscratch2, markWord::inline_type_mask_in_place);
2259   andr(markword, markword, rscratch2);
2260   mov(rscratch2, markWord::inline_type_pattern);
2261   cmp(markword, rscratch2);
2262   br(Assembler::EQ, is_inline_type);
2263 }
2264 
2265 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
2266   ldrh(temp_reg, Address(klass, Klass::access_flags_offset()));
2267   andr(temp_reg, temp_reg, JVM_ACC_IDENTITY);
2268   cbz(temp_reg, is_inline_type);
2269 }
2270 
2271 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
2272   assert_different_registers(tmp, rscratch1);
2273   cbz(object, not_inline_type);
2274   const int is_inline_type_mask = markWord::inline_type_pattern;
2275   ldr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
2276   mov(rscratch1, is_inline_type_mask);
2277   andr(tmp, tmp, rscratch1);
2278   cmp(tmp, rscratch1);
2279   br(Assembler::NE, not_inline_type);
2280 }
2281 
2282 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) {
2283   assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2284   tbnz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, is_null_free_inline_type);
2285 }
2286 
2287 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) {
2288   assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2289   tbz(flags, ResolvedFieldEntry::is_null_free_inline_type_shift, not_null_free_inline_type);
2290 }
2291 
2292 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) {
2293   assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2294   tbnz(flags, ResolvedFieldEntry::is_flat_shift, is_flat);
2295 }
2296 
2297 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) {
2298   assert(temp_reg == noreg, "not needed"); // keep signature uniform with x86
2299   tbnz(flags, ResolvedFieldEntry::has_null_marker_shift, has_null_marker);
2300 }
2301 
2302 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
2303   Label test_mark_word;
2304   // load mark word
2305   ldr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
2306   // if the mark word is unlocked, it already contains the prototype bits
2307   tst(temp_reg, markWord::unlocked_value);
2308   br(Assembler::NE, test_mark_word);
2309   // slow path: the mark word is locked/displaced, so use the klass prototype
2310   load_prototype_header(temp_reg, oop);
2311 
2312   bind(test_mark_word);
2313   andr(temp_reg, temp_reg, test_bit);
2314   if (jmp_set) {
2315     cbnz(temp_reg, jmp_label);
2316   } else {
2317     cbz(temp_reg, jmp_label);
2318   }
2319 }
2320 
2321 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array) {
2322   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array);
2323 }
2324 
2325 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg,
2326                                              Label& is_non_flat_array) {
2327   test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array);
2328 }
2329 
2330 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
2331   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array);
2332 }
2333 
2334 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array) {
2335   test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array);
2336 }
2337 
2338 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) {
2339   tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2340   br(Assembler::NE, is_flat_array);
2341 }
2342 
2343 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) {
2344   tst(lh, Klass::_lh_array_tag_flat_value_bit_inplace);
2345   br(Assembler::EQ, is_non_flat_array);
2346 }
2347 
2348 // MacroAssembler protected routines needed to implement
2349 // public methods
2350 
2351 void MacroAssembler::mov(Register r, Address dest) {
2352   code_section()->relocate(pc(), dest.rspec());
2353   uint64_t imm64 = (uint64_t)dest.target();
2354   movptr(r, imm64);
2355 }
2356 
2357 // Move a constant pointer into r.  In AArch64 mode the virtual
2358 // address space is 48 bits in size, so we only need three
2359 // instructions to create a patchable instruction sequence that can
2360 // reach anywhere.
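     // For example (a sketch), movptr(r0, 0x123456789ABC) emits:
     //   movz r0, #0x9abc
     //   movk r0, #0x5678, lsl #16
     //   movk r0, #0x1234, lsl #32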
2361 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
2362 #ifndef PRODUCT
2363   {
2364     char buffer[64];
2365     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
2366     block_comment(buffer);
2367   }
2368 #endif
2369   assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
2370   movz(r, imm64 & 0xffff);
2371   imm64 >>= 16;
2372   movk(r, imm64 & 0xffff, 16);
2373   imm64 >>= 16;
2374   movk(r, imm64 & 0xffff, 32);
2375 }
2376 
2377 // Macro to mov replicated immediate to vector register.
2378 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is,
2379 //        the upper 56/48/32 bits must be zeros for B/H/S type.
2380 // Vd will get the following values for different arrangements in T
2381 //   imm64 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
2382 //   imm64 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
2383 //   imm64 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
2384 //   imm64 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
2385 //   imm64 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
2386 //   imm64 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
2387 //   imm64 == hex abcdefgh  T1D:  Vd = 00000000abcdefgh
2388 //   imm64 == hex abcdefgh  T2D:  Vd = 00000000abcdefgh00000000abcdefgh
2389 // Clobbers rscratch1
2390 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) {
2391   assert(T != T1Q, "unsupported");
2392   if (T == T1D || T == T2D) {
2393     int imm = operand_valid_for_movi_immediate(imm64, T);
2394     if (-1 != imm) {
2395       movi(Vd, T, imm);
2396     } else {
2397       mov(rscratch1, imm64);
2398       dup(Vd, T, rscratch1);
2399     }
2400     return;
2401   }
2402 
2403 #ifdef ASSERT
2404   if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)");
2405   if (T == T4H || T == T8H) assert((imm64  & ~0xffff) == 0, "extraneous bits (T4H/T8H)");
2406   if (T == T2S || T == T4S) assert((imm64  & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)");
2407 #endif
2408   int shift = operand_valid_for_movi_immediate(imm64, T);
2409   uint32_t imm32 = imm64 & 0xffffffffULL;
2410   if (shift >= 0) {
2411     movi(Vd, T, (imm32 >> shift) & 0xff, shift);
2412   } else {
2413     movw(rscratch1, imm32);
2414     dup(Vd, T, rscratch1);
2415   }
2416 }
2417 
2418 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
2419 {
2420 #ifndef PRODUCT
2421   {
2422     char buffer[64];
2423     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
2424     block_comment(buffer);
2425   }
2426 #endif
2427   if (operand_valid_for_logical_immediate(false, imm64)) {
2428     orr(dst, zr, imm64);
2429   } else {
2430     // we can use a combination of MOVZ or MOVN with
2431     // MOVK to build up the constant
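         // For example (a sketch): imm64 == 0x100000002 has two zero
         // halfwords, so it is built as
         //   movz dst, #0x2
         //   movk dst, #0x1, lsl #32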
2432     uint64_t imm_h[4];
2433     int zero_count = 0;
2434     int neg_count = 0;
2435     int i;
2436     for (i = 0; i < 4; i++) {
2437       imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
2438       if (imm_h[i] == 0) {
2439         zero_count++;
2440       } else if (imm_h[i] == 0xffffL) {
2441         neg_count++;
2442       }
2443     }
2444     if (zero_count == 4) {
2445       // one MOVZ will do
2446       movz(dst, 0);
2447     } else if (neg_count == 4) {
2448       // one MOVN will do
2449       movn(dst, 0);
2450     } else if (zero_count == 3) {
2451       for (i = 0; i < 4; i++) {
2452         if (imm_h[i] != 0L) {
2453           movz(dst, (uint32_t)imm_h[i], (i << 4));
2454           break;
2455         }
2456       }
2457     } else if (neg_count == 3) {
2458       // one MOVN will do
2459       for (int i = 0; i < 4; i++) {
2460         if (imm_h[i] != 0xffffL) {
2461           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2462           break;
2463         }
2464       }
2465     } else if (zero_count == 2) {
2466       // one MOVZ and one MOVK will do
2467       for (i = 0; i < 3; i++) {
2468         if (imm_h[i] != 0L) {
2469           movz(dst, (uint32_t)imm_h[i], (i << 4));
2470           i++;
2471           break;
2472         }
2473       }
2474       for (;i < 4; i++) {
2475         if (imm_h[i] != 0L) {
2476           movk(dst, (uint32_t)imm_h[i], (i << 4));
2477         }
2478       }
2479     } else if (neg_count == 2) {
2480       // one MOVN and one MOVK will do
2481       for (i = 0; i < 4; i++) {
2482         if (imm_h[i] != 0xffffL) {
2483           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2484           i++;
2485           break;
2486         }
2487       }
2488       for (;i < 4; i++) {
2489         if (imm_h[i] != 0xffffL) {
2490           movk(dst, (uint32_t)imm_h[i], (i << 4));
2491         }
2492       }
2493     } else if (zero_count == 1) {
2494       // one MOVZ and two MOVKs will do
2495       for (i = 0; i < 4; i++) {
2496         if (imm_h[i] != 0L) {
2497           movz(dst, (uint32_t)imm_h[i], (i << 4));
2498           i++;
2499           break;
2500         }
2501       }
2502       for (;i < 4; i++) {
2503         if (imm_h[i] != 0x0L) {
2504           movk(dst, (uint32_t)imm_h[i], (i << 4));
2505         }
2506       }
2507     } else if (neg_count == 1) {
2508       // one MOVN and two MOVKs will do
2509       for (i = 0; i < 4; i++) {
2510         if (imm_h[i] != 0xffffL) {
2511           movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
2512           i++;
2513           break;
2514         }
2515       }
2516       for (;i < 4; i++) {
2517         if (imm_h[i] != 0xffffL) {
2518           movk(dst, (uint32_t)imm_h[i], (i << 4));
2519         }
2520       }
2521     } else {
2522       // use a MOVZ and 3 MOVKs (makes it easier to debug)
2523       movz(dst, (uint32_t)imm_h[0], 0);
2524       for (i = 1; i < 4; i++) {
2525         movk(dst, (uint32_t)imm_h[i], (i << 4));
2526       }
2527     }
2528   }
2529 }
2530 
2531 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
2532 {
2533 #ifndef PRODUCT
2534     {
2535       char buffer[64];
2536       snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
2537       block_comment(buffer);
2538     }
2539 #endif
2540   if (operand_valid_for_logical_immediate(true, imm32)) {
2541     orrw(dst, zr, imm32);
2542   } else {
2543     // we can use MOVZ, MOVN or two calls to MOVK to build up the
2544     // constant
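         // For example (a sketch): imm32 == 0xffff1234 has its high halfword
         // all ones, so a single MOVN suffices:
         //   movnw dst, #0xedcb   // dst = ~0x0000edcb = 0xffff1234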
2545     uint32_t imm_h[2];
2546     imm_h[0] = imm32 & 0xffff;
2547     imm_h[1] = ((imm32 >> 16) & 0xffff);
2548     if (imm_h[0] == 0) {
2549       movzw(dst, imm_h[1], 16);
2550     } else if (imm_h[0] == 0xffff) {
2551       movnw(dst, imm_h[1] ^ 0xffff, 16);
2552     } else if (imm_h[1] == 0) {
2553       movzw(dst, imm_h[0], 0);
2554     } else if (imm_h[1] == 0xffff) {
2555       movnw(dst, imm_h[0] ^ 0xffff, 0);
2556     } else {
2557       // use a MOVZ and MOVK (makes it easier to debug)
2558       movzw(dst, imm_h[0], 0);
2559       movkw(dst, imm_h[1], 16);
2560     }
2561   }
2562 }
2563 
2564 // Form an address from base + offset in Rd.  Rd may or may
2565 // not actually be used: you must use the Address that is returned.
2566 // It is up to you to ensure that the shift provided matches the size
2567 // of your data.
2568 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
2569   if (Address::offset_ok_for_immed(byte_offset, shift))
2570     // It fits; no need for any heroics
2571     return Address(base, byte_offset);
2572 
2573   // Don't do anything clever with negative or misaligned offsets
2574   unsigned mask = (1 << shift) - 1;
2575   if (byte_offset < 0 || byte_offset & mask) {
2576     mov(Rd, byte_offset);
2577     add(Rd, base, Rd);
2578     return Address(Rd);
2579   }
2580 
2581   // See if we can do this with two 12-bit offsets
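       // For example (a sketch): byte_offset == 0x40010 with shift == 3
       // splits into add(Rd, base, 0x40000) plus an immediate offset of
       // 0x10, both of which encode directly.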
2582   {
2583     uint64_t word_offset = byte_offset >> shift;
2584     uint64_t masked_offset = word_offset & 0xfff000;
2585     if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
2586         && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
2587       add(Rd, base, masked_offset << shift);
2588       word_offset -= masked_offset;
2589       return Address(Rd, word_offset << shift);
2590     }
2591   }
2592 
2593   // Do it the hard way
2594   mov(Rd, byte_offset);
2595   add(Rd, base, Rd);
2596   return Address(Rd);
2597 }
2598 
2599 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
2600                                     bool want_remainder, Register scratch)
2601 {
2602   // Full implementation of Java idiv and irem.  The function
2603   // returns the (pc) offset of the div instruction - may be needed
2604   // for implicit exceptions.
2605   //
2606   // constraint : ra/rb =/= scratch
2607   //         normal case
2608   //
2609   // input : ra: dividend
2610   //         rb: divisor
2611   //
2612   // result: either
2613   //         quotient  (= ra idiv rb)
2614   //         remainder (= ra irem rb)
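       //
       // Java division rounds toward zero and defines the remainder as
       //   rem = dividend - (dividend / divisor) * divisor,
       // which is what sdivw + msubw compute below. For example (a sketch):
       //   -7 / 2 == -3  and  -7 % 2 == -7 - (-3 * 2) == -1.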
2615 
2616   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2617 
2618   int idivl_offset = offset();
2619   if (! want_remainder) {
2620     sdivw(result, ra, rb);
2621   } else {
2622     sdivw(scratch, ra, rb);
2623     Assembler::msubw(result, scratch, rb, ra);
2624   }
2625 
2626   return idivl_offset;
2627 }
2628 
2629 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2630                                     bool want_remainder, Register scratch)
2631 {
2632   // Full implementation of Java ldiv and lrem.  The function
2633   // returns the (pc) offset of the div instruction - may be needed
2634   // for implicit exceptions.
2635   //
2636   // constraint : ra/rb =/= scratch
2637   //         normal case
2638   //
2639   // input : ra: dividend
2640   //         rb: divisor
2641   //
2642   // result: either
2643   //         quotient  (= ra idiv rb)
2644   //         remainder (= ra irem rb)
2645 
2646   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2647 
2648   int idivq_offset = offset();
2649   if (! want_remainder) {
2650     sdiv(result, ra, rb);
2651   } else {
2652     sdiv(scratch, ra, rb);
2653     Assembler::msub(result, scratch, rb, ra);
2654   }
2655 
2656   return idivq_offset;
2657 }
2658 
2659 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2660   address prev = pc() - NativeMembar::instruction_size;
2661   address last = code()->last_insn();
2662   if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2663     NativeMembar *bar = NativeMembar_at(prev);
2664     if (AlwaysMergeDMB) {
2665       bar->set_kind(bar->get_kind() | order_constraint);
2666       BLOCK_COMMENT("merged membar(always)");
2667       return;
2668     }
2669     // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2670     // doing so would introduce a StoreLoad which the caller did not
2671     // intend
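         // (For example, merging "DMB ISHST" with "DMB ISHLD" would have to
         // produce a full "DMB ISH", which also orders stores against
         // subsequent loads.)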
2672     if (bar->get_kind() == order_constraint
2673         || bar->get_kind() == AnyAny
2674         || order_constraint == AnyAny) {
2675       // We are merging two memory barrier instructions.  On AArch64 we
2676       // can do this simply by ORing them together.
2677       bar->set_kind(bar->get_kind() | order_constraint);
2678       BLOCK_COMMENT("merged membar");
2679       return;
2680     } else {
2681       // In a special case like "DMB ST; DMB LD; DMB ST", the last DMB can be elided.
2682       // We need to check the last two instructions.
2683       address prev2 = prev - NativeMembar::instruction_size;
2684       if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2685         NativeMembar *bar2 = NativeMembar_at(prev2);
2686         assert(bar2->get_kind() == order_constraint, "it should be merged before");
2687         BLOCK_COMMENT("merged membar(elided)");
2688         return;
2689       }
2690     }
2691   }
2692   code()->set_last_insn(pc());
2693   dmb(Assembler::barrier(order_constraint));
2694 }
2695 
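// For example, two back-to-back calls to membar(StoreStore) emit a
// single "dmb ishst": the second call finds the preceding barrier via
// last_insn() and ORs its kind into it. Likewise, a membar(AnyAny)
// adjacent to any other membar is merged into one full "dmb ish".
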
2696 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2697   if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2698     merge_ldst(rt, adr, size_in_bytes, is_store);
2699     code()->clear_last_insn();
2700     return true;
2701   } else {
2702     assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
2703     const uint64_t mask = size_in_bytes - 1;
2704     if (adr.getMode() == Address::base_plus_offset &&
2705         (adr.offset() & mask) == 0) { // only supports base_plus_offset.
2706       code()->set_last_insn(pc());
2707     }
2708     return false;
2709   }
2710 }
2711 
2712 void MacroAssembler::ldr(Register Rx, const Address &adr) {
2713   // We always try to merge two adjacent loads into one ldp.
2714   if (!try_merge_ldst(Rx, adr, 8, false)) {
2715     Assembler::ldr(Rx, adr);
2716   }
2717 }
2718 
2719 void MacroAssembler::ldrw(Register Rw, const Address &adr) {
2720   // We always try to merge two adjacent loads into one ldp.
2721   if (!try_merge_ldst(Rw, adr, 4, false)) {
2722     Assembler::ldrw(Rw, adr);
2723   }
2724 }
2725 
2726 void MacroAssembler::str(Register Rx, const Address &adr) {
2727   // We always try to merge two adjacent stores into one stp.
2728   if (!try_merge_ldst(Rx, adr, 8, true)) {
2729     Assembler::str(Rx, adr);
2730   }
2731 }
2732 
2733 void MacroAssembler::strw(Register Rw, const Address &adr) {
2734   // We always try to merge two adjacent stores into one stp.
2735   if (!try_merge_ldst(Rw, adr, 4, true)) {
2736     Assembler::strw(Rw, adr);
2737   }
2738 }
2739 
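// For example (a sketch, assuming the two stores are emitted
// back-to-back with pair-aligned sp-relative offsets):
//   str(r0, Address(sp, 0));
//   str(r1, Address(sp, 8));
// is rewritten by try_merge_ldst into a single "stp x0, x1, [sp]".
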
2740 // MacroAssembler routines actually found to be needed
2741 
2742 void MacroAssembler::push(Register src)
2743 {
2744   str(src, Address(pre(esp, -1 * wordSize)));
2745 }
2746 
2747 void MacroAssembler::pop(Register dst)
2748 {
2749   ldr(dst, Address(post(esp, 1 * wordSize)));
2750 }
2751 
2752 // Note: load_unsigned_short used to be called load_unsigned_word.
2753 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
2754   int off = offset();
2755   ldrh(dst, src);
2756   return off;
2757 }
2758 
2759 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2760   int off = offset();
2761   ldrb(dst, src);
2762   return off;
2763 }
2764 
2765 int MacroAssembler::load_signed_short(Register dst, Address src) {
2766   int off = offset();
2767   ldrsh(dst, src);
2768   return off;
2769 }
2770 
2771 int MacroAssembler::load_signed_byte(Register dst, Address src) {
2772   int off = offset();
2773   ldrsb(dst, src);
2774   return off;
2775 }
2776 
2777 int MacroAssembler::load_signed_short32(Register dst, Address src) {
2778   int off = offset();
2779   ldrshw(dst, src);
2780   return off;
2781 }
2782 
2783 int MacroAssembler::load_signed_byte32(Register dst, Address src) {
2784   int off = offset();
2785   ldrsbw(dst, src);
2786   return off;
2787 }
2788 
2789 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2790   switch (size_in_bytes) {
2791   case  8:  ldr(dst, src); break;
2792   case  4:  ldrw(dst, src); break;
2793   case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
2794   case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
2795   default:  ShouldNotReachHere();
2796   }
2797 }
2798 
2799 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) {
2800   switch (size_in_bytes) {
2801   case  8:  str(src, dst); break;
2802   case  4:  strw(src, dst); break;
2803   case  2:  strh(src, dst); break;
2804   case  1:  strb(src, dst); break;
2805   default:  ShouldNotReachHere();
2806   }
2807 }
2808 
2809 void MacroAssembler::decrementw(Register reg, int value)
2810 {
2811   if (value < 0)  { incrementw(reg, -value);      return; }
2812   if (value == 0) {                               return; }
2813   if (value < (1 << 12)) { subw(reg, reg, value); return; }
2814   /* else */ {
2815     guarantee(reg != rscratch2, "invalid dst for register decrement");
2816     movw(rscratch2, (unsigned)value);
2817     subw(reg, reg, rscratch2);
2818   }
2819 }
2820 
2821 void MacroAssembler::decrement(Register reg, int value)
2822 {
2823   if (value < 0)  { increment(reg, -value);      return; }
2824   if (value == 0) {                              return; }
2825   if (value < (1 << 12)) { sub(reg, reg, value); return; }
2826   /* else */ {
2827     assert(reg != rscratch2, "invalid dst for register decrement");
2828     mov(rscratch2, (uint64_t)value);
2829     sub(reg, reg, rscratch2);
2830   }
2831 }
2832 
2833 void MacroAssembler::decrementw(Address dst, int value)
2834 {
2835   assert(!dst.uses(rscratch1), "invalid dst for address decrement");
2836   if (dst.getMode() == Address::literal) {
2837     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2838     lea(rscratch2, dst);
2839     dst = Address(rscratch2);
2840   }
2841   ldrw(rscratch1, dst);
2842   decrementw(rscratch1, value);
2843   strw(rscratch1, dst);
2844 }
2845 
2846 void MacroAssembler::decrement(Address dst, int value)
2847 {
2848   assert(!dst.uses(rscratch1), "invalid address for decrement");
2849   if (dst.getMode() == Address::literal) {
2850     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2851     lea(rscratch2, dst);
2852     dst = Address(rscratch2);
2853   }
2854   ldr(rscratch1, dst);
2855   decrement(rscratch1, value);
2856   str(rscratch1, dst);
2857 }
2858 
2859 void MacroAssembler::incrementw(Register reg, int value)
2860 {
2861   if (value < 0)  { decrementw(reg, -value);      return; }
2862   if (value == 0) {                               return; }
2863   if (value < (1 << 12)) { addw(reg, reg, value); return; }
2864   /* else */ {
2865     assert(reg != rscratch2, "invalid dst for register increment");
2866     movw(rscratch2, (unsigned)value);
2867     addw(reg, reg, rscratch2);
2868   }
2869 }
2870 
2871 void MacroAssembler::increment(Register reg, int value)
2872 {
2873   if (value < 0)  { decrement(reg, -value);      return; }
2874   if (value == 0) {                              return; }
2875   if (value < (1 << 12)) { add(reg, reg, value); return; }
2876   /* else */ {
2877     assert(reg != rscratch2, "invalid dst for register increment");
2878     movw(rscratch2, (unsigned)value);
2879     add(reg, reg, rscratch2);
2880   }
2881 }
2882 
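// For example, increment(r5, 3) emits "add x5, x5, #3", while
// increment(r5, 1 << 20), whose value does not fit a 12-bit
// immediate, emits a mov into rscratch2 followed by a register add.
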
2883 void MacroAssembler::incrementw(Address dst, int value)
2884 {
2885   assert(!dst.uses(rscratch1), "invalid dst for address increment");
2886   if (dst.getMode() == Address::literal) {
2887     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2888     lea(rscratch2, dst);
2889     dst = Address(rscratch2);
2890   }
2891   ldrw(rscratch1, dst);
2892   incrementw(rscratch1, value);
2893   strw(rscratch1, dst);
2894 }
2895 
2896 void MacroAssembler::increment(Address dst, int value)
2897 {
2898   assert(!dst.uses(rscratch1), "invalid dst for address increment");
2899   if (dst.getMode() == Address::literal) {
2900     assert(abs(value) < (1 << 12), "invalid value and address mode combination");
2901     lea(rscratch2, dst);
2902     dst = Address(rscratch2);
2903   }
2904   ldr(rscratch1, dst);
2905   increment(rscratch1, value);
2906   str(rscratch1, dst);
2907 }
2908 
2909 // Push lots of registers in the bit set supplied.  Don't push sp.
2910 // Return the number of words pushed
2911 int MacroAssembler::push(unsigned int bitset, Register stack) {
2912   int words_pushed = 0;
2913 
2914   // Scan bitset to accumulate register pairs
2915   unsigned char regs[32];
2916   int count = 0;
2917   for (int reg = 0; reg <= 30; reg++) {
2918     if (1 & bitset)
2919       regs[count++] = reg;
2920     bitset >>= 1;
2921   }
2922   regs[count++] = zr->raw_encoding();
2923   count &= ~1;  // Only push an even number of regs
2924 
2925   if (count) {
2926     stp(as_Register(regs[0]), as_Register(regs[1]),
2927        Address(pre(stack, -count * wordSize)));
2928     words_pushed += 2;
2929   }
2930   for (int i = 2; i < count; i += 2) {
2931     stp(as_Register(regs[i]), as_Register(regs[i+1]),
2932        Address(stack, i * wordSize));
2933     words_pushed += 2;
2934   }
2935 
2936   assert(words_pushed == count, "oops, pushed != count");
2937 
2938   return count;
2939 }
2940 
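// For example (a sketch): pushing {r4, r5, r6} pads the odd count
// with zr and emits
//   stp x4, x5, [sp, #-32]!
//   stp x6, xzr, [sp, #16]
// returning 4, the number of words pushed including the padding slot.
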
2941 int MacroAssembler::pop(unsigned int bitset, Register stack) {
2942   int words_pushed = 0;
2943 
2944   // Scan bitset to accumulate register pairs
2945   unsigned char regs[32];
2946   int count = 0;
2947   for (int reg = 0; reg <= 30; reg++) {
2948     if (1 & bitset)
2949       regs[count++] = reg;
2950     bitset >>= 1;
2951   }
2952   regs[count++] = zr->raw_encoding();
2953   count &= ~1;
2954 
2955   for (int i = 2; i < count; i += 2) {
2956     ldp(as_Register(regs[i]), as_Register(regs[i+1]),
2957        Address(stack, i * wordSize));
2958     words_pushed += 2;
2959   }
2960   if (count) {
2961     ldp(as_Register(regs[0]), as_Register(regs[1]),
2962        Address(post(stack, count * wordSize)));
2963     words_pushed += 2;
2964   }
2965 
2966   assert(words_pushed == count, "oops, pushed != count");
2967 
2968   return count;
2969 }
2970 
2971 // Push lots of registers in the bit set supplied.  Don't push sp.
2972 // Return the number of dwords pushed
2973 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
2974   int words_pushed = 0;
2975   bool use_sve = false;
2976   int sve_vector_size_in_bytes = 0;
2977 
2978 #ifdef COMPILER2
2979   use_sve = Matcher::supports_scalable_vector();
2980   sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
2981 #endif
2982 
2983   // Scan bitset to accumulate register pairs
2984   unsigned char regs[32];
2985   int count = 0;
2986   for (int reg = 0; reg <= 31; reg++) {
2987     if (1 & bitset)
2988       regs[count++] = reg;
2989     bitset >>= 1;
2990   }
2991 
2992   if (count == 0) {
2993     return 0;
2994   }
2995 
2996   if (mode == PushPopFull) {
2997     if (use_sve && sve_vector_size_in_bytes > 16) {
2998       mode = PushPopSVE;
2999     } else {
3000       mode = PushPopNeon;
3001     }
3002   }
3003 
3004 #ifndef PRODUCT
3005   {
3006     char buffer[48];
3007     if (mode == PushPopSVE) {
3008       snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
3009     } else if (mode == PushPopNeon) {
3010       snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
3011     } else {
3012       snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
3013     }
3014     block_comment(buffer);
3015   }
3016 #endif
3017 
3018   if (mode == PushPopSVE) {
3019     sub(stack, stack, sve_vector_size_in_bytes * count);
3020     for (int i = 0; i < count; i++) {
3021       sve_str(as_FloatRegister(regs[i]), Address(stack, i));
3022     }
3023     return count * sve_vector_size_in_bytes / 8;
3024   }
3025 
3026   if (mode == PushPopNeon) {
3027     if (count == 1) {
3028       strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2)));
3029       return 2;
3030     }
3031 
3032     bool odd = (count & 1) == 1;
3033     int push_slots = count + (odd ? 1 : 0);
3034 
3035     // Always push full 128-bit registers.
3036     stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2)));
3037     words_pushed += 2;
3038 
3039     for (int i = 2; i + 1 < count; i += 2) {
3040       stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3041       words_pushed += 2;
3042     }
3043 
3044     if (odd) {
3045       strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3046       words_pushed++;
3047     }
3048 
3049     assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3050     return count * 2;
3051   }
3052 
3053   if (mode == PushPopFp) {
3054     bool odd = (count & 1) == 1;
3055     int push_slots = count + (odd ? 1 : 0);
3056 
3057     if (count == 1) {
3058       // Stack pointer must be 16-byte aligned
3059       strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize)));
3060       return 1;
3061     }
3062 
3063     stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize)));
3064     words_pushed += 2;
3065 
3066     for (int i = 2; i + 1 < count; i += 2) {
3067       stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3068       words_pushed += 2;
3069     }
3070 
3071     if (odd) {
3072       // Stack pointer must be 16-byte aligned
3073       strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3074       words_pushed++;
3075     }
3076 
3077     assert(words_pushed == count, "oops, pushed != count");
3078 
3079     return count;
3080   }
3081 
3082   return 0;
3083 }
3084 
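// For example (a sketch): pushing {v0, v1, v2} in PushPopNeon mode
// rounds the odd count up to four 128-bit slots and emits
//   stp q0, q1, [sp, #-64]!
//   str q2, [sp, #32]
// returning 6 dwords (three 128-bit registers).
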
3085 // Return the number of dwords popped
3086 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) {
3087   int words_pushed = 0;
3088   bool use_sve = false;
3089   int sve_vector_size_in_bytes = 0;
3090 
3091 #ifdef COMPILER2
3092   use_sve = Matcher::supports_scalable_vector();
3093   sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
3094 #endif
3095   // Scan bitset to accumulate register pairs
3096   unsigned char regs[32];
3097   int count = 0;
3098   for (int reg = 0; reg <= 31; reg++) {
3099     if (1 & bitset)
3100       regs[count++] = reg;
3101     bitset >>= 1;
3102   }
3103 
3104   if (count == 0) {
3105     return 0;
3106   }
3107 
3108   if (mode == PushPopFull) {
3109     if (use_sve && sve_vector_size_in_bytes > 16) {
3110       mode = PushPopSVE;
3111     } else {
3112       mode = PushPopNeon;
3113     }
3114   }
3115 
3116 #ifndef PRODUCT
3117   {
3118     char buffer[48];
3119     if (mode == PushPopSVE) {
3120       snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
3121     } else if (mode == PushPopNeon) {
3122       snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
3123     } else {
3124       snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
3125     }
3126     block_comment(buffer);
3127   }
3128 #endif
3129 
3130   if (mode == PushPopSVE) {
3131     for (int i = count - 1; i >= 0; i--) {
3132       sve_ldr(as_FloatRegister(regs[i]), Address(stack, i));
3133     }
3134     add(stack, stack, sve_vector_size_in_bytes * count);
3135     return count * sve_vector_size_in_bytes / 8;
3136   }
3137 
3138   if (mode == PushPopNeon) {
3139     if (count == 1) {
3140       ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2)));
3141       return 2;
3142     }
3143 
3144     bool odd = (count & 1) == 1;
3145     int push_slots = count + (odd ? 1 : 0);
3146 
3147     if (odd) {
3148       ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2));
3149       words_pushed++;
3150     }
3151 
3152     for (int i = 2; i + 1 < count; i += 2) {
3153       ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
3154       words_pushed += 2;
3155     }
3156 
3157     ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2)));
3158     words_pushed += 2;
3159 
3160     assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count);
3161 
3162     return count * 2;
3163   }
3164 
3165   if (mode == PushPopFp) {
3166     bool odd = (count & 1) == 1;
3167     int push_slots = count + (odd ? 1 : 0);
3168 
3169     if (count == 1) {
3170       ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize)));
3171       return 1;
3172     }
3173 
3174     if (odd) {
3175       ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize));
3176       words_pushed++;
3177     }
3178 
3179     for (int i = 2; i + 1 < count; i += 2) {
3180       ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize));
3181       words_pushed += 2;
3182     }
3183 
3184     ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize)));
3185     words_pushed += 2;
3186 
3187     assert(words_pushed == count, "oops, pushed != count");
3188 
3189     return count;
3190   }
3191 
3192   return 0;
3193 }
3194 
3195 // Return the number of dwords pushed
3196 int MacroAssembler::push_p(unsigned int bitset, Register stack) {
3197   bool use_sve = false;
3198   int sve_predicate_size_in_slots = 0;
3199 
3200 #ifdef COMPILER2
3201   use_sve = Matcher::supports_scalable_vector();
3202   if (use_sve) {
3203     sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3204   }
3205 #endif
3206 
3207   if (!use_sve) {
3208     return 0;
3209   }
3210 
3211   unsigned char regs[PRegister::number_of_registers];
3212   int count = 0;
3213   for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3214     if (1 & bitset)
3215       regs[count++] = reg;
3216     bitset >>= 1;
3217   }
3218 
3219   if (count == 0) {
3220     return 0;
3221   }
3222 
3223   int total_push_bytes = align_up(sve_predicate_size_in_slots *
3224                                   VMRegImpl::stack_slot_size * count, 16);
3225   sub(stack, stack, total_push_bytes);
3226   for (int i = 0; i < count; i++) {
3227     sve_str(as_PRegister(regs[i]), Address(stack, i));
3228   }
3229   return total_push_bytes / 8;
3230 }
3231 
3232 // Return the number of dwords popped
3233 int MacroAssembler::pop_p(unsigned int bitset, Register stack) {
3234   bool use_sve = false;
3235   int sve_predicate_size_in_slots = 0;
3236 
3237 #ifdef COMPILER2
3238   use_sve = Matcher::supports_scalable_vector();
3239   if (use_sve) {
3240     sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots();
3241   }
3242 #endif
3243 
3244   if (!use_sve) {
3245     return 0;
3246   }
3247 
3248   unsigned char regs[PRegister::number_of_registers];
3249   int count = 0;
3250   for (int reg = 0; reg < PRegister::number_of_registers; reg++) {
3251     if (1 & bitset)
3252       regs[count++] = reg;
3253     bitset >>= 1;
3254   }
3255 
3256   if (count == 0) {
3257     return 0;
3258   }
3259 
3260   int total_pop_bytes = align_up(sve_predicate_size_in_slots *
3261                                  VMRegImpl::stack_slot_size * count, 16);
3262   for (int i = count - 1; i >= 0; i--) {
3263     sve_ldr(as_PRegister(regs[i]), Address(stack, i));
3264   }
3265   add(stack, stack, total_pop_bytes);
3266   return total_pop_bytes / 8;
3267 }
3268 
3269 #ifdef ASSERT
3270 void MacroAssembler::verify_heapbase(const char* msg) {
3271 #if 0
3272   assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
3273   assert (Universe::heap() != nullptr, "java heap should be initialized");
3274   if (!UseCompressedOops || Universe::ptr_base() == nullptr) {
3275     // rheapbase is allocated as general register
3276     return;
3277   }
3278   if (CheckCompressedOops) {
3279     Label ok;
3280     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
3281     cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3282     br(Assembler::EQ, ok);
3283     stop(msg);
3284     bind(ok);
3285     pop(1 << rscratch1->encoding(), sp);
3286   }
3287 #endif
3288 }
3289 #endif
3290 
3291 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3292   assert_different_registers(value, tmp1, tmp2);
3293   Label done, tagged, weak_tagged;
3294 
3295   cbz(value, done);           // Use null as-is.
3296   tst(value, JNIHandles::tag_mask); // Test for tag.
3297   br(Assembler::NE, tagged);
3298 
3299   // Resolve local handle
3300   access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
3301   verify_oop(value);
3302   b(done);
3303 
3304   bind(tagged);
3305   STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1);
3306   tbnz(value, 0, weak_tagged);    // Test for weak tag.
3307 
3308   // Resolve global handle
3309   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3310   verify_oop(value);
3311   b(done);
3312 
3313   bind(weak_tagged);
3314   // Resolve jweak.
3315   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
3316                  value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2);
3317   verify_oop(value);
3318 
3319   bind(done);
3320 }
3321 
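// A sketch of the tag dispatch above (the tag occupies the low bits
// of the jobject):
//   0b00 -> local handle  : raw load, no GC barrier
//   0b10 -> global handle : load with IN_NATIVE decorators
//   0b01 -> weak global   : load with ON_PHANTOM_OOP_REF as well
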
3322 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) {
3323   assert_different_registers(value, tmp1, tmp2);
3324   Label done;
3325 
3326   cbz(value, done);           // Use null as-is.
3327 
3328 #ifdef ASSERT
3329   {
3330     STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
3331     Label valid_global_tag;
3332     tbnz(value, 1, valid_global_tag); // Test for global tag
3333     stop("non global jobject using resolve_global_jobject");
3334     bind(valid_global_tag);
3335   }
3336 #endif
3337 
3338   // Resolve global handle
3339   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
3340   verify_oop(value);
3341 
3342   bind(done);
3343 }
3344 
3345 void MacroAssembler::stop(const char* msg) {
3346   BLOCK_COMMENT(msg);
3347   dcps1(0xdeae);
3348   emit_int64((uintptr_t)msg);
3349 }
3350 
3351 void MacroAssembler::unimplemented(const char* what) {
3352   const char* buf = nullptr;
3353   {
3354     ResourceMark rm;
3355     stringStream ss;
3356     ss.print("unimplemented: %s", what);
3357     buf = code_string(ss.as_string());
3358   }
3359   stop(buf);
3360 }
3361 
3362 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
3363 #ifdef ASSERT
3364   Label OK;
3365   br(cc, OK);
3366   stop(msg);
3367   bind(OK);
3368 #endif
3369 }
3370 
3371 // If a constant does not fit in an immediate field, generate some
3372 // number of MOV instructions and then perform the operation.
3373 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
3374                                            add_sub_imm_insn insn1,
3375                                            add_sub_reg_insn insn2,
3376                                            bool is32) {
3377   assert(Rd != zr, "Rd = zr and not setting flags?");
3378   bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3379   if (fits) {
3380     (this->*insn1)(Rd, Rn, imm);
3381   } else {
3382     if (uabs(imm) < (1 << 24)) {
3383        (this->*insn1)(Rd, Rn, imm & -(1 << 12));
3384        (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
3385     } else {
3386        assert_different_registers(Rd, Rn);
3387        mov(Rd, imm);
3388        (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3389     }
3390   }
3391 }
3392 
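// For example, add(r0, r1, 0x123456) does not fit a single (optionally
// shifted) 12-bit immediate, so it is split into
//   add x0, x1, #0x123000
//   add x0, x0, #0x456
// Constants of 24 bits or more fall back to mov plus a register add.
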
3393 // Separate version which sets the flags. Optimisations are more restricted
3394 // because we must set the flags correctly.
3395 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
3396                                              add_sub_imm_insn insn1,
3397                                              add_sub_reg_insn insn2,
3398                                              bool is32) {
3399   bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm);
3400   if (fits) {
3401     (this->*insn1)(Rd, Rn, imm);
3402   } else {
3403     assert_different_registers(Rd, Rn);
3404     assert(Rd != zr, "overflow in immediate operand");
3405     mov(Rd, imm);
3406     (this->*insn2)(Rd, Rn, Rd, LSL, 0);
3407   }
3408 }
3409 
3410 
3411 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
3412   if (increment.is_register()) {
3413     add(Rd, Rn, increment.as_register());
3414   } else {
3415     add(Rd, Rn, increment.as_constant());
3416   }
3417 }
3418 
3419 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
3420   if (increment.is_register()) {
3421     addw(Rd, Rn, increment.as_register());
3422   } else {
3423     addw(Rd, Rn, increment.as_constant());
3424   }
3425 }
3426 
3427 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
3428   if (decrement.is_register()) {
3429     sub(Rd, Rn, decrement.as_register());
3430   } else {
3431     sub(Rd, Rn, decrement.as_constant());
3432   }
3433 }
3434 
3435 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
3436   if (decrement.is_register()) {
3437     subw(Rd, Rn, decrement.as_register());
3438   } else {
3439     subw(Rd, Rn, decrement.as_constant());
3440   }
3441 }
3442 
3443 void MacroAssembler::reinit_heapbase()
3444 {
3445   if (UseCompressedOops) {
3446     if (Universe::is_fully_initialized()) {
3447       mov(rheapbase, CompressedOops::base());
3448     } else {
3449       lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
3450       ldr(rheapbase, Address(rheapbase));
3451     }
3452   }
3453 }
3454 
3455 // this simulates the behaviour of the x86 cmpxchg instruction using a
3456 // load linked/store conditional pair. we use the acquire/release
3457 // versions of these instructions so that we flush pending writes as
3458 // per Java semantics.
3459 
3460 // n.b. the x86 version assumes the old value to be compared against is
3461 // in rax and updates rax with the value located in memory if the
3462 // cmpxchg fails. we supply a register for the old value explicitly
3463 
3464 // the aarch64 load linked/store conditional instructions do not
3465 // accept an offset. so, unlike x86, we must provide a plain register
3466 // to identify the memory word to be compared/exchanged rather than a
3467 // register+offset Address.
3468 
3469 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
3470                                 Label &succeed, Label *fail) {
3471   // oldv holds comparison value
3472   // newv holds value to write in exchange
3473   // addr identifies memory word to compare against/update
3474   if (UseLSE) {
3475     mov(tmp, oldv);
3476     casal(Assembler::xword, oldv, newv, addr);
3477     cmp(tmp, oldv);
3478     br(Assembler::EQ, succeed);
3479     membar(AnyAny);
3480   } else {
3481     Label retry_load, nope;
3482     prfm(Address(addr), PSTL1STRM);
3483     bind(retry_load);
3484     // flush and load exclusive from the memory location
3485     // and fail if it is not what we expect
3486     ldaxr(tmp, addr);
3487     cmp(tmp, oldv);
3488     br(Assembler::NE, nope);
3489     // if we store+flush with no intervening write tmp will be zero
3490     stlxr(tmp, newv, addr);
3491     cbzw(tmp, succeed);
3492     // retry so we only ever return after a load fails to compare
3493     // ensures we don't return a stale value after a failed write.
3494     b(retry_load);
3495     // if the memory word differs we return it in oldv and signal a fail
3496     bind(nope);
3497     membar(AnyAny);
3498     mov(oldv, tmp);
3499   }
3500   if (fail)
3501     b(*fail);
3502 }
3503 
3504 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
3505                                         Label &succeed, Label *fail) {
3506   assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
3507   cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
3508 }
3509 
3510 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
3511                                 Label &succeed, Label *fail) {
3512   // oldv holds comparison value
3513   // newv holds value to write in exchange
3514   // addr identifies memory word to compare against/update
3515   // tmp returns 0/1 for success/failure
3516   if (UseLSE) {
3517     mov(tmp, oldv);
3518     casal(Assembler::word, oldv, newv, addr);
3519     cmp(tmp, oldv);
3520     br(Assembler::EQ, succeed);
3521     membar(AnyAny);
3522   } else {
3523     Label retry_load, nope;
3524     prfm(Address(addr), PSTL1STRM);
3525     bind(retry_load);
3526     // flush and load exclusive from the memory location
3527     // and fail if it is not what we expect
3528     ldaxrw(tmp, addr);
3529     cmp(tmp, oldv);
3530     br(Assembler::NE, nope);
3531     // if we store+flush with no intervening write tmp will be zero
3532     stlxrw(tmp, newv, addr);
3533     cbzw(tmp, succeed);
3534     // retry so we only ever return after a load fails to compare
3535     // ensures we don't return a stale value after a failed write.
3536     b(retry_load);
3537     // if the memory word differs we return it in oldv and signal a fail
3538     bind(nope);
3539     membar(AnyAny);
3540     mov(oldv, tmp);
3541   }
3542   if (fail)
3543     b(*fail);
3544 }
3545 
3546 // A generic CAS; success or failure is in the EQ flag.  A weak CAS
3547 // doesn't retry and may fail spuriously.  If the oldval is wanted,
3548 // pass a register for the result; otherwise pass noreg.
3549 
3550 // Clobbers rscratch1
3551 void MacroAssembler::cmpxchg(Register addr, Register expected,
3552                              Register new_val,
3553                              enum operand_size size,
3554                              bool acquire, bool release,
3555                              bool weak,
3556                              Register result) {
3557   if (result == noreg)  result = rscratch1;
3558   BLOCK_COMMENT("cmpxchg {");
3559   if (UseLSE) {
3560     mov(result, expected);
3561     lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3562     compare_eq(result, expected, size);
3563 #ifdef ASSERT
3564     // Poison rscratch1, which is written on the !UseLSE branch
3565     mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3566 #endif
3567   } else {
3568     Label retry_load, done;
3569     prfm(Address(addr), PSTL1STRM);
3570     bind(retry_load);
3571     load_exclusive(result, addr, size, acquire);
3572     compare_eq(result, expected, size);
3573     br(Assembler::NE, done);
3574     store_exclusive(rscratch1, new_val, addr, size, release);
3575     if (weak) {
3576       cmpw(rscratch1, 0u);  // If the store fails, return NE to our caller.
3577     } else {
3578       cbnzw(rscratch1, retry_load);
3579     }
3580     bind(done);
3581   }
3582   BLOCK_COMMENT("} cmpxchg");
3583 }
3584 
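// Typical use (a sketch): a strong 32-bit CAS where only the flags
// are needed afterwards --
//   cmpxchg(addr, expected, new_val, Assembler::word,
//           /*acquire*/ true, /*release*/ true, /*weak*/ false, noreg);
//   br(Assembler::EQ, succeeded);
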
3585 // A generic comparison. Only compares for equality, clobbers rscratch1.
3586 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) {
3587   if (size == xword) {
3588     cmp(rm, rn);
3589   } else if (size == word) {
3590     cmpw(rm, rn);
3591   } else if (size == halfword) {
3592     eorw(rscratch1, rm, rn);
3593     ands(zr, rscratch1, 0xffff);
3594   } else if (size == byte) {
3595     eorw(rscratch1, rm, rn);
3596     ands(zr, rscratch1, 0xff);
3597   } else {
3598     ShouldNotReachHere();
3599   }
3600 }
3601 
3602 
3603 static bool different(Register a, RegisterOrConstant b, Register c) {
3604   if (b.is_constant())
3605     return a != c;
3606   else
3607     return a != b.as_register() && a != c && b.as_register() != c;
3608 }
3609 
3610 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
3611 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
3612   if (UseLSE) {                                                         \
3613     prev = prev->is_valid() ? prev : zr;                                \
3614     if (incr.is_register()) {                                           \
3615       AOP(sz, incr.as_register(), prev, addr);                          \
3616     } else {                                                            \
3617       mov(rscratch2, incr.as_constant());                               \
3618       AOP(sz, rscratch2, prev, addr);                                   \
3619     }                                                                   \
3620     return;                                                             \
3621   }                                                                     \
3622   Register result = rscratch2;                                          \
3623   if (prev->is_valid())                                                 \
3624     result = different(prev, incr, addr) ? prev : rscratch2;            \
3625                                                                         \
3626   Label retry_load;                                                     \
3627   prfm(Address(addr), PSTL1STRM);                                       \
3628   bind(retry_load);                                                     \
3629   LDXR(result, addr);                                                   \
3630   OP(rscratch1, result, incr);                                          \
3631   STXR(rscratch2, rscratch1, addr);                                     \
3632   cbnzw(rscratch2, retry_load);                                         \
3633   if (prev->is_valid() && prev != result) {                             \
3634     IOP(prev, rscratch1, incr);                                         \
3635   }                                                                     \
3636 }
3637 
3638 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
3639 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
3640 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
3641 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)
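
// For example, atomic_addw(r0, 1, r2) atomically adds 1 to the 32-bit
// word at [x2] and leaves the old value in w0: with LSE this is
//   mov   w9, #1
//   ldadd w9, w0, [x2]
// and otherwise an ldxrw/addw/stxrw retry loop.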
3642 
3643 #undef ATOMIC_OP
3644 
3645 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
3646 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
3647   if (UseLSE) {                                                         \
3648     prev = prev->is_valid() ? prev : zr;                                \
3649     AOP(sz, newv, prev, addr);                                          \
3650     return;                                                             \
3651   }                                                                     \
3652   Register result = rscratch2;                                          \
3653   if (prev->is_valid())                                                 \
3654     result = different(prev, newv, addr) ? prev : rscratch2;            \
3655                                                                         \
3656   Label retry_load;                                                     \
3657   prfm(Address(addr), PSTL1STRM);                                       \
3658   bind(retry_load);                                                     \
3659   LDXR(result, addr);                                                   \
3660   STXR(rscratch1, newv, addr);                                          \
3661   cbnzw(rscratch1, retry_load);                                         \
3662   if (prev->is_valid() && prev != result)                               \
3663     mov(prev, result);                                                  \
3664 }
3665 
3666 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3667 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3668 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3669 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3670 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3671 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
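
// For example, atomic_xchgal(r0, r1, r2) exchanges x1 with the
// doubleword at [x2] with acquire/release semantics, returning the
// old value in x0: "swpal x1, x0, [x2]" under LSE, or an
// ldaxr/stlxr retry loop otherwise.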
3672 
3673 #undef ATOMIC_XCHG
3674 
3675 #ifndef PRODUCT
3676 extern "C" void findpc(intptr_t x);
3677 #endif
3678 
3679 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3680 {
3681   // In order to get locks to work, we need to fake an in_VM state
3682   if (ShowMessageBoxOnError ) {
3683     JavaThread* thread = JavaThread::current();
3684     JavaThreadState saved_state = thread->thread_state();
3685     thread->set_thread_state(_thread_in_vm);
3686 #ifndef PRODUCT
3687     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3688       ttyLocker ttyl;
3689       BytecodeCounter::print();
3690     }
3691 #endif
3692     if (os::message_box(msg, "Execution stopped, print registers?")) {
3693       ttyLocker ttyl;
3694       tty->print_cr(" pc = 0x%016" PRIx64, pc);
3695 #ifndef PRODUCT
3696       tty->cr();
3697       findpc(pc);
3698       tty->cr();
3699 #endif
3700       tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3701       tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3702       tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3703       tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3704       tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3705       tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3706       tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3707       tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3708       tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3709       tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3710       tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3711       tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3712       tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3713       tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3714       tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3715       tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3716       tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3717       tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3718       tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3719       tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3720       tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3721       tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3722       tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3723       tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3724       tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3725       tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3726       tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3727       tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3728       tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
3729       tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3730       tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3731       BREAKPOINT;
3732     }
3733   }
3734   fatal("DEBUG MESSAGE: %s", msg);
3735 }
3736 
3737 RegSet MacroAssembler::call_clobbered_gp_registers() {
3738   RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3739 #ifndef R18_RESERVED
3740   regs += r18_tls;
3741 #endif
3742   return regs;
3743 }
3744 
3745 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3746   int step = 4 * wordSize;
3747   push(call_clobbered_gp_registers() - exclude, sp);
3748   sub(sp, sp, step);
3749   mov(rscratch1, -step);
3750   // Push v0-v7, v16-v31.
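  // v8-v15 are skipped: AAPCS64 makes their low 64 bits callee-saved,
  // so any callee must preserve them and we need not spill them here.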
3751   for (int i = 31; i>= 4; i -= 4) {
3752     if (i <= v7->encoding() || i >= v16->encoding())
3753       st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
3754           as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
3755   }
3756   st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
3757       as_FloatRegister(3), T1D, Address(sp));
3758 }
3759 
3760 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
3761   for (int i = 0; i < 32; i += 4) {
3762     if (i <= v7->encoding() || i >= v16->encoding())
3763       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3764           as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
3765   }
3766 
3767   reinitialize_ptrue();
3768 
3769   pop(call_clobbered_gp_registers() - exclude, sp);
3770 }
3771 
3772 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
3773                                     int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3774   push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
3775   if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3776     sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3777     for (int i = 0; i < FloatRegister::number_of_registers; i++) {
3778       sve_str(as_FloatRegister(i), Address(sp, i));
3779     }
3780   } else {
3781     int step = (save_vectors ? 8 : 4) * wordSize;
3782     mov(rscratch1, -step);
3783     sub(sp, sp, step);
3784     for (int i = 28; i >= 4; i -= 4) {
3785       st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3786           as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
3787     }
3788     st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
3789   }
3790   if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
3791     sub(sp, sp, total_predicate_in_bytes);
3792     for (int i = 0; i < PRegister::number_of_registers; i++) {
3793       sve_str(as_PRegister(i), Address(sp, i));
3794     }
3795   }
3796 }
3797 
3798 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
3799                                    int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3800   if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
3801     for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
3802       sve_ldr(as_PRegister(i), Address(sp, i));
3803     }
3804     add(sp, sp, total_predicate_in_bytes);
3805   }
3806   if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3807     for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
3808       sve_ldr(as_FloatRegister(i), Address(sp, i));
3809     }
3810     add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3811   } else {
3812     int step = (restore_vectors ? 8 : 4) * wordSize;
3813     for (int i = 0; i <= 28; i += 4)
3814       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3815           as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
3816   }
3817 
3818   // We may use predicate registers and rely on ptrue with SVE,
3819   // regardless of whether wide vectors (> 8 bytes) are used.
3820   if (use_sve) {
3821     reinitialize_ptrue();
3822   }
3823 
3824   // integer registers except lr & sp
3825   pop(RegSet::range(r0, r17), sp);
3826 #ifdef R18_RESERVED
3827   ldp(zr, r19, Address(post(sp, 2 * wordSize)));
3828   pop(RegSet::range(r20, r29), sp);
3829 #else
3830   pop(RegSet::range(r18_tls, r29), sp);
3831 #endif
3832 }
3833 
3834 /**
3835  * Helpers for multiply_to_len().
3836  */
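// add2_with_carry computes, in effect,
//   (final_dest_hi:dest_lo) = (dest_hi:dest_lo) + src1 + src2
// treating src1 and src2 as unsigned 64-bit addends.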
3837 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3838                                      Register src1, Register src2) {
3839   adds(dest_lo, dest_lo, src1);
3840   adc(dest_hi, dest_hi, zr);
3841   adds(dest_lo, dest_lo, src2);
3842   adc(final_dest_hi, dest_hi, zr);
3843 }
3844 
3845 // Generate an address from (r + r1 extend offset).  "size" is the
3846 // size of the operand.  The result may be in rscratch2.
3847 Address MacroAssembler::offsetted_address(Register r, Register r1,
3848                                           Address::extend ext, int offset, int size) {
3849   if (offset || (ext.shift() % size != 0)) {
3850     lea(rscratch2, Address(r, r1, ext));
3851     return Address(rscratch2, offset);
3852   } else {
3853     return Address(r, r1, ext);
3854   }
3855 }
3856 
3857 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3858 {
3859   assert(offset >= 0, "spill to negative address?");
3860   // Offset reachable?
3861   //   Not aligned - 9 bits signed offset
3862   //   Aligned - 12 bits unsigned offset shifted
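  // For example (a sketch): spill_address(8, 0x9c40, tmp) emits
  //   add tmp, sp, #0x9000
  // and returns [tmp, #0xc40], which fits the scaled unsigned form.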
3863   Register base = sp;
3864   if ((offset & (size-1)) && offset >= (1<<8)) {
3865     add(tmp, base, offset & ((1<<12)-1));
3866     base = tmp;
3867     offset &= -1u<<12;
3868   }
3869 
3870   if (offset >= (1<<12) * size) {
3871     add(tmp, base, offset & (((1<<12)-1)<<12));
3872     base = tmp;
3873     offset &= ~(((1<<12)-1)<<12);
3874   }
3875 
3876   return Address(base, offset);
3877 }
3878 
3879 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3880   assert(offset >= 0, "spill to negative address?");
3881 
3882   Register base = sp;
3883 
3884   // An immediate offset in the range 0 to 255 which is multiplied
3885   // by the current vector or predicate register size in bytes.
3886   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3887     return Address(base, offset / sve_reg_size_in_bytes);
3888   }
3889 
3890   add(tmp, base, offset);
3891   return Address(tmp);
3892 }
3893 
3894 // Checks whether offset is aligned.
3895 // Returns true if it is, else false.
3896 bool MacroAssembler::merge_alignment_check(Register base,
3897                                            size_t size,
3898                                            int64_t cur_offset,
3899                                            int64_t prev_offset) const {
3900   if (AvoidUnalignedAccesses) {
3901     if (base == sp) {
3902       // Checks whether the low offset is aligned to a register pair.
3903       int64_t pair_mask = size * 2 - 1;
3904       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3905       return (offset & pair_mask) == 0;
3906     } else { // If base is not sp, we can't guarantee the access is aligned.
3907       return false;
3908     }
3909   } else {
3910     int64_t mask = size - 1;
3911     // Load/store pair instruction only supports element size aligned offset.
3912     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3913   }
3914 }
3915 
3916 // Checks whether current and previous loads/stores can be merged.
3917 // Returns true if it can be merged, else false.
3918 bool MacroAssembler::ldst_can_merge(Register rt,
3919                                     const Address &adr,
3920                                     size_t cur_size_in_bytes,
3921                                     bool is_store) const {
3922   address prev = pc() - NativeInstruction::instruction_size;
3923   address last = code()->last_insn();
3924 
3925   if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3926     return false;
3927   }
3928 
3929   if (adr.getMode() != Address::base_plus_offset || prev != last) {
3930     return false;
3931   }
3932 
3933   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3934   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3935 
3936   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3937   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3938 
3939   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3940     return false;
3941   }
3942 
3943   int64_t max_offset = 63 * prev_size_in_bytes;
3944   int64_t min_offset = -64 * prev_size_in_bytes;
3945 
3946   assert(prev_ldst->is_not_pre_post_index(), "merging pre-index or post-index accesses is not supported.");
3947 
3948   // Only same base can be merged.
3949   if (adr.base() != prev_ldst->base()) {
3950     return false;
3951   }
3952 
3953   int64_t cur_offset = adr.offset();
3954   int64_t prev_offset = prev_ldst->offset();
3955   size_t diff = abs(cur_offset - prev_offset);
3956   if (diff != prev_size_in_bytes) {
3957     return false;
3958   }
3959 
3960   // The following cases cannot be merged:
3961   // ldr x2, [x2, #8]
3962   // ldr x3, [x2, #16]
3963   // or:
3964   // ldr x2, [x3, #8]
3965   // ldr x2, [x3, #16]
3966   // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3967   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3968     return false;
3969   }
3970 
3971   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3972   // Offset range must be in ldp/stp instruction's range.
3973   if (low_offset > max_offset || low_offset < min_offset) {
3974     return false;
3975   }
3976 
3977   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3978     return true;
3979   }
3980 
3981   return false;
3982 }
3983 
3984 // Merge current load/store with previous load/store into ldp/stp.
3985 void MacroAssembler::merge_ldst(Register rt,
3986                                 const Address &adr,
3987                                 size_t cur_size_in_bytes,
3988                                 bool is_store) {
3989 
3990   assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store), "cur and prev must be able to be merged.");
3991 
3992   Register rt_low, rt_high;
3993   address prev = pc() - NativeInstruction::instruction_size;
3994   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3995 
3996   int64_t offset;
3997 
3998   if (adr.offset() < prev_ldst->offset()) {
3999     offset = adr.offset();
4000     rt_low = rt;
4001     rt_high = prev_ldst->target();
4002   } else {
4003     offset = prev_ldst->offset();
4004     rt_low = prev_ldst->target();
4005     rt_high = rt;
4006   }
4007 
4008   Address adr_p = Address(prev_ldst->base(), offset);
4009   // Overwrite previous generated binary.
4010   code_section()->set_end(prev);
4011 
4012   const size_t sz = prev_ldst->size_in_bytes();
4013   assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
4014   if (!is_store) {
4015     BLOCK_COMMENT("merged ldr pair");
4016     if (sz == 8) {
4017       ldp(rt_low, rt_high, adr_p);
4018     } else {
4019       ldpw(rt_low, rt_high, adr_p);
4020     }
4021   } else {
4022     BLOCK_COMMENT("merged str pair");
4023     if (sz == 8) {
4024       stp(rt_low, rt_high, adr_p);
4025     } else {
4026       stpw(rt_low, rt_high, adr_p);
4027     }
4028   }
4029 }
4030 
4031 /**
4032  * Multiply 64 bit by 64 bit first loop.
4033  */
4034 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
4035                                            Register y, Register y_idx, Register z,
4036                                            Register carry, Register product,
4037                                            Register idx, Register kdx) {
4038   //
4039   //  jlong carry, x[], y[], z[];
4040   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4041   //    huge_128 product = y[idx] * x[xstart] + carry;
4042   //    z[kdx] = (jlong)product;
4043   //    carry  = (jlong)(product >>> 64);
4044   //  }
4045   //  z[xstart] = carry;
4046   //
4047 
4048   Label L_first_loop, L_first_loop_exit;
4049   Label L_one_x, L_one_y, L_multiply;
4050 
4051   subsw(xstart, xstart, 1);
4052   br(Assembler::MI, L_one_x);
4053 
4054   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
4055   ldr(x_xstart, Address(rscratch1));
4056   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
4057 
4058   bind(L_first_loop);
4059   subsw(idx, idx, 1);
4060   br(Assembler::MI, L_first_loop_exit);
4061   subsw(idx, idx, 1);
4062   br(Assembler::MI, L_one_y);
4063   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4064   ldr(y_idx, Address(rscratch1));
4065   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
4066   bind(L_multiply);
4067 
4068   // AArch64 has a multiply-accumulate instruction that we can't use
4069   // here because it has no way to process carries, so we have to use
4070   // separate add and adc instructions.  Bah.
4071   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
4072   mul(product, x_xstart, y_idx);
4073   adds(product, product, carry);
4074   adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product
4075 
4076   subw(kdx, kdx, 2);
4077   ror(product, product, 32); // back to big-endian
4078   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
4079 
4080   b(L_first_loop);
4081 
4082   bind(L_one_y);
4083   ldrw(y_idx, Address(y,  0));
4084   b(L_multiply);
4085 
4086   bind(L_one_x);
4087   ldrw(x_xstart, Address(x,  0));
4088   b(L_first_loop);
4089 
4090   bind(L_first_loop_exit);
4091 }
4092 
4093 /**
4094  * Multiply 128 bit by 128 bit. Unrolled inner loop.
4095  *
4096  */
4097 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
4098                                              Register carry, Register carry2,
4099                                              Register idx, Register jdx,
4100                                              Register yz_idx1, Register yz_idx2,
4101                                              Register tmp, Register tmp3, Register tmp4,
4102                                              Register tmp6, Register product_hi) {
4103 
4104   //   jlong carry, x[], y[], z[];
4105   //   int kdx = ystart+1;
4106   //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
4107   //     huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
4108   //     jlong carry2  = (jlong)(tmp3 >>> 64);
4109   //     huge_128 tmp4 = (y[idx]   * product_hi) + z[kdx+idx] + carry2;
4110   //     carry  = (jlong)(tmp4 >>> 64);
4111   //     z[kdx+idx+1] = (jlong)tmp3;
4112   //     z[kdx+idx] = (jlong)tmp4;
4113   //   }
4114   //   idx += 2;
4115   //   if (idx > 0) {
4116   //     yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
4117   //     z[kdx+idx] = (jlong)yz_idx1;
4118   //     carry  = (jlong)(yz_idx1 >>> 64);
4119   //   }
4120   //
4121 
4122   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
4123 
4124   lsrw(jdx, idx, 2);
4125 
4126   bind(L_third_loop);
4127 
4128   subsw(jdx, jdx, 1);
4129   br(Assembler::MI, L_third_loop_exit);
4130   subw(idx, idx, 4);
4131 
4132   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4133 
4134   ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
4135 
4136   lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4137 
4138   ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
4139   ror(yz_idx2, yz_idx2, 32);
4140 
4141   ldp(rscratch2, rscratch1, Address(tmp6, 0));
4142 
4143   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
4144   umulh(tmp4, product_hi, yz_idx1);
4145 
4146   ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
4147   ror(rscratch2, rscratch2, 32);
4148 
4149   mul(tmp, product_hi, yz_idx2);   //  yz_idx2 * product_hi -> carry2:tmp
4150   umulh(carry2, product_hi, yz_idx2);
4151 
4152   // propagate sum of both multiplications into carry:tmp4:tmp3
4153   adds(tmp3, tmp3, carry);
4154   adc(tmp4, tmp4, zr);
4155   adds(tmp3, tmp3, rscratch1);
4156   adcs(tmp4, tmp4, tmp);
4157   adc(carry, carry2, zr);
4158   adds(tmp4, tmp4, rscratch2);
4159   adc(carry, carry, zr);
4160 
4161   ror(tmp3, tmp3, 32); // convert little-endian to big-endian
4162   ror(tmp4, tmp4, 32);
4163   stp(tmp4, tmp3, Address(tmp6, 0));
4164 
4165   b(L_third_loop);
4166   bind (L_third_loop_exit);
4167 
4168   andw (idx, idx, 0x3);
4169   cbz(idx, L_post_third_loop_done);
4170 
4171   Label L_check_1;
4172   subsw(idx, idx, 2);
4173   br(Assembler::MI, L_check_1);
4174 
4175   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4176   ldr(yz_idx1, Address(rscratch1, 0));
4177   ror(yz_idx1, yz_idx1, 32);
4178   mul(tmp3, product_hi, yz_idx1);  //  yz_idx1 * product_hi -> tmp4:tmp3
4179   umulh(tmp4, product_hi, yz_idx1);
4180   lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4181   ldr(yz_idx2, Address(rscratch1, 0));
4182   ror(yz_idx2, yz_idx2, 32);
4183 
4184   add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
4185 
4186   ror(tmp3, tmp3, 32);
4187   str(tmp3, Address(rscratch1, 0));
4188 
4189   bind (L_check_1);
4190 
4191   andw (idx, idx, 0x1);
4192   subsw(idx, idx, 1);
4193   br(Assembler::MI, L_post_third_loop_done);
4194   ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
4195   mul(tmp3, tmp4, product_hi);  //  tmp4 * product_hi -> carry2:tmp3
4196   umulh(carry2, tmp4, product_hi);
4197   ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4198 
4199   add2_with_carry(carry2, tmp3, tmp4, carry);
4200 
4201   strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
4202   extr(carry, carry2, tmp3, 32);
4203 
4204   bind(L_post_third_loop_done);
4205 }
4206 
4207 /**
4208  * Code for BigInteger::multiplyToLen() intrinsic.
4209  *
4210  * r0: x
4211  * r1: xlen
4212  * r2: y
4213  * r3: ylen
4214  * r4: z
4215  * r5: tmp0
4216  * r10: tmp1
4217  * r11: tmp2
4218  * r12: tmp3
4219  * r13: tmp4
4220  * r14: tmp5
4221  * r15: tmp6
4222  * r16: tmp7
4223  *
4224  */
4225 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
4226                                      Register z, Register tmp0,
4227                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4,
4228                                      Register tmp5, Register tmp6, Register product_hi) {
4229 
4230   assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);
4231 
4232   const Register idx = tmp1;
4233   const Register kdx = tmp2;
4234   const Register xstart = tmp3;
4235 
4236   const Register y_idx = tmp4;
4237   const Register carry = tmp5;
4238   const Register product  = xlen;
4239   const Register x_xstart = tmp0;
4240 
4241   // First Loop.
4242   //
4243   //  final static long LONG_MASK = 0xffffffffL;
4244   //  int xstart = xlen - 1;
4245   //  int ystart = ylen - 1;
4246   //  long carry = 0;
4247   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4248   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
4249   //    z[kdx] = (int)product;
4250   //    carry = product >>> 32;
4251   //  }
4252   //  z[xstart] = (int)carry;
4253   //
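       // The helper called below implements this loop with 64-bit arithmetic,
       // consuming two 32-bit words of y per iteration where the length allows.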
4254 
4255   movw(idx, ylen);       // idx = ylen;
4256   addw(kdx, xlen, ylen); // kdx = xlen+ylen;
4257   mov(carry, zr);        // carry = 0;
4258 
4259   Label L_done;
4260 
4261   movw(xstart, xlen);
4262   subsw(xstart, xstart, 1);
4263   br(Assembler::MI, L_done);
4264 
4265   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
4266 
4267   Label L_second_loop;
4268   cbzw(kdx, L_second_loop);
4269 
4270   Label L_carry;
4271   subw(kdx, kdx, 1);
4272   cbzw(kdx, L_carry);
4273 
4274   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4275   lsr(carry, carry, 32);
4276   subw(kdx, kdx, 1);
4277 
4278   bind(L_carry);
4279   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4280 
4281   // Second and third (nested) loops.
4282   //
4283   // for (int i = xstart-1; i >= 0; i--) { // Second loop
4284   //   carry = 0;
4285   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
4286   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
4287   //                    (z[k] & LONG_MASK) + carry;
4288   //     z[k] = (int)product;
4289   //     carry = product >>> 32;
4290   //   }
4291   //   z[i] = (int)carry;
4292   // }
4293   //
4294   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
4295 
4296   const Register jdx = tmp1;
4297 
4298   bind(L_second_loop);
4299   mov(carry, zr);                // carry = 0;
4300   movw(jdx, ylen);               // j = ystart+1
4301 
4302   subsw(xstart, xstart, 1);      // i = xstart-1;
4303   br(Assembler::MI, L_done);
4304 
4305   str(z, Address(pre(sp, -4 * wordSize)));
4306 
4307   Label L_last_x;
4308   lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
4309   subsw(xstart, xstart, 1);       // i = xstart-1;
4310   br(Assembler::MI, L_last_x);
4311 
4312   lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
4313   ldr(product_hi, Address(rscratch1));
4314   ror(product_hi, product_hi, 32);  // convert big-endian to little-endian
4315 
4316   Label L_third_loop_prologue;
4317   bind(L_third_loop_prologue);
4318 
4319   str(ylen, Address(sp, wordSize));
4320   stp(x, xstart, Address(sp, 2 * wordSize));
4321   multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
4322                           tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
4323   ldp(z, ylen, Address(post(sp, 2 * wordSize)));
4324   ldp(x, xlen, Address(post(sp, 2 * wordSize)));   // copy old xstart -> xlen
4325 
4326   addw(tmp3, xlen, 1);
4327   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4328   subsw(tmp3, tmp3, 1);
4329   br(Assembler::MI, L_done);
4330 
4331   lsr(carry, carry, 32);
4332   strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
4333   b(L_second_loop);
4334 
4335   // Infrequently executed code is moved outside the loop.
4336   bind(L_last_x);
4337   ldrw(product_hi, Address(x,  0));
4338   b(L_third_loop_prologue);
4339 
4340   bind(L_done);
4341 }
4342 
4343 // Code for BigInteger::mulAdd intrinsic
4344 // out     = r0
4345 // in      = r1
4346 // offset  = r2  (already out.length-offset)
4347 // len     = r3
4348 // k       = r4
4349 //
4350 // pseudo code from java implementation:
4351 // carry = 0;
4352 // offset = out.length-offset - 1;
4353 // for (int j=len-1; j >= 0; j--) {
4354 //     product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
4355 //     out[offset--] = (int)product;
4356 //     carry = product >>> 32;
4357 // }
4358 // return (int)carry;
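     //
     // Worked example (illustrative values): with in = {2}, out word = 5 at the
     // target offset and k = 3, product = 2*3 + 5 + 0 = 11, so 11 is stored
     // back and the returned carry is 11 >>> 32 == 0.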
4359 void MacroAssembler::mul_add(Register out, Register in, Register offset,
4360       Register len, Register k) {
4361     Label LOOP, END;
4362     // pre-loop
4363     cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => fewer branches
4364     csel(out, zr, out, Assembler::EQ);
4365     br(Assembler::EQ, END);
4366     add(in, in, len, LSL, 2); // in[j+1] address
4367     add(offset, out, offset, LSL, 2); // out[offset + 1] address
4368     mov(out, zr); // used to keep carry now
4369     BIND(LOOP);
4370     ldrw(rscratch1, Address(pre(in, -4)));
4371     madd(rscratch1, rscratch1, k, out);
4372     ldrw(rscratch2, Address(pre(offset, -4)));
4373     add(rscratch1, rscratch1, rscratch2);
4374     strw(rscratch1, Address(offset));
4375     lsr(out, rscratch1, 32);
4376     subs(len, len, 1);
4377     br(Assembler::NE, LOOP);
4378     BIND(END);
4379 }
4380 
4381 /**
4382  * Emits code to update CRC-32 with a byte value according to constants in table
4383  *
4384  * @param [in,out] crc  Register containing the crc.
4385  * @param [in] val      Register containing the byte to fold into the CRC.
4386  * @param [in] table    Register containing the table of crc constants.
4387  *
4388  * uint32_t crc;
4389  * val = crc_table[(val ^ crc) & 0xFF];
4390  * crc = val ^ (crc >> 8);
4391  *
4392  */
4393 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4394   eor(val, val, crc);
4395   andr(val, val, 0xff);
4396   ldrw(val, Address(table, val, Address::lsl(2)));
4397   eor(crc, val, crc, Assembler::LSR, 8);
4398 }
4399 
4400 /**
4401  * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
4402  *
4403  * @param [in,out] crc  Register containing the crc.
4404  * @param [in] v        Register containing the 32-bit value to fold into the CRC.
4405  * @param [in] table0   Register containing table 0 of crc constants.
4406  * @param [in] table1   Register containing table 1 of crc constants.
4407  * @param [in] table2   Register containing table 2 of crc constants.
4408  * @param [in] table3   Register containing table 3 of crc constants.
4409  *
4410  * uint32_t crc;
4411  *   v = crc ^ v
4412  *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
4413  *
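      * This is the slicing-by-four scheme: the four table lookups together are
      * equivalent to four successive byte-at-a-time updates of the same word,
      * so a single 32-bit load replaces four dependent byte iterations.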
4414  */
4415 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
4416         Register table0, Register table1, Register table2, Register table3,
4417         bool upper) {
4418   eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
4419   uxtb(tmp, v);
4420   ldrw(crc, Address(table3, tmp, Address::lsl(2)));
4421   ubfx(tmp, v, 8, 8);
4422   ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
4423   eor(crc, crc, tmp);
4424   ubfx(tmp, v, 16, 8);
4425   ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
4426   eor(crc, crc, tmp);
4427   ubfx(tmp, v, 24, 8);
4428   ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
4429   eor(crc, crc, tmp);
4430 }
4431 
4432 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
4433         Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4434     Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4435     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4436 
4437     subs(tmp0, len, 384);
4438     mvnw(crc, crc);
4439     br(Assembler::GE, CRC_by128_pre);
4440   BIND(CRC_less128);
4441     subs(len, len, 32);
4442     br(Assembler::GE, CRC_by32_loop);
4443   BIND(CRC_less32);
4444     adds(len, len, 32 - 4);
4445     br(Assembler::GE, CRC_by4_loop);
4446     adds(len, len, 4);
4447     br(Assembler::GT, CRC_by1_loop);
4448     b(L_exit);
4449 
4450   BIND(CRC_by32_loop);
4451     ldp(tmp0, tmp1, Address(buf));
4452     crc32x(crc, crc, tmp0);
4453     ldp(tmp2, tmp3, Address(buf, 16));
4454     crc32x(crc, crc, tmp1);
4455     add(buf, buf, 32);
4456     crc32x(crc, crc, tmp2);
4457     subs(len, len, 32);
4458     crc32x(crc, crc, tmp3);
4459     br(Assembler::GE, CRC_by32_loop);
4460     cmn(len, (u1)32);
4461     br(Assembler::NE, CRC_less32);
4462     b(L_exit);
4463 
4464   BIND(CRC_by4_loop);
4465     ldrw(tmp0, Address(post(buf, 4)));
4466     subs(len, len, 4);
4467     crc32w(crc, crc, tmp0);
4468     br(Assembler::GE, CRC_by4_loop);
4469     adds(len, len, 4);
4470     br(Assembler::LE, L_exit);
4471   BIND(CRC_by1_loop);
4472     ldrb(tmp0, Address(post(buf, 1)));
4473     subs(len, len, 1);
4474     crc32b(crc, crc, tmp0);
4475     br(Assembler::GT, CRC_by1_loop);
4476     b(L_exit);
4477 
4478   BIND(CRC_by128_pre);
4479     kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4480       4*256*sizeof(juint) + 8*sizeof(juint));
4481     mov(crc, 0);
4482     crc32x(crc, crc, tmp0);
4483     crc32x(crc, crc, tmp1);
4484 
4485     cbnz(len, CRC_less128);
4486 
4487   BIND(L_exit);
4488     mvnw(crc, crc);
4489 }
4490 
4491 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf,
4492         Register len, Register tmp0, Register tmp1, Register tmp2,
4493         Register tmp3) {
4494     Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4495     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4496 
4497     mvnw(crc, crc);
4498 
4499     subs(len, len, 128);
4500     br(Assembler::GE, CRC_by64_pre);
4501   BIND(CRC_less64);
4502     adds(len, len, 128-32);
4503     br(Assembler::GE, CRC_by32_loop);
4504   BIND(CRC_less32);
4505     adds(len, len, 32-4);
4506     br(Assembler::GE, CRC_by4_loop);
4507     adds(len, len, 4);
4508     br(Assembler::GT, CRC_by1_loop);
4509     b(L_exit);
4510 
4511   BIND(CRC_by32_loop);
4512     ldp(tmp0, tmp1, Address(post(buf, 16)));
4513     subs(len, len, 32);
4514     crc32x(crc, crc, tmp0);
4515     ldr(tmp2, Address(post(buf, 8)));
4516     crc32x(crc, crc, tmp1);
4517     ldr(tmp3, Address(post(buf, 8)));
4518     crc32x(crc, crc, tmp2);
4519     crc32x(crc, crc, tmp3);
4520     br(Assembler::GE, CRC_by32_loop);
4521     cmn(len, (u1)32);
4522     br(Assembler::NE, CRC_less32);
4523     b(L_exit);
4524 
4525   BIND(CRC_by4_loop);
4526     ldrw(tmp0, Address(post(buf, 4)));
4527     subs(len, len, 4);
4528     crc32w(crc, crc, tmp0);
4529     br(Assembler::GE, CRC_by4_loop);
4530     adds(len, len, 4);
4531     br(Assembler::LE, L_exit);
4532   BIND(CRC_by1_loop);
4533     ldrb(tmp0, Address(post(buf, 1)));
4534     subs(len, len, 1);
4535     crc32b(crc, crc, tmp0);
4536     br(Assembler::GT, CRC_by1_loop);
4537     b(L_exit);
4538 
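         // The 64-byte main loop below is software-pipelined: buf is biased by
         // -8 up front so each pre(buf, 64) advances the pointer, and every
         // crc32x overlaps with a load that feeds a later step.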
4539   BIND(CRC_by64_pre);
4540     sub(buf, buf, 8);
4541     ldp(tmp0, tmp1, Address(buf, 8));
4542     crc32x(crc, crc, tmp0);
4543     ldr(tmp2, Address(buf, 24));
4544     crc32x(crc, crc, tmp1);
4545     ldr(tmp3, Address(buf, 32));
4546     crc32x(crc, crc, tmp2);
4547     ldr(tmp0, Address(buf, 40));
4548     crc32x(crc, crc, tmp3);
4549     ldr(tmp1, Address(buf, 48));
4550     crc32x(crc, crc, tmp0);
4551     ldr(tmp2, Address(buf, 56));
4552     crc32x(crc, crc, tmp1);
4553     ldr(tmp3, Address(pre(buf, 64)));
4554 
4555     b(CRC_by64_loop);
4556 
4557     align(CodeEntryAlignment);
4558   BIND(CRC_by64_loop);
4559     subs(len, len, 64);
4560     crc32x(crc, crc, tmp2);
4561     ldr(tmp0, Address(buf, 8));
4562     crc32x(crc, crc, tmp3);
4563     ldr(tmp1, Address(buf, 16));
4564     crc32x(crc, crc, tmp0);
4565     ldr(tmp2, Address(buf, 24));
4566     crc32x(crc, crc, tmp1);
4567     ldr(tmp3, Address(buf, 32));
4568     crc32x(crc, crc, tmp2);
4569     ldr(tmp0, Address(buf, 40));
4570     crc32x(crc, crc, tmp3);
4571     ldr(tmp1, Address(buf, 48));
4572     crc32x(crc, crc, tmp0);
4573     ldr(tmp2, Address(buf, 56));
4574     crc32x(crc, crc, tmp1);
4575     ldr(tmp3, Address(pre(buf, 64)));
4576     br(Assembler::GE, CRC_by64_loop);
4577 
4578     // post-loop
4579     crc32x(crc, crc, tmp2);
4580     crc32x(crc, crc, tmp3);
4581 
4582     sub(len, len, 64);
4583     add(buf, buf, 8);
4584     cmn(len, (u1)128);
4585     br(Assembler::NE, CRC_less64);
4586   BIND(L_exit);
4587     mvnw(crc, crc);
4588 }
4589 
4590 /**
4591  * @param crc             register containing existing CRC (32-bit)
4592  * @param buf             register pointing to input byte buffer (byte*)
4593  * @param len             register containing number of bytes
4594  * @param table0..table3  registers that will hold the addresses of CRC tables 0-3
4595  *                        (passed through as scratch by the hardware paths)
4596  * @param tmp, tmp2, tmp3 scratch registers
 */
4597 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
4598         Register table0, Register table1, Register table2, Register table3,
4599         Register tmp, Register tmp2, Register tmp3) {
4600   Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
4601 
4602   if (UseCryptoPmullForCRC32) {
4603       kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4604       return;
4605   }
4606 
4607   if (UseCRC32) {
4608       kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
4609       return;
4610   }
4611 
4612     mvnw(crc, crc);
4613 
4614     {
4615       uint64_t offset;
4616       adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4617       add(table0, table0, offset);
4618     }
4619     add(table1, table0, 1*256*sizeof(juint));
4620     add(table2, table0, 2*256*sizeof(juint));
4621     add(table3, table0, 3*256*sizeof(juint));
4622 
4623     { // Neon code start
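           // Folding idea (sketch): CRC is polynomial arithmetic over GF(2);
           // since (M(x) * x^k) mod P(x) == ((M(x) mod P(x)) * (x^k mod P(x))) mod P(x),
           // the loop multiplies the accumulated state by precomputed constants
           // (powers of x mod P, stored after the tables) using carry-less
           // multiplies and XORs the result into the incoming data.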
4624       cmp(len, (u1)64);
4625       br(Assembler::LT, L_by16);
4626       eor(v16, T16B, v16, v16);
4627 
4628     Label L_fold;
4629 
4630       add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
4631 
4632       ld1(v0, v1, T2D, post(buf, 32));
4633       ld1r(v4, T2D, post(tmp, 8));
4634       ld1r(v5, T2D, post(tmp, 8));
4635       ld1r(v6, T2D, post(tmp, 8));
4636       ld1r(v7, T2D, post(tmp, 8));
4637       mov(v16, S, 0, crc);
4638 
4639       eor(v0, T16B, v0, v16);
4640       sub(len, len, 64);
4641 
4642     BIND(L_fold);
4643       pmull(v22, T8H, v0, v5, T8B);
4644       pmull(v20, T8H, v0, v7, T8B);
4645       pmull(v23, T8H, v0, v4, T8B);
4646       pmull(v21, T8H, v0, v6, T8B);
4647 
4648       pmull2(v18, T8H, v0, v5, T16B);
4649       pmull2(v16, T8H, v0, v7, T16B);
4650       pmull2(v19, T8H, v0, v4, T16B);
4651       pmull2(v17, T8H, v0, v6, T16B);
4652 
4653       uzp1(v24, T8H, v20, v22);
4654       uzp2(v25, T8H, v20, v22);
4655       eor(v20, T16B, v24, v25);
4656 
4657       uzp1(v26, T8H, v16, v18);
4658       uzp2(v27, T8H, v16, v18);
4659       eor(v16, T16B, v26, v27);
4660 
4661       ushll2(v22, T4S, v20, T8H, 8);
4662       ushll(v20, T4S, v20, T4H, 8);
4663 
4664       ushll2(v18, T4S, v16, T8H, 8);
4665       ushll(v16, T4S, v16, T4H, 8);
4666 
4667       eor(v22, T16B, v23, v22);
4668       eor(v18, T16B, v19, v18);
4669       eor(v20, T16B, v21, v20);
4670       eor(v16, T16B, v17, v16);
4671 
4672       uzp1(v17, T2D, v16, v20);
4673       uzp2(v21, T2D, v16, v20);
4674       eor(v17, T16B, v17, v21);
4675 
4676       ushll2(v20, T2D, v17, T4S, 16);
4677       ushll(v16, T2D, v17, T2S, 16);
4678 
4679       eor(v20, T16B, v20, v22);
4680       eor(v16, T16B, v16, v18);
4681 
4682       uzp1(v17, T2D, v20, v16);
4683       uzp2(v21, T2D, v20, v16);
4684       eor(v28, T16B, v17, v21);
4685 
4686       pmull(v22, T8H, v1, v5, T8B);
4687       pmull(v20, T8H, v1, v7, T8B);
4688       pmull(v23, T8H, v1, v4, T8B);
4689       pmull(v21, T8H, v1, v6, T8B);
4690 
4691       pmull2(v18, T8H, v1, v5, T16B);
4692       pmull2(v16, T8H, v1, v7, T16B);
4693       pmull2(v19, T8H, v1, v4, T16B);
4694       pmull2(v17, T8H, v1, v6, T16B);
4695 
4696       ld1(v0, v1, T2D, post(buf, 32));
4697 
4698       uzp1(v24, T8H, v20, v22);
4699       uzp2(v25, T8H, v20, v22);
4700       eor(v20, T16B, v24, v25);
4701 
4702       uzp1(v26, T8H, v16, v18);
4703       uzp2(v27, T8H, v16, v18);
4704       eor(v16, T16B, v26, v27);
4705 
4706       ushll2(v22, T4S, v20, T8H, 8);
4707       ushll(v20, T4S, v20, T4H, 8);
4708 
4709       ushll2(v18, T4S, v16, T8H, 8);
4710       ushll(v16, T4S, v16, T4H, 8);
4711 
4712       eor(v22, T16B, v23, v22);
4713       eor(v18, T16B, v19, v18);
4714       eor(v20, T16B, v21, v20);
4715       eor(v16, T16B, v17, v16);
4716 
4717       uzp1(v17, T2D, v16, v20);
4718       uzp2(v21, T2D, v16, v20);
4719       eor(v16, T16B, v17, v21);
4720 
4721       ushll2(v20, T2D, v16, T4S, 16);
4722       ushll(v16, T2D, v16, T2S, 16);
4723 
4724       eor(v20, T16B, v22, v20);
4725       eor(v16, T16B, v16, v18);
4726 
4727       uzp1(v17, T2D, v20, v16);
4728       uzp2(v21, T2D, v20, v16);
4729       eor(v20, T16B, v17, v21);
4730 
4731       shl(v16, T2D, v28, 1);
4732       shl(v17, T2D, v20, 1);
4733 
4734       eor(v0, T16B, v0, v16);
4735       eor(v1, T16B, v1, v17);
4736 
4737       subs(len, len, 32);
4738       br(Assembler::GE, L_fold);
4739 
4740       mov(crc, 0);
4741       mov(tmp, v0, D, 0);
4742       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4743       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4744       mov(tmp, v0, D, 1);
4745       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4746       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4747       mov(tmp, v1, D, 0);
4748       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4749       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4750       mov(tmp, v1, D, 1);
4751       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4752       update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4753 
4754       add(len, len, 32);
4755     } // Neon code end
4756 
4757   BIND(L_by16);
4758     subs(len, len, 16);
4759     br(Assembler::GE, L_by16_loop);
4760     adds(len, len, 16-4);
4761     br(Assembler::GE, L_by4_loop);
4762     adds(len, len, 4);
4763     br(Assembler::GT, L_by1_loop);
4764     b(L_exit);
4765 
4766   BIND(L_by4_loop);
4767     ldrw(tmp, Address(post(buf, 4)));
4768     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
4769     subs(len, len, 4);
4770     br(Assembler::GE, L_by4_loop);
4771     adds(len, len, 4);
4772     br(Assembler::LE, L_exit);
4773   BIND(L_by1_loop);
4774     subs(len, len, 1);
4775     ldrb(tmp, Address(post(buf, 1)));
4776     update_byte_crc32(crc, tmp, table0);
4777     br(Assembler::GT, L_by1_loop);
4778     b(L_exit);
4779 
4780     align(CodeEntryAlignment);
4781   BIND(L_by16_loop);
4782     subs(len, len, 16);
4783     ldp(tmp, tmp3, Address(post(buf, 16)));
4784     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
4785     update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
4786     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
4787     update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
4788     br(Assembler::GE, L_by16_loop);
4789     adds(len, len, 16-4);
4790     br(Assembler::GE, L_by4_loop);
4791     adds(len, len, 4);
4792     br(Assembler::GT, L_by1_loop);
4793   BIND(L_exit);
4794     mvnw(crc, crc);
4795 }
4796 
4797 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf,
4798         Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4799     Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4800     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4801 
4802     subs(tmp0, len, 384);
4803     br(Assembler::GE, CRC_by128_pre);
4804   BIND(CRC_less128);
4805     subs(len, len, 32);
4806     br(Assembler::GE, CRC_by32_loop);
4807   BIND(CRC_less32);
4808     adds(len, len, 32 - 4);
4809     br(Assembler::GE, CRC_by4_loop);
4810     adds(len, len, 4);
4811     br(Assembler::GT, CRC_by1_loop);
4812     b(L_exit);
4813 
4814   BIND(CRC_by32_loop);
4815     ldp(tmp0, tmp1, Address(buf));
4816     crc32cx(crc, crc, tmp0);
4817     ldr(tmp2, Address(buf, 16));
4818     crc32cx(crc, crc, tmp1);
4819     ldr(tmp3, Address(buf, 24));
4820     crc32cx(crc, crc, tmp2);
4821     add(buf, buf, 32);
4822     subs(len, len, 32);
4823     crc32cx(crc, crc, tmp3);
4824     br(Assembler::GE, CRC_by32_loop);
4825     cmn(len, (u1)32);
4826     br(Assembler::NE, CRC_less32);
4827     b(L_exit);
4828 
4829   BIND(CRC_by4_loop);
4830     ldrw(tmp0, Address(post(buf, 4)));
4831     subs(len, len, 4);
4832     crc32cw(crc, crc, tmp0);
4833     br(Assembler::GE, CRC_by4_loop);
4834     adds(len, len, 4);
4835     br(Assembler::LE, L_exit);
4836   BIND(CRC_by1_loop);
4837     ldrb(tmp0, Address(post(buf, 1)));
4838     subs(len, len, 1);
4839     crc32cb(crc, crc, tmp0);
4840     br(Assembler::GT, CRC_by1_loop);
4841     b(L_exit);
4842 
4843   BIND(CRC_by128_pre);
4844     kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4845       4*256*sizeof(juint) + 8*sizeof(juint) + 0x50);
4846     mov(crc, 0);
4847     crc32cx(crc, crc, tmp0);
4848     crc32cx(crc, crc, tmp1);
4849 
4850     cbnz(len, CRC_less128);
4851 
4852   BIND(L_exit);
4853 }
4854 
4855 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
4856         Register len, Register tmp0, Register tmp1, Register tmp2,
4857         Register tmp3) {
4858     Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
4859     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
4860 
4861     subs(len, len, 128);
4862     br(Assembler::GE, CRC_by64_pre);
4863   BIND(CRC_less64);
4864     adds(len, len, 128-32);
4865     br(Assembler::GE, CRC_by32_loop);
4866   BIND(CRC_less32);
4867     adds(len, len, 32-4);
4868     br(Assembler::GE, CRC_by4_loop);
4869     adds(len, len, 4);
4870     br(Assembler::GT, CRC_by1_loop);
4871     b(L_exit);
4872 
4873   BIND(CRC_by32_loop);
4874     ldp(tmp0, tmp1, Address(post(buf, 16)));
4875     subs(len, len, 32);
4876     crc32cx(crc, crc, tmp0);
4877     ldr(tmp2, Address(post(buf, 8)));
4878     crc32cx(crc, crc, tmp1);
4879     ldr(tmp3, Address(post(buf, 8)));
4880     crc32cx(crc, crc, tmp2);
4881     crc32cx(crc, crc, tmp3);
4882     br(Assembler::GE, CRC_by32_loop);
4883     cmn(len, (u1)32);
4884     br(Assembler::NE, CRC_less32);
4885     b(L_exit);
4886 
4887   BIND(CRC_by4_loop);
4888     ldrw(tmp0, Address(post(buf, 4)));
4889     subs(len, len, 4);
4890     crc32cw(crc, crc, tmp0);
4891     br(Assembler::GE, CRC_by4_loop);
4892     adds(len, len, 4);
4893     br(Assembler::LE, L_exit);
4894   BIND(CRC_by1_loop);
4895     ldrb(tmp0, Address(post(buf, 1)));
4896     subs(len, len, 1);
4897     crc32cb(crc, crc, tmp0);
4898     br(Assembler::GT, CRC_by1_loop);
4899     b(L_exit);
4900 
4901   BIND(CRC_by64_pre);
4902     sub(buf, buf, 8);
4903     ldp(tmp0, tmp1, Address(buf, 8));
4904     crc32cx(crc, crc, tmp0);
4905     ldr(tmp2, Address(buf, 24));
4906     crc32cx(crc, crc, tmp1);
4907     ldr(tmp3, Address(buf, 32));
4908     crc32cx(crc, crc, tmp2);
4909     ldr(tmp0, Address(buf, 40));
4910     crc32cx(crc, crc, tmp3);
4911     ldr(tmp1, Address(buf, 48));
4912     crc32cx(crc, crc, tmp0);
4913     ldr(tmp2, Address(buf, 56));
4914     crc32cx(crc, crc, tmp1);
4915     ldr(tmp3, Address(pre(buf, 64)));
4916 
4917     b(CRC_by64_loop);
4918 
4919     align(CodeEntryAlignment);
4920   BIND(CRC_by64_loop);
4921     subs(len, len, 64);
4922     crc32cx(crc, crc, tmp2);
4923     ldr(tmp0, Address(buf, 8));
4924     crc32cx(crc, crc, tmp3);
4925     ldr(tmp1, Address(buf, 16));
4926     crc32cx(crc, crc, tmp0);
4927     ldr(tmp2, Address(buf, 24));
4928     crc32cx(crc, crc, tmp1);
4929     ldr(tmp3, Address(buf, 32));
4930     crc32cx(crc, crc, tmp2);
4931     ldr(tmp0, Address(buf, 40));
4932     crc32cx(crc, crc, tmp3);
4933     ldr(tmp1, Address(buf, 48));
4934     crc32cx(crc, crc, tmp0);
4935     ldr(tmp2, Address(buf, 56));
4936     crc32cx(crc, crc, tmp1);
4937     ldr(tmp3, Address(pre(buf, 64)));
4938     br(Assembler::GE, CRC_by64_loop);
4939 
4940     // post-loop
4941     crc32cx(crc, crc, tmp2);
4942     crc32cx(crc, crc, tmp3);
4943 
4944     sub(len, len, 64);
4945     add(buf, buf, 8);
4946     cmn(len, (u1)128);
4947     br(Assembler::NE, CRC_less64);
4948   BIND(L_exit);
4949 }
4950 
4951 /**
4952  * @param crc             register containing existing CRC (32-bit)
4953  * @param buf             register pointing to input byte buffer (byte*)
4954  * @param len             register containing number of bytes
4955  * @param table0..table3  registers that will hold the addresses of CRC tables 0-3
4956  *                        (passed through as scratch by the hardware paths)
4957  * @param tmp, tmp2, tmp3 scratch registers
 */
4958 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
4959         Register table0, Register table1, Register table2, Register table3,
4960         Register tmp, Register tmp2, Register tmp3) {
4961   if (UseCryptoPmullForCRC32) {
4962     kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3);
4963   } else {
4964     kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
4965   }
4966 }
4967 
4968 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf,
4969         Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) {
4970     Label CRC_by128_loop;
4971     assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4972 
4973     sub(len, len, 256);
4974     Register table = tmp0;
4975     {
4976       uint64_t offset;
4977       adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset);
4978       add(table, table, offset);
4979     }
4980     add(table, table, table_offset);
4981 
4982     // Registers v0..v7 are used as data registers.
4983     // Registers v16..v31 are used as tmp registers.
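         // The main loop keeps eight 16-byte lanes (v0..v7) in flight; each
         // iteration folds every lane 128 bytes forward: pmull/pmull2 multiply
         // the lane by the folding constants in v16, and eor3 combines both
         // 128-bit products with the freshly loaded data.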
4984     sub(buf, buf, 0x10);
4985     ldrq(v0, Address(buf, 0x10));
4986     ldrq(v1, Address(buf, 0x20));
4987     ldrq(v2, Address(buf, 0x30));
4988     ldrq(v3, Address(buf, 0x40));
4989     ldrq(v4, Address(buf, 0x50));
4990     ldrq(v5, Address(buf, 0x60));
4991     ldrq(v6, Address(buf, 0x70));
4992     ldrq(v7, Address(pre(buf, 0x80)));
4993 
4994     movi(v31, T4S, 0);
4995     mov(v31, S, 0, crc);
4996     eor(v0, T16B, v0, v31);
4997 
4998     // Register v16 contains constants from the crc table.
4999     ldrq(v16, Address(table));
5000     b(CRC_by128_loop);
5001 
5002     align(OptoLoopAlignment);
5003   BIND(CRC_by128_loop);
5004     pmull (v17,  T1Q, v0, v16, T1D);
5005     pmull2(v18, T1Q, v0, v16, T2D);
5006     ldrq(v0, Address(buf, 0x10));
5007     eor3(v0, T16B, v17,  v18, v0);
5008 
5009     pmull (v19, T1Q, v1, v16, T1D);
5010     pmull2(v20, T1Q, v1, v16, T2D);
5011     ldrq(v1, Address(buf, 0x20));
5012     eor3(v1, T16B, v19, v20, v1);
5013 
5014     pmull (v21, T1Q, v2, v16, T1D);
5015     pmull2(v22, T1Q, v2, v16, T2D);
5016     ldrq(v2, Address(buf, 0x30));
5017     eor3(v2, T16B, v21, v22, v2);
5018 
5019     pmull (v23, T1Q, v3, v16, T1D);
5020     pmull2(v24, T1Q, v3, v16, T2D);
5021     ldrq(v3, Address(buf, 0x40));
5022     eor3(v3, T16B, v23, v24, v3);
5023 
5024     pmull (v25, T1Q, v4, v16, T1D);
5025     pmull2(v26, T1Q, v4, v16, T2D);
5026     ldrq(v4, Address(buf, 0x50));
5027     eor3(v4, T16B, v25, v26, v4);
5028 
5029     pmull (v27, T1Q, v5, v16, T1D);
5030     pmull2(v28, T1Q, v5, v16, T2D);
5031     ldrq(v5, Address(buf, 0x60));
5032     eor3(v5, T16B, v27, v28, v5);
5033 
5034     pmull (v29, T1Q, v6, v16, T1D);
5035     pmull2(v30, T1Q, v6, v16, T2D);
5036     ldrq(v6, Address(buf, 0x70));
5037     eor3(v6, T16B, v29, v30, v6);
5038 
5039     // Reuse registers v23, v24.
5040     // Using them won't block the first instruction of the next iteration.
5041     pmull (v23, T1Q, v7, v16, T1D);
5042     pmull2(v24, T1Q, v7, v16, T2D);
5043     ldrq(v7, Address(pre(buf, 0x80)));
5044     eor3(v7, T16B, v23, v24, v7);
5045 
5046     subs(len, len, 0x80);
5047     br(Assembler::GE, CRC_by128_loop);
5048 
5049     // fold into 512 bits
5050     // Use v31 for constants because v16 can be still in use.
5051     ldrq(v31, Address(table, 0x10));
5052 
5053     pmull (v17,  T1Q, v0, v31, T1D);
5054     pmull2(v18, T1Q, v0, v31, T2D);
5055     eor3(v0, T16B, v17, v18, v4);
5056 
5057     pmull (v19, T1Q, v1, v31, T1D);
5058     pmull2(v20, T1Q, v1, v31, T2D);
5059     eor3(v1, T16B, v19, v20, v5);
5060 
5061     pmull (v21, T1Q, v2, v31, T1D);
5062     pmull2(v22, T1Q, v2, v31, T2D);
5063     eor3(v2, T16B, v21, v22, v6);
5064 
5065     pmull (v23, T1Q, v3, v31, T1D);
5066     pmull2(v24, T1Q, v3, v31, T2D);
5067     eor3(v3, T16B, v23, v24, v7);
5068 
5069     // fold into 128 bits
5070     // Use v17 for constants because v31 can be still in use.
5071     ldrq(v17, Address(table, 0x20));
5072     pmull (v25, T1Q, v0, v17, T1D);
5073     pmull2(v26, T1Q, v0, v17, T2D);
5074     eor3(v3, T16B, v3, v25, v26);
5075 
5076     // Use v18 for constants because v17 can be still in use.
5077     ldrq(v18, Address(table, 0x30));
5078     pmull (v27, T1Q, v1, v18, T1D);
5079     pmull2(v28, T1Q, v1, v18, T2D);
5080     eor3(v3, T16B, v3, v27, v28);
5081 
5082     // Use v19 for constants because v18 can be still in use.
5083     ldrq(v19, Address(table, 0x40));
5084     pmull (v29, T1Q, v2, v19, T1D);
5085     pmull2(v30, T1Q, v2, v19, T2D);
5086     eor3(v0, T16B, v3, v29, v30);
5087 
5088     add(len, len, 0x80);
5089     add(buf, buf, 0x10);
5090 
5091     mov(tmp0, v0, D, 0);
5092     mov(tmp1, v0, D, 1);
5093 }
5094 
5095 void MacroAssembler::addptr(const Address &dst, int32_t src) {
5096   Address adr;
5097   switch(dst.getMode()) {
5098   case Address::base_plus_offset:
5099     // This is the expected mode, although we allow all the other
5100     // forms below.
5101     adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
5102     break;
5103   default:
5104     lea(rscratch2, dst);
5105     adr = Address(rscratch2);
5106     break;
5107   }
5108   ldr(rscratch1, adr);
5109   add(rscratch1, rscratch1, src);
5110   str(rscratch1, adr);
5111 }
5112 
5113 void MacroAssembler::cmpptr(Register src1, Address src2) {
5114   uint64_t offset;
5115   adrp(rscratch1, src2, offset);
5116   ldr(rscratch1, Address(rscratch1, offset));
5117   cmp(src1, rscratch1);
5118 }
5119 
5120 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
5121   cmp(obj1, obj2);
5122 }
5123 
5124 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
5125   load_method_holder(rresult, rmethod);
5126   ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
5127 }
5128 
5129 void MacroAssembler::load_method_holder(Register holder, Register method) {
5130   ldr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
5131   ldr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
5132   ldr(holder, Address(holder, ConstantPool::pool_holder_offset()));          // InstanceKlass*
5133 }
5134 
5135 void MacroAssembler::load_metadata(Register dst, Register src) {
5136   if (UseCompactObjectHeaders) {
5137     load_narrow_klass_compact(dst, src);
5138   } else if (UseCompressedClassPointers) {
5139     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5140   } else {
5141     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5142   }
5143 }
5144 
5145 // Loads the obj's narrow Klass* into dst.
5146 // Preserves all registers (incl src, rscratch1 and rscratch2).
5147 // Input:
5148 // src - the oop we want to load the klass from.
5149 // dst - output narrow klass.
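     // With compact headers the narrow Klass* is held in the upper bits of the
     // mark word, so one load plus a shift by markWord::klass_shift recovers it.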
5150 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) {
5151   assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders");
5152   ldr(dst, Address(src, oopDesc::mark_offset_in_bytes()));
5153   lsr(dst, dst, markWord::klass_shift);
5154 }
5155 
5156 void MacroAssembler::load_klass(Register dst, Register src) {
5157   if (UseCompactObjectHeaders) {
5158     load_narrow_klass_compact(dst, src);
5159     decode_klass_not_null(dst);
5160   } else if (UseCompressedClassPointers) {
5161     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5162     decode_klass_not_null(dst);
5163   } else {
5164     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5165   }
5166 }
5167 
5168 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) {
5169   if (RestoreMXCSROnJNICalls) {
5170     Label OK;
5171     get_fpcr(tmp1);
5172     mov(tmp2, tmp1);
5173     // Set FPCR to the state we need. We do want Round to Nearest. We
5174     // don't want non-IEEE rounding modes or floating-point traps.
5175     bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode
5176     bfi(tmp1, zr, 8, 5);  // Clear exception-control bits (8-12)
5177     bfi(tmp1, zr, 0, 2);  // Clear AH:FIZ
5178     eor(tmp2, tmp1, tmp2);
5179     cbz(tmp2, OK);        // Only reset FPCR if it's wrong
5180     set_fpcr(tmp1);
5181     bind(OK);
5182   }
5183 }
5184 
5185 // ((OopHandle)result).resolve();
5186 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) {
5187   // OopHandle::resolve is an indirection.
5188   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2);
5189 }
5190 
5191 // ((WeakHandle)result).resolve();
5192 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) {
5193   assert_different_registers(result, tmp1, tmp2);
5194   Label resolved;
5195 
5196   // A null weak handle resolves to null.
5197   cbz(result, resolved);
5198 
5199   // Only 64-bit platforms support GCs that require a tmp register.
5200   // WeakHandle::resolve is an indirection, like jweak.
5201   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
5202                  result, Address(result), tmp1, tmp2);
5203   bind(resolved);
5204 }
5205 
5206 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
5207   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
5208   ldr(dst, Address(rmethod, Method::const_offset()));
5209   ldr(dst, Address(dst, ConstMethod::constants_offset()));
5210   ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
5211   ldr(dst, Address(dst, mirror_offset));
5212   resolve_oop_handle(dst, tmp1, tmp2);
5213 }
5214 
5215 void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
5216   assert_different_registers(obj, klass, tmp);
5217   if (UseCompressedClassPointers) {
5218     if (UseCompactObjectHeaders) {
5219       load_narrow_klass_compact(tmp, obj);
5220     } else {
5221       ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
5222     }
5223     if (CompressedKlassPointers::base() == nullptr) {
5224       cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
5225       return;
5226     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
5227                && CompressedKlassPointers::shift() == 0) {
5228       // Only the bottom 32 bits matter
5229       cmpw(klass, tmp);
5230       return;
5231     }
5232     decode_klass_not_null(tmp);
5233   } else {
5234     ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
5235   }
5236   cmp(klass, tmp);
5237 }
5238 
5239 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
5240   if (UseCompactObjectHeaders) {
5241     load_narrow_klass_compact(tmp1, obj1);
5242     load_narrow_klass_compact(tmp2, obj2);
5243     cmpw(tmp1, tmp2);
5244   } else if (UseCompressedClassPointers) {
5245     ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5246     ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5247     cmpw(tmp1, tmp2);
5248   } else {
5249     ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
5250     ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
5251     cmp(tmp1, tmp2);
5252   }
5253 }
5254 
5255 void MacroAssembler::load_prototype_header(Register dst, Register src) {
5256   load_klass(dst, src);
5257   ldr(dst, Address(dst, Klass::prototype_header_offset()));
5258 }
5259 
5260 void MacroAssembler::store_klass(Register dst, Register src) {
5261   // FIXME: Should this be a store release?  Concurrent GCs assume the
5262   // klass length is valid if the klass field is not null.
5263   assert(!UseCompactObjectHeaders, "not with compact headers");
5264   if (UseCompressedClassPointers) {
5265     encode_klass_not_null(src);
5266     strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5267   } else {
5268     str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
5269   }
5270 }
5271 
5272 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5273   assert(!UseCompactObjectHeaders, "not with compact headers");
5274   if (UseCompressedClassPointers) {
5275     // Store to klass gap in destination
5276     strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
5277   }
5278 }
5279 
5280 // Algorithm must match CompressedOops::encode.
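     // Encoding sketch: narrow = (oop == nullptr) ? 0 : (oop - base) >> shift.
     // The subs/csel pair below maps any address under the heap base (i.e. null)
     // to zero without a branch; with a null base only the shift is applied.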
5281 void MacroAssembler::encode_heap_oop(Register d, Register s) {
5282 #ifdef ASSERT
5283   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5284 #endif
5285   verify_oop_msg(s, "broken oop in encode_heap_oop");
5286   if (CompressedOops::base() == nullptr) {
5287     if (CompressedOops::shift() != 0) {
5288       assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5289       lsr(d, s, LogMinObjAlignmentInBytes);
5290     } else {
5291       mov(d, s);
5292     }
5293   } else {
5294     subs(d, s, rheapbase);
5295     csel(d, d, zr, Assembler::HS);
5296     lsr(d, d, LogMinObjAlignmentInBytes);
5297 
5298     /*  Old algorithm: is this any worse?
5299     Label nonnull;
5300     cbnz(r, nonnull);
5301     sub(r, r, rheapbase);
5302     bind(nonnull);
5303     lsr(r, r, LogMinObjAlignmentInBytes);
5304     */
5305   }
5306 }
5307 
5308 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5309 #ifdef ASSERT
5310   verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5311   if (CheckCompressedOops) {
5312     Label ok;
5313     cbnz(r, ok);
5314     stop("null oop passed to encode_heap_oop_not_null");
5315     bind(ok);
5316   }
5317 #endif
5318   verify_oop_msg(r, "broken oop in encode_heap_oop_not_null");
5319   if (CompressedOops::base() != nullptr) {
5320     sub(r, r, rheapbase);
5321   }
5322   if (CompressedOops::shift() != 0) {
5323     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5324     lsr(r, r, LogMinObjAlignmentInBytes);
5325   }
5326 }
5327 
5328 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5329 #ifdef ASSERT
5330   verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5331   if (CheckCompressedOops) {
5332     Label ok;
5333     cbnz(src, ok);
5334     stop("null oop passed to encode_heap_oop_not_null2");
5335     bind(ok);
5336   }
5337 #endif
5338   verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2");
5339 
5340   Register data = src;
5341   if (CompressedOops::base() != nullptr) {
5342     sub(dst, src, rheapbase);
5343     data = dst;
5344   }
5345   if (CompressedOops::shift() != 0) {
5346     assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5347     lsr(dst, data, LogMinObjAlignmentInBytes);
5348     data = dst;
5349   }
5350   if (data == src)
5351     mov(dst, src);
5352 }
5353 
5354 void  MacroAssembler::decode_heap_oop(Register d, Register s) {
5355 #ifdef ASSERT
5356   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5357 #endif
5358   if (CompressedOops::base() == nullptr) {
5359     if (CompressedOops::shift() != 0) {
5360       lsl(d, s, CompressedOops::shift());
5361     } else if (d != s) {
5362       mov(d, s);
5363     }
5364   } else {
5365     Label done;
5366     if (d != s)
5367       mov(d, s);
5368     cbz(s, done);
5369     add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
5370     bind(done);
5371   }
5372   verify_oop_msg(d, "broken oop in decode_heap_oop");
5373 }
5374 
5375 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
5376   assert (UseCompressedOops, "should only be used for compressed headers");
5377   assert (Universe::heap() != nullptr, "java heap should be initialized");
5378   // Cannot assert, unverified entry point counts instructions (see .ad file)
5379   // vtableStubs also counts instructions in pd_code_size_limit.
5380   // Also do not verify_oop as this is called by verify_oop.
5381   if (CompressedOops::shift() != 0) {
5382     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5383     if (CompressedOops::base() != nullptr) {
5384       add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5385     } else {
5386       add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
5387     }
5388   } else {
5389     assert (CompressedOops::base() == nullptr, "sanity");
5390   }
5391 }
5392 
5393 void  MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5394   assert (UseCompressedOops, "should only be used for compressed headers");
5395   assert (Universe::heap() != nullptr, "java heap should be initialized");
5396   // Cannot assert, unverified entry point counts instructions (see .ad file)
5397   // vtableStubs also counts instructions in pd_code_size_limit.
5398   // Also do not verify_oop as this is called by verify_oop.
5399   if (CompressedOops::shift() != 0) {
5400     assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
5401     if (CompressedOops::base() != nullptr) {
5402       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5403     } else {
5404       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
5405     }
5406   } else {
5407     assert (CompressedOops::base() == nullptr, "sanity");
5408     if (dst != src) {
5409       mov(dst, src);
5410     }
5411   }
5412 }
5413 
5414 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);
5415 
5416 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
5417   assert(Metaspace::initialized(), "metaspace not initialized yet");
5418   assert(_klass_decode_mode != KlassDecodeNone, "should be initialized");
5419   return _klass_decode_mode;
5420 }
5421 
5422 MacroAssembler::KlassDecodeMode  MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
5423   assert(UseCompressedClassPointers, "not using compressed class pointers");
5424 
5425   // KlassDecodeMode shouldn't be set already.
5426   assert(_klass_decode_mode == KlassDecodeNone, "set once");
5427 
5428   if (base == nullptr) {
5429     return KlassDecodeZero;
5430   }
5431 
5432   if (operand_valid_for_logical_immediate(
5433         /*is32*/false, (uint64_t)base)) {
5434     const uint64_t range_mask = right_n_bits(log2i_ceil(range));
5435     if (((uint64_t)base & range_mask) == 0) {
5436       return KlassDecodeXor;
5437     }
5438   }
5439 
5440   const uint64_t shifted_base =
5441     (uint64_t)base >> shift;
5442   if ((shifted_base & 0xffff0000ffffffff) == 0) {
5443     return KlassDecodeMovk;
5444   }
5445 
5446   // No valid encoding.
5447   return KlassDecodeNone;
5448 }
5449 
5450 // Check if one of the above decoding modes will work for given base, shift and range.
5451 bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) {
5452   return klass_decode_mode(base, shift, range) != KlassDecodeNone;
5453 }
5454 
5455 bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) {
5456   _klass_decode_mode = klass_decode_mode(base, shift, range);
5457   return _klass_decode_mode != KlassDecodeNone;
5458 }
5459 
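     // The usable modes decode as follows (sketch):
     //   KlassDecodeZero: klass =  narrow << shift          (base == nullptr)
     //   KlassDecodeXor:  klass = (narrow << shift) ^ base  (base is a logical
     //                    immediate that does not intersect the shifted range)
     //   KlassDecodeMovk: klass = (narrow | (base >> shift)) << shift, merged
     //                    with a single movk because base >> shift only has
     //                    bits 32..47 set.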
5460 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5461   switch (klass_decode_mode()) {
5462   case KlassDecodeZero:
5463     if (CompressedKlassPointers::shift() != 0) {
5464       lsr(dst, src, CompressedKlassPointers::shift());
5465     } else {
5466       if (dst != src) mov(dst, src);
5467     }
5468     break;
5469 
5470   case KlassDecodeXor:
5471     if (CompressedKlassPointers::shift() != 0) {
5472       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5473       lsr(dst, dst, CompressedKlassPointers::shift());
5474     } else {
5475       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5476     }
5477     break;
5478 
5479   case KlassDecodeMovk:
5480     if (CompressedKlassPointers::shift() != 0) {
5481       ubfx(dst, src, CompressedKlassPointers::shift(), 32);
5482     } else {
5483       movw(dst, src);
5484     }
5485     break;
5486 
5487   case KlassDecodeNone:
5488     ShouldNotReachHere();
5489     break;
5490   }
5491 }
5492 
5493 void MacroAssembler::encode_klass_not_null(Register r) {
5494   encode_klass_not_null(r, r);
5495 }
5496 
5497 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5498   assert (UseCompressedClassPointers, "should only be used for compressed headers");
5499 
5500   switch (klass_decode_mode()) {
5501   case KlassDecodeZero:
5502     if (CompressedKlassPointers::shift() != 0) {
5503       lsl(dst, src, CompressedKlassPointers::shift());
5504     } else {
5505       if (dst != src) mov(dst, src);
5506     }
5507     break;
5508 
5509   case KlassDecodeXor:
5510     if (CompressedKlassPointers::shift() != 0) {
5511       lsl(dst, src, CompressedKlassPointers::shift());
5512       eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
5513     } else {
5514       eor(dst, src, (uint64_t)CompressedKlassPointers::base());
5515     }
5516     break;
5517 
5518   case KlassDecodeMovk: {
5519     const uint64_t shifted_base =
5520       (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
5521 
5522     if (dst != src) movw(dst, src);
5523     movk(dst, shifted_base >> 32, 32);
5524 
5525     if (CompressedKlassPointers::shift() != 0) {
5526       lsl(dst, dst, CompressedKlassPointers::shift());
5527     }
5528 
5529     break;
5530   }
5531 
5532   case KlassDecodeNone:
5533     ShouldNotReachHere();
5534     break;
5535   }
5536 }
5537 
5538 void  MacroAssembler::decode_klass_not_null(Register r) {
5539   decode_klass_not_null(r, r);
5540 }
5541 
5542 void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5543 #ifdef ASSERT
5544   {
5545     ThreadInVMfromUnknown tiv;
5546     assert (UseCompressedOops, "should only be used for compressed oops");
5547     assert (Universe::heap() != nullptr, "java heap should be initialized");
5548     assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5549     assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5550   }
5551 #endif
5552   int oop_index = oop_recorder()->find_index(obj);
5553   InstructionMark im(this);
5554   RelocationHolder rspec = oop_Relocation::spec(oop_index);
5555   code_section()->relocate(inst_mark(), rspec);
5556   movz(dst, 0xDEAD, 16);
5557   movk(dst, 0xBEEF);
5558 }
5559 
5560 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5561   assert (UseCompressedClassPointers, "should only be used for compressed headers");
5562   assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5563   int index = oop_recorder()->find_index(k);
5564   assert(! Universe::heap()->is_in(k), "should not be an oop");
5565 
5566   InstructionMark im(this);
5567   RelocationHolder rspec = metadata_Relocation::spec(index);
5568   code_section()->relocate(inst_mark(), rspec);
5569   narrowKlass nk = CompressedKlassPointers::encode(k);
5570   movz(dst, (nk >> 16), 16);
5571   movk(dst, nk & 0xffff);
5572 }
5573 
5574 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
5575                                     Register dst, Address src,
5576                                     Register tmp1, Register tmp2) {
5577   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5578   decorators = AccessInternal::decorator_fixup(decorators, type);
5579   bool as_raw = (decorators & AS_RAW) != 0;
5580   if (as_raw) {
5581     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2);
5582   } else {
5583     bs->load_at(this, decorators, type, dst, src, tmp1, tmp2);
5584   }
5585 }
5586 
5587 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
5588                                      Address dst, Register val,
5589                                      Register tmp1, Register tmp2, Register tmp3) {
5590   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5591   decorators = AccessInternal::decorator_fixup(decorators, type);
5592   bool as_raw = (decorators & AS_RAW) != 0;
5593   if (as_raw) {
5594     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5595   } else {
5596     bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3);
5597   }
5598 }
5599 
5600 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst,
5601                                      Register inline_layout_info) {
5602   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
5603   bs->flat_field_copy(this, decorators, src, dst, inline_layout_info);
5604 }
5605 
5606 void MacroAssembler::payload_offset(Register inline_klass, Register offset) {
5607   ldr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset()));
5608   ldrw(offset, Address(offset, InlineKlass::payload_offset_offset()));
5609 }
5610 
5611 void MacroAssembler::payload_address(Register oop, Register data, Register inline_klass) {
5612   // ((address) (void*) o) + vk->payload_offset();
5613   Register offset = (data == oop) ? rscratch1 : data;
5614   payload_offset(inline_klass, offset);
5615   if (data == oop) {
5616     add(data, data, offset);
5617   } else {
5618     lea(data, Address(oop, offset));
5619   }
5620 }
5621 
5622 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass,
5623                                                 Register index, Register data) {
5624   assert_different_registers(array, array_klass, index);
5625   assert_different_registers(rscratch1, array, index);
5626 
5627   // array->base() + (index << Klass::layout_helper_log2_element_size(lh));
5628   ldrw(rscratch1, Address(array_klass, Klass::layout_helper_offset()));
5629 
5630   // Klass::layout_helper_log2_element_size(lh)
5631   // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
5632   lsr(rscratch1, rscratch1, Klass::_lh_log2_element_size_shift);
5633   andr(rscratch1, rscratch1, Klass::_lh_log2_element_size_mask);
5634   lslv(index, index, rscratch1);
5635 
5636   add(data, array, index);
5637   add(data, data, arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT));
5638 }
5639 
5640 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5641                                    Register tmp2, DecoratorSet decorators) {
5642   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
5643 }
5644 
5645 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
5646                                             Register tmp2, DecoratorSet decorators) {
5647   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2);
5648 }
5649 
5650 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1,
5651                                     Register tmp2, Register tmp3, DecoratorSet decorators) {
5652   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3);
5653 }
5654 
5655 // Used for storing nulls.
5656 void MacroAssembler::store_heap_oop_null(Address dst) {
5657   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
5658 }
5659 
5660 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
5661   assert(oop_recorder() != nullptr, "this assembler needs a Recorder");
5662   int index = oop_recorder()->allocate_metadata_index(obj);
5663   RelocationHolder rspec = metadata_Relocation::spec(index);
5664   return Address((address)obj, rspec);
5665 }
5666 
5667 // Move an oop into a register.
5668 void MacroAssembler::movoop(Register dst, jobject obj) {
5669   int oop_index;
5670   if (obj == nullptr) {
5671     oop_index = oop_recorder()->allocate_oop_index(obj);
5672   } else {
5673 #ifdef ASSERT
5674     {
5675       ThreadInVMfromUnknown tiv;
5676       assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
5677     }
5678 #endif
5679     oop_index = oop_recorder()->find_index(obj);
5680   }
5681   RelocationHolder rspec = oop_Relocation::spec(oop_index);
5682 
5683   if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
5684     mov(dst, Address((address)obj, rspec));
5685   } else {
5686     address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
5687     ldr_constant(dst, Address(dummy, rspec));
5688   }
5689 
5690 }
5691 
5692 // Move a metadata address into a register.
5693 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
5694   int oop_index;
5695   if (obj == nullptr) {
5696     oop_index = oop_recorder()->allocate_metadata_index(obj);
5697   } else {
5698     oop_index = oop_recorder()->find_index(obj);
5699   }
5700   RelocationHolder rspec = metadata_Relocation::spec(oop_index);
5701   mov(dst, Address((address)obj, rspec));
5702 }
5703 
5704 Address MacroAssembler::constant_oop_address(jobject obj) {
5705 #ifdef ASSERT
5706   {
5707     ThreadInVMfromUnknown tiv;
5708     assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder");
5709     assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
5710   }
5711 #endif
5712   int oop_index = oop_recorder()->find_index(obj);
5713   return Address((address)obj, oop_Relocation::spec(oop_index));
5714 }
5715 
5716 // Object / value buffer allocation...
5717 void MacroAssembler::allocate_instance(Register klass, Register new_obj,
5718                                        Register t1, Register t2,
5719                                        bool clear_fields, Label& alloc_failed)
5720 {
5721   Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop;
5722   Register layout_size = t1;
5723   assert(new_obj == r0, "needs to be r0");
5724   assert_different_registers(klass, new_obj, t1, t2);
5725 
5726   // get instance_size in InstanceKlass (scaled to a count of bytes)
5727   ldrw(layout_size, Address(klass, Klass::layout_helper_offset()));
5728   // test to see if it is malformed in some way
5729   tst(layout_size, Klass::_lh_instance_slow_path_bit);
5730   br(Assembler::NE, slow_case_no_pop);
5731 
5732   // Allocate the instance:
5733   //  If TLAB is enabled:
5734   //    Try to allocate in the TLAB.
5735   //    If fails, go to the slow path.
5736   //    Initialize the allocation.
5737   //    Exit.
5738   //
5739   //  Go to slow path.
5740 
5741   if (UseTLAB) {
5742     push(klass);
5743     tlab_allocate(new_obj, layout_size, 0, klass, t2, slow_case);
5744     if (ZeroTLAB || (!clear_fields)) {
5745       // the fields have been already cleared
5746       b(initialize_header);
5747     } else {
5748       // initialize both the header and fields
5749       b(initialize_object);
5750     }
5751 
5752     if (clear_fields) {
      // The fields are initialized before the header.  If the size of the
      // fields (instance size minus header size) is zero, go directly to the
      // header initialization.
5755       bind(initialize_object);
5756       int header_size = oopDesc::header_size() * HeapWordSize;
5757       assert(is_aligned(header_size, BytesPerLong), "oop header size must be 8-byte-aligned");
5758       subs(layout_size, layout_size, header_size);
5759       br(Assembler::EQ, initialize_header);
5760 
      // Compute the field size in 64-bit words; the loop below clears the
      // fields from the topmost word of the object downwards.
5763 
5764   #ifdef ASSERT
5765       // make sure instance_size was multiple of 8
5766       Label L;
5767       tst(layout_size, 7);
5768       br(Assembler::EQ, L);
5769       stop("object size is not multiple of 8 - adjust this code");
5770       bind(L);
5771       // must be > 0, no extra check needed here
5772   #endif
5773 
5774       lsr(layout_size, layout_size, LogBytesPerLong);
5775 
5776       // initialize remaining object fields: instance_size was a multiple of 8
5777       {
5778         Label loop;
5781         bind(loop);
5782         add(rscratch1, new_obj, layout_size, Assembler::LSL, LogBytesPerLong);
5783         str(zr, Address(rscratch1, header_size - 1*oopSize));
5784         subs(layout_size, layout_size, 1);
5785         br(Assembler::NE, loop);
5786       }
5787     } // clear_fields
5788 
5789     // initialize object header only.
5790     bind(initialize_header);
5791     pop(klass);
5792     Register mark_word = t2;
5793     if (UseCompactObjectHeaders || EnableValhalla) {
5794       ldr(mark_word, Address(klass, Klass::prototype_header_offset()));
5795       str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes()));
5796     } else {
5797       mov(mark_word, (intptr_t)markWord::prototype().value());
5798       str(mark_word, Address(new_obj, oopDesc::mark_offset_in_bytes()));
5799     }
5800     if (!UseCompactObjectHeaders) {
5801       store_klass_gap(new_obj, zr);  // zero klass gap for compressed oops
5802       mov(t2, klass);                // preserve klass
5803       store_klass(new_obj, t2);      // src klass reg is potentially compressed
5804     }
    // TODO: Valhalla removed SharedRuntime::dtrace_object_alloc from here; should it be restored?
5806 
5807     b(done);
5808   }
5809 
5810   if (UseTLAB) {
5811     bind(slow_case);
5812     pop(klass);
5813   }
5814   bind(slow_case_no_pop);
5815   b(alloc_failed);
5816 
5817   bind(done);
5818 }
5819 
5820 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5821 void MacroAssembler::tlab_allocate(Register obj,
5822                                    Register var_size_in_bytes,
5823                                    int con_size_in_bytes,
5824                                    Register t1,
5825                                    Register t2,
5826                                    Label& slow_case) {
5827   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
5828   bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
5829 }
5830 
5831 void MacroAssembler::inc_held_monitor_count(Register tmp) {
5832   Address dst(rthread, JavaThread::held_monitor_count_offset());
5833 #ifdef ASSERT
5834   ldr(tmp, dst);
5835   increment(tmp);
5836   str(tmp, dst);
5837   Label ok;
5838   tbz(tmp, 63, ok);
5839   STOP("assert(held monitor count underflow)");
5840   should_not_reach_here();
5841   bind(ok);
5842 #else
5843   increment(dst);
5844 #endif
5845 }
5846 
5847 void MacroAssembler::dec_held_monitor_count(Register tmp) {
5848   Address dst(rthread, JavaThread::held_monitor_count_offset());
5849 #ifdef ASSERT
5850   ldr(tmp, dst);
5851   decrement(tmp);
5852   str(tmp, dst);
5853   Label ok;
5854   tbz(tmp, 63, ok);
5855   STOP("assert(held monitor count underflow)");
5856   should_not_reach_here();
5857   bind(ok);
5858 #else
5859   decrement(dst);
5860 #endif
5861 }
5862 
5863 void MacroAssembler::verify_tlab() {
5864 #ifdef ASSERT
5865   if (UseTLAB && VerifyOops) {
5866     Label next, ok;
5867 
5868     stp(rscratch2, rscratch1, Address(pre(sp, -16)));
5869 
5870     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5871     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
5872     cmp(rscratch2, rscratch1);
5873     br(Assembler::HS, next);
5874     STOP("assert(top >= start)");
5875     should_not_reach_here();
5876 
5877     bind(next);
5878     ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5879     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5880     cmp(rscratch2, rscratch1);
5881     br(Assembler::HS, ok);
5882     STOP("assert(top <= end)");
5883     should_not_reach_here();
5884 
5885     bind(ok);
5886     ldp(rscratch2, rscratch1, Address(post(sp, 16)));
5887   }
5888 #endif
5889 }
5890 
5891 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) {
5892   inline_layout_info(holder_klass, index, inline_klass);
5893   ldr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset()));
5894 }
5895 
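// Computes the address of the InlineLayoutInfo for a field, given its holder
// klass and field index.  Roughly the following, as an illustrative sketch
// (not emitted code):
//
//   layout_info = holder_klass->inline_layout_info_array()->adr_at(index)
//               = *(holder_klass + inline_layout_info_array_offset)
//                   + base_offset + index * sizeof(InlineLayoutInfo)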
5896 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) {
5897   assert_different_registers(holder_klass, index, layout_info);
5898   InlineLayoutInfo array[2];
  int size = (char*)&array[1] - (char*)&array[0]; // size of one InlineLayoutInfo element, including padding
5900   if (is_power_of_2(size)) {
5901     lsl(index, index, log2i_exact(size)); // Scale index by power of 2
5902   } else {
5903     mov(layout_info, size);
5904     mul(index, index, layout_info); // Scale the index to be the entry index * array_element_size
5905   }
5906   ldr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset()));
5907   add(layout_info, layout_info, Array<InlineLayoutInfo>::base_offset_in_bytes());
5908   lea(layout_info, Address(layout_info, index));
5909 }
5910 
5911 // Writes to stack successive pages until offset reached to check for
5912 // stack overflow + shadow pages.  This clobbers tmp.
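//
// A rough C sketch of the probing below (illustrative only):
//
//   char* p = sp;
//   long  n = size;
//   do {                              // bang one word per page until the
//     p -= page_size;                 // requested size has been covered
//     *(long*)p = (n -= page_size);
//   } while (n > 0);
//   for (int i = 1; i < StackShadowPages; i++)
//     *(long*)(p -= page_size) = n;   // then bang the shadow pages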
5913 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5914   assert_different_registers(tmp, size, rscratch1);
5915   mov(tmp, sp);
5916   // Bang stack for total size given plus shadow page size.
5917   // Bang one page at a time because large size can bang beyond yellow and
5918   // red zones.
5919   Label loop;
5920   mov(rscratch1, (int)os::vm_page_size());
5921   bind(loop);
5922   lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5923   subsw(size, size, rscratch1);
5924   str(size, Address(tmp));
5925   br(Assembler::GT, loop);
5926 
5927   // Bang down shadow pages too.
5928   // At this point, (tmp-0) is the last address touched, so don't
5929   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
5930   // was post-decremented.)  Skip this address by starting at i=1, and
5931   // touch a few more pages below.  N.B.  It is important to touch all
5932   // the way down to and including i=StackShadowPages.
5933   for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
    // This could be a store of any size, but it can serve as a debugging
    // crumb, so the bigger the better.
5936     lea(tmp, Address(tmp, -(int)os::vm_page_size()));
5937     str(size, Address(tmp));
5938   }
5939 }
5940 
5941 // Move the address of the polling page into dest.
5942 void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
5943   ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
5944 }
5945 
5946 // Read the polling page.  The address of the polling page must
5947 // already be in r.
5948 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
5949   address mark;
5950   {
5951     InstructionMark im(this);
5952     code_section()->relocate(inst_mark(), rtype);
5953     ldrw(zr, Address(r, 0));
5954     mark = inst_mark();
5955   }
5956   verify_cross_modify_fence_not_required();
5957   return mark;
5958 }
5959 
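// Form the page address of dest in reg1 and return the low 12 bits in
// byte_offset.  A single adrp reaches +/-4GB; when that cannot be
// guaranteed from every possible location in the code cache, an adrp to a
// nearby page that shares the low 32 bits of dest is paired with a movk
// that installs the high 32 bits.  An illustrative example (addresses
// invented):
//
//   dest = 0x0000_5512_3456_7000
//   adrp reg1, <page with the same bits 31..12 as dest>
//   movk reg1, #0x5512, lsl #32     // now reg1 == dest & ~0xfff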
5960 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
5961   relocInfo::relocType rtype = dest.rspec().reloc()->type();
5962   uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
5963   uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
5964   uint64_t dest_page = (uint64_t)dest.target() >> 12;
5965   int64_t offset_low = dest_page - low_page;
5966   int64_t offset_high = dest_page - high_page;
5967 
5968   assert(is_valid_AArch64_address(dest.target()), "bad address");
5969   assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
5970 
5971   InstructionMark im(this);
5972   code_section()->relocate(inst_mark(), dest.rspec());
  // 8143067: Ensure that the adrp can reach the dest from anywhere within
  // the code cache, so that if it is relocated we know it will still reach it.
5975   if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
5976     _adrp(reg1, dest.target());
5977   } else {
5978     uint64_t target = (uint64_t)dest.target();
5979     uint64_t adrp_target
5980       = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
5981 
5982     _adrp(reg1, (address)adrp_target);
5983     movk(reg1, target >> 32, 32);
5984   }
5985   byte_offset = (uint64_t)dest.target() & 0xfff;
5986 }
5987 
5988 void MacroAssembler::load_byte_map_base(Register reg) {
5989   CardTable::CardValue* byte_map_base =
5990     ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
5991 
5992   // Strictly speaking the byte_map_base isn't an address at all, and it might
5993   // even be negative. It is thus materialised as a constant.
5994   mov(reg, (uint64_t)byte_map_base);
5995 }
5996 
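// Build a frame of the given size after saving FP/LR.  The (1 << 9)
// threshold below reflects the reach of stp's scaled 7-bit signed
// immediate, which for a pair of 64-bit registers tops out at an offset of
// 504 bytes; small frames can therefore drop sp first and store the FP/LR
// pair at the top, while larger frames must save FP/LR with a pre-index
// store and then lower sp separately.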
5997 void MacroAssembler::build_frame(int framesize) {
5998   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
5999   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
6000   protect_return_address();
6001   if (framesize < ((1 << 9) + 2 * wordSize)) {
6002     sub(sp, sp, framesize);
6003     stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
6004     if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
6005   } else {
6006     stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
6007     if (PreserveFramePointer) mov(rfp, sp);
6008     if (framesize < ((1 << 12) + 2 * wordSize))
6009       sub(sp, sp, framesize - 2 * wordSize);
6010     else {
6011       mov(rscratch1, framesize - 2 * wordSize);
6012       sub(sp, sp, rscratch1);
6013     }
6014   }
6015   verify_cross_modify_fence_not_required();
6016 }
6017 
6018 void MacroAssembler::remove_frame(int framesize) {
6019   assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
6020   assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
6021   if (framesize < ((1 << 9) + 2 * wordSize)) {
6022     ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
6023     add(sp, sp, framesize);
6024   } else {
6025     if (framesize < ((1 << 12) + 2 * wordSize))
6026       add(sp, sp, framesize - 2 * wordSize);
6027     else {
6028       mov(rscratch1, framesize - 2 * wordSize);
6029       add(sp, sp, rscratch1);
6030     }
6031     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
6032   }
6033   authenticate_return_address();
6034 }
6035 
6036 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) {
6037   if (needs_stack_repair) {
6038     // Remove the extension of the caller's frame used for inline type unpacking
6039     //
6040     // Right now the stack looks like this:
6041     //
6042     // | Arguments from caller     |
6043     // |---------------------------|  <-- caller's SP
6044     // | Saved LR #1               |
6045     // | Saved FP #1               |
6046     // |---------------------------|
6047     // | Extension space for       |
6048     // |   inline arg (un)packing  |
6049     // |---------------------------|  <-- start of this method's frame
6050     // | Saved LR #2               |
6051     // | Saved FP #2               |
6052     // |---------------------------|  <-- FP
6053     // | sp_inc                    |
6054     // | method locals             |
6055     // |---------------------------|  <-- SP
6056     //
6057     // There are two copies of FP and LR on the stack. They will be identical
6058     // unless the caller has been deoptimized, in which case LR #1 will be patched
6059     // to point at the deopt blob, and LR #2 will still point into the old method.
6060     //
6061     // The sp_inc stack slot holds the total size of the frame including the
6062     // extension space minus two words for the saved FP and LR.
6063 
6064     int sp_inc_offset = initial_framesize - 3 * wordSize;  // Immediately below saved LR and FP
6065 
6066     ldr(rscratch1, Address(sp, sp_inc_offset));
6067     add(sp, sp, rscratch1);
6068     ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
6069   } else {
6070     remove_frame(initial_framesize);
6071   }
6072 }
6073 
6074 void MacroAssembler::save_stack_increment(int sp_inc, int frame_size) {
6075   int real_frame_size = frame_size + sp_inc;
6076   assert(sp_inc == 0 || sp_inc > 2*wordSize, "invalid sp_inc value");
6077   assert(real_frame_size >= 2*wordSize, "frame size must include FP/LR space");
6078   assert((real_frame_size & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6079 
6080   int sp_inc_offset = frame_size - 3 * wordSize;  // Immediately below saved LR and FP
6081 
6082   // Subtract two words for the saved FP and LR as these will be popped
6083   // separately. See remove_frame above.
6084   mov(rscratch1, real_frame_size - 2*wordSize);
6085   str(rscratch1, Address(sp, sp_inc_offset));
6086 }
6087 
// This method counts the leading positive bytes (highest bit not set) in the provided byte array
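// Conceptually equivalent scalar code (an illustrative sketch; the emitted
// code works a word at a time, so when a negative byte is found it may
// return any value between 0 and that byte's index):
//
//   int count_positives(byte* ary1, int len) {
//     for (int i = 0; i < len; i++)
//       if (ary1[i] < 0) return i;
//     return len;
//   }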
6089 address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
    // The simple and most common case (a small, aligned array that is not at
    // the end of a memory page) is handled here. All other cases are handled
    // in the stub.
6092     Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
6093     const uint64_t UPPER_BIT_MASK=0x8080808080808080;
6094     assert_different_registers(ary1, len, result);
6095 
6096     mov(result, len);
6097     cmpw(len, 0);
6098     br(LE, DONE);
6099     cmpw(len, 4 * wordSize);
6100     br(GE, STUB_LONG); // size > 32 then go to stub
6101 
6102     int shift = 64 - exact_log2(os::vm_page_size());
6103     lsl(rscratch1, ary1, shift);
6104     mov(rscratch2, (size_t)(4 * wordSize) << shift);
6105     adds(rscratch2, rscratch1, rscratch2);  // At end of page?
6106     br(CS, STUB); // at the end of page then go to stub
6107     subs(len, len, wordSize);
6108     br(LT, END);
6109 
6110   BIND(LOOP);
6111     ldr(rscratch1, Address(post(ary1, wordSize)));
6112     tst(rscratch1, UPPER_BIT_MASK);
6113     br(NE, SET_RESULT);
6114     subs(len, len, wordSize);
6115     br(GE, LOOP);
6116     cmpw(len, -wordSize);
6117     br(EQ, DONE);
6118 
6119   BIND(END);
6120     ldr(rscratch1, Address(ary1));
6121     sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
6122     lslv(rscratch1, rscratch1, rscratch2);
6123     tst(rscratch1, UPPER_BIT_MASK);
6124     br(NE, SET_RESULT);
6125     b(DONE);
6126 
6127   BIND(STUB);
6128     RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives());
6129     assert(count_pos.target() != nullptr, "count_positives stub has not been generated");
6130     address tpc1 = trampoline_call(count_pos);
6131     if (tpc1 == nullptr) {
6132       DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE));
6133       postcond(pc() == badAddress);
6134       return nullptr;
6135     }
6136     b(DONE);
6137 
6138   BIND(STUB_LONG);
6139     RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long());
6140     assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated");
6141     address tpc2 = trampoline_call(count_pos_long);
6142     if (tpc2 == nullptr) {
6143       DEBUG_ONLY(reset_labels(SET_RESULT, DONE));
6144       postcond(pc() == badAddress);
6145       return nullptr;
6146     }
6147     b(DONE);
6148 
6149   BIND(SET_RESULT);
6150 
6151     add(len, len, wordSize);
6152     sub(result, result, len);
6153 
6154   BIND(DONE);
6155   postcond(pc() != badAddress);
6156   return pc();
6157 }
6158 
6159 // Clobbers: rscratch1, rscratch2, rflags
6160 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals)
6161 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
6162                                       Register tmp4, Register tmp5, Register result,
6163                                       Register cnt1, int elem_size) {
6164   Label DONE, SAME;
6165   Register tmp1 = rscratch1;
6166   Register tmp2 = rscratch2;
6167   int elem_per_word = wordSize/elem_size;
6168   int log_elem_size = exact_log2(elem_size);
6169   int klass_offset  = arrayOopDesc::klass_offset_in_bytes();
6170   int length_offset = arrayOopDesc::length_offset_in_bytes();
6171   int base_offset
6172     = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
  // When the length offset is not aligned to 8 bytes, we align it down.
  // This is valid because the new start offset is then the klass offset,
  // which is the same for all type arrays.
6177   int start_offset = align_down(length_offset, BytesPerWord);
6178   int extra_length = base_offset - start_offset;
6179   assert(start_offset == length_offset || start_offset == klass_offset,
6180          "start offset must be 8-byte-aligned or be the klass offset");
6181   assert(base_offset != start_offset, "must include the length field");
6182   extra_length = extra_length / elem_size; // We count in elements, not bytes.
6183   int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16);
6184 
6185   assert(elem_size == 1 || elem_size == 2, "must be char or byte");
6186   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
6187 
6188 #ifndef PRODUCT
6189   {
6190     const char kind = (elem_size == 2) ? 'U' : 'L';
6191     char comment[64];
6192     snprintf(comment, sizeof comment, "array_equals%c{", kind);
6193     BLOCK_COMMENT(comment);
6194   }
6195 #endif
6196 
6197   // if (a1 == a2)
6198   //     return true;
6199   cmpoop(a1, a2); // May have read barriers for a1 and a2.
6200   br(EQ, SAME);
6201 
6202   if (UseSimpleArrayEquals) {
6203     Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL;
6204     // if (a1 == nullptr || a2 == nullptr)
6205     //     return false;
6206     // a1 & a2 == 0 means (some-pointer is null) or
6207     // (very-rare-or-even-probably-impossible-pointer-values)
6208     // so, we can save one branch in most cases
6209     tst(a1, a2);
6210     mov(result, false);
6211     br(EQ, A_MIGHT_BE_NULL);
6212     // if (a1.length != a2.length)
6213     //      return false;
6214     bind(A_IS_NOT_NULL);
6215     ldrw(cnt1, Address(a1, length_offset));
6216     // Increase loop counter by diff between base- and actual start-offset.
6217     addw(cnt1, cnt1, extra_length);
6218     lea(a1, Address(a1, start_offset));
6219     lea(a2, Address(a2, start_offset));
6220     // Check for short strings, i.e. smaller than wordSize.
6221     subs(cnt1, cnt1, elem_per_word);
6222     br(Assembler::LT, SHORT);
6223     // Main 8 byte comparison loop.
6224     bind(NEXT_WORD); {
6225       ldr(tmp1, Address(post(a1, wordSize)));
6226       ldr(tmp2, Address(post(a2, wordSize)));
6227       subs(cnt1, cnt1, elem_per_word);
6228       eor(tmp5, tmp1, tmp2);
6229       cbnz(tmp5, DONE);
6230     } br(GT, NEXT_WORD);
6231     // Last longword.  In the case where length == 4 we compare the
6232     // same longword twice, but that's still faster than another
6233     // conditional branch.
6234     // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
6235     // length == 4.
6236     if (log_elem_size > 0)
6237       lsl(cnt1, cnt1, log_elem_size);
6238     ldr(tmp3, Address(a1, cnt1));
6239     ldr(tmp4, Address(a2, cnt1));
6240     eor(tmp5, tmp3, tmp4);
6241     cbnz(tmp5, DONE);
6242     b(SAME);
6243     bind(A_MIGHT_BE_NULL);
6244     // in case both a1 and a2 are not-null, proceed with loads
6245     cbz(a1, DONE);
6246     cbz(a2, DONE);
6247     b(A_IS_NOT_NULL);
6248     bind(SHORT);
6249 
6250     tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
6251     {
6252       ldrw(tmp1, Address(post(a1, 4)));
6253       ldrw(tmp2, Address(post(a2, 4)));
6254       eorw(tmp5, tmp1, tmp2);
6255       cbnzw(tmp5, DONE);
6256     }
6257     bind(TAIL03);
6258     tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
6259     {
6260       ldrh(tmp3, Address(post(a1, 2)));
6261       ldrh(tmp4, Address(post(a2, 2)));
6262       eorw(tmp5, tmp3, tmp4);
6263       cbnzw(tmp5, DONE);
6264     }
6265     bind(TAIL01);
6266     if (elem_size == 1) { // Only needed when comparing byte arrays.
6267       tbz(cnt1, 0, SAME); // 0-1 bytes left.
6268       {
6269         ldrb(tmp1, a1);
6270         ldrb(tmp2, a2);
6271         eorw(tmp5, tmp1, tmp2);
6272         cbnzw(tmp5, DONE);
6273       }
6274     }
6275   } else {
6276     Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB,
6277         CSET_EQ, LAST_CHECK;
6278     mov(result, false);
6279     cbz(a1, DONE);
6280     ldrw(cnt1, Address(a1, length_offset));
6281     cbz(a2, DONE);
6282     // Increase loop counter by diff between base- and actual start-offset.
6283     addw(cnt1, cnt1, extra_length);
6284 
    // On most CPUs a2 is (surprisingly) still busy after the ldrw above, so
    // it's faster to perform another branch before comparing a1 and a2
6287     cmp(cnt1, (u1)elem_per_word);
6288     br(LE, SHORT); // short or same
6289     ldr(tmp3, Address(pre(a1, start_offset)));
6290     subs(zr, cnt1, stubBytesThreshold);
6291     br(GE, STUB);
6292     ldr(tmp4, Address(pre(a2, start_offset)));
6293     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6294 
6295     // Main 16 byte comparison loop with 2 exits
6296     bind(NEXT_DWORD); {
6297       ldr(tmp1, Address(pre(a1, wordSize)));
6298       ldr(tmp2, Address(pre(a2, wordSize)));
6299       subs(cnt1, cnt1, 2 * elem_per_word);
6300       br(LE, TAIL);
6301       eor(tmp4, tmp3, tmp4);
6302       cbnz(tmp4, DONE);
6303       ldr(tmp3, Address(pre(a1, wordSize)));
6304       ldr(tmp4, Address(pre(a2, wordSize)));
6305       cmp(cnt1, (u1)elem_per_word);
6306       br(LE, TAIL2);
6307       cmp(tmp1, tmp2);
6308     } br(EQ, NEXT_DWORD);
6309     b(DONE);
6310 
6311     bind(TAIL);
6312     eor(tmp4, tmp3, tmp4);
6313     eor(tmp2, tmp1, tmp2);
6314     lslv(tmp2, tmp2, tmp5);
6315     orr(tmp5, tmp4, tmp2);
6316     cmp(tmp5, zr);
6317     b(CSET_EQ);
6318 
6319     bind(TAIL2);
6320     eor(tmp2, tmp1, tmp2);
6321     cbnz(tmp2, DONE);
6322     b(LAST_CHECK);
6323 
6324     bind(STUB);
6325     ldr(tmp4, Address(pre(a2, start_offset)));
6326     if (elem_size == 2) { // convert to byte counter
6327       lsl(cnt1, cnt1, 1);
6328     }
6329     eor(tmp5, tmp3, tmp4);
6330     cbnz(tmp5, DONE);
6331     RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals());
    assert(stub.target() != nullptr, "large_array_equals stub has not been generated");
6333     address tpc = trampoline_call(stub);
6334     if (tpc == nullptr) {
6335       DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE));
6336       postcond(pc() == badAddress);
6337       return nullptr;
6338     }
6339     b(DONE);
6340 
6345     bind(SHORT);
6346     sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
6347     ldr(tmp3, Address(a1, start_offset));
6348     ldr(tmp4, Address(a2, start_offset));
6349     bind(LAST_CHECK);
6350     eor(tmp4, tmp3, tmp4);
6351     lslv(tmp5, tmp4, tmp5);
6352     cmp(tmp5, zr);
6353     bind(CSET_EQ);
6354     cset(result, EQ);
6355     b(DONE);
6356   }
6357 
6358   bind(SAME);
6359   mov(result, true);
6360   // That's it.
6361   bind(DONE);
6362 
6363   BLOCK_COMMENT("} array_equals");
6364   postcond(pc() != badAddress);
6365   return pc();
6366 }
6367 
6368 // Compare Strings
6369 
6370 // For Strings we're passed the address of the first characters in a1
6371 // and a2 and the length in cnt1.
6372 // There are two implementations.  For arrays >= 8 bytes, all
6373 // comparisons (including the final one, which may overlap) are
// performed 8 bytes at a time.  For strings < 8 bytes, we compare a
// word, then a halfword, and then a byte.
6376 
6377 void MacroAssembler::string_equals(Register a1, Register a2,
6378                                    Register result, Register cnt1)
6379 {
6380   Label SAME, DONE, SHORT, NEXT_WORD;
6381   Register tmp1 = rscratch1;
6382   Register tmp2 = rscratch2;
6384 
6385   assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
6386 
6387 #ifndef PRODUCT
6388   {
6389     char comment[64];
6390     snprintf(comment, sizeof comment, "{string_equalsL");
6391     BLOCK_COMMENT(comment);
6392   }
6393 #endif
6394 
6395   mov(result, false);
6396 
6397   // Check for short strings, i.e. smaller than wordSize.
6398   subs(cnt1, cnt1, wordSize);
6399   br(Assembler::LT, SHORT);
6400   // Main 8 byte comparison loop.
6401   bind(NEXT_WORD); {
6402     ldr(tmp1, Address(post(a1, wordSize)));
6403     ldr(tmp2, Address(post(a2, wordSize)));
6404     subs(cnt1, cnt1, wordSize);
6405     eor(tmp1, tmp1, tmp2);
6406     cbnz(tmp1, DONE);
6407   } br(GT, NEXT_WORD);
6408   // Last longword.  In the case where length == 4 we compare the
6409   // same longword twice, but that's still faster than another
6410   // conditional branch.
6411   // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
6412   // length == 4.
6413   ldr(tmp1, Address(a1, cnt1));
6414   ldr(tmp2, Address(a2, cnt1));
6415   eor(tmp2, tmp1, tmp2);
6416   cbnz(tmp2, DONE);
6417   b(SAME);
6418 
6419   bind(SHORT);
6420   Label TAIL03, TAIL01;
6421 
6422   tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
6423   {
6424     ldrw(tmp1, Address(post(a1, 4)));
6425     ldrw(tmp2, Address(post(a2, 4)));
6426     eorw(tmp1, tmp1, tmp2);
6427     cbnzw(tmp1, DONE);
6428   }
6429   bind(TAIL03);
6430   tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
6431   {
6432     ldrh(tmp1, Address(post(a1, 2)));
6433     ldrh(tmp2, Address(post(a2, 2)));
6434     eorw(tmp1, tmp1, tmp2);
6435     cbnzw(tmp1, DONE);
6436   }
6437   bind(TAIL01);
6438   tbz(cnt1, 0, SAME); // 0-1 bytes left.
6439     {
6440     ldrb(tmp1, a1);
6441     ldrb(tmp2, a2);
6442     eorw(tmp1, tmp1, tmp2);
6443     cbnzw(tmp1, DONE);
6444   }
6445   // Arrays are equal.
6446   bind(SAME);
6447   mov(result, true);
6448 
6449   // That's it.
6450   bind(DONE);
6451   BLOCK_COMMENT("} string_equals");
6452 }
6453 
6454 
6455 // The size of the blocks erased by the zero_blocks stub.  We must
6456 // handle anything smaller than this ourselves in zero_words().
6457 const int MacroAssembler::zero_words_block_size = 8;
6458 
6459 // zero_words() is used by C2 ClearArray patterns and by
6460 // C1_MacroAssembler.  It is as small as possible, handling small word
6461 // counts locally and delegating anything larger to the zero_blocks
6462 // stub.  It is expanded many times in compiled code, so it is
6463 // important to keep it short.
6464 
6465 // ptr:   Address of a buffer to be zeroed.
6466 // cnt:   Count in HeapWords.
6467 //
6468 // ptr, cnt, rscratch1, and rscratch2 are clobbered.
6469 address MacroAssembler::zero_words(Register ptr, Register cnt)
6470 {
6471   assert(is_power_of_2(zero_words_block_size), "adjust this");
6472 
6473   BLOCK_COMMENT("zero_words {");
6474   assert(ptr == r10 && cnt == r11, "mismatch in register usage");
6477 
6478   subs(rscratch1, cnt, zero_words_block_size);
6479   Label around;
6480   br(LO, around);
6481   {
6482     RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
6483     assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
6484     // Make sure this is a C2 compilation. C1 allocates space only for
6485     // trampoline stubs generated by Call LIR ops, and in any case it
6486     // makes sense for a C1 compilation task to proceed as quickly as
6487     // possible.
6488     CompileTask* task;
6489     if (StubRoutines::aarch64::complete()
6490         && Thread::current()->is_Compiler_thread()
6491         && (task = ciEnv::current()->task())
6492         && is_c2_compile(task->comp_level())) {
6493       address tpc = trampoline_call(zero_blocks);
6494       if (tpc == nullptr) {
6495         DEBUG_ONLY(reset_labels(around));
6496         return nullptr;
6497       }
6498     } else {
6499       far_call(zero_blocks);
6500     }
6501   }
6502   bind(around);
6503 
6504   // We have a few words left to do. zero_blocks has adjusted r10 and r11
6505   // for us.
6506   for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
6507     Label l;
6508     tbz(cnt, exact_log2(i), l);
6509     for (int j = 0; j < i; j += 2) {
6510       stp(zr, zr, post(ptr, 2 * BytesPerWord));
6511     }
6512     bind(l);
6513   }
6514   {
6515     Label l;
6516     tbz(cnt, 0, l);
6517     str(zr, Address(ptr));
6518     bind(l);
6519   }
6520 
6521   BLOCK_COMMENT("} zero_words");
6522   return pc();
6523 }
6524 
6525 // base:         Address of a buffer to be zeroed, 8 bytes aligned.
6526 // cnt:          Immediate count in HeapWords.
6527 //
6528 // r10, r11, rscratch1, and rscratch2 are clobbered.
6529 address MacroAssembler::zero_words(Register base, uint64_t cnt)
6530 {
6531   assert(wordSize <= BlockZeroingLowLimit,
6532             "increase BlockZeroingLowLimit");
6533   address result = nullptr;
6534   if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) {
6535 #ifndef PRODUCT
6536     {
6537       char buf[64];
6538       snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
6539       BLOCK_COMMENT(buf);
6540     }
6541 #endif
6542     if (cnt >= 16) {
6543       uint64_t loops = cnt/16;
6544       if (loops > 1) {
6545         mov(rscratch2, loops - 1);
6546       }
6547       {
6548         Label loop;
6549         bind(loop);
6550         for (int i = 0; i < 16; i += 2) {
6551           stp(zr, zr, Address(base, i * BytesPerWord));
6552         }
6553         add(base, base, 16 * BytesPerWord);
6554         if (loops > 1) {
6555           subs(rscratch2, rscratch2, 1);
6556           br(GE, loop);
6557         }
6558       }
6559     }
6560     cnt %= 16;
6561     int i = cnt & 1;  // store any odd word to start
6562     if (i) str(zr, Address(base));
6563     for (; i < (int)cnt; i += 2) {
6564       stp(zr, zr, Address(base, i * wordSize));
6565     }
6566     BLOCK_COMMENT("} zero_words");
6567     result = pc();
6568   } else {
6569     mov(r10, base); mov(r11, cnt);
6570     result = zero_words(r10, r11);
6571   }
6572   return result;
6573 }
6574 
6575 // Zero blocks of memory by using DC ZVA.
6576 //
6577 // Aligns the base address first sufficiently for DC ZVA, then uses
6578 // DC ZVA repeatedly for every full block.  cnt is the size to be
6579 // zeroed in HeapWords.  Returns the count of words left to be zeroed
6580 // in cnt.
6581 //
6582 // NOTE: This is intended to be used in the zero_blocks() stub.  If
6583 // you want to use it elsewhere, note that cnt must be >= 2*zva_length.
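//
// The alignment step works by branching backwards into a table of stp
// instructions, one per 16 bytes of misalignment (a computed goto).
// Roughly (illustrative sketch):
//
//   tmp = -base & (zva_length - 1);     // bytes needed to align base
//   goto initial_table_end - tmp/4;     // tmp/16 stps, 4 code bytes each
//   ...                                 // the stps fall through to the loop
//   do { dc_zva(base); base += zva_length; } while (enough words remain);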
6584 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
6585   Register tmp = rscratch1;
6586   Register tmp2 = rscratch2;
6587   int zva_length = VM_Version::zva_length();
6588   Label initial_table_end, loop_zva;
6589   Label fini;
6590 
  // Base must be 16-byte aligned. If not, just return and let the caller handle it.
6592   tst(base, 0x0f);
6593   br(Assembler::NE, fini);
6594   // Align base with ZVA length.
6595   neg(tmp, base);
6596   andr(tmp, tmp, zva_length - 1);
6597 
6598   // tmp: the number of bytes to be filled to align the base with ZVA length.
6599   add(base, base, tmp);
6600   sub(cnt, cnt, tmp, Assembler::ASR, 3);
6601   adr(tmp2, initial_table_end);
6602   sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
6603   br(tmp2);
6604 
6605   for (int i = -zva_length + 16; i < 0; i += 16)
6606     stp(zr, zr, Address(base, i));
6607   bind(initial_table_end);
6608 
6609   sub(cnt, cnt, zva_length >> 3);
6610   bind(loop_zva);
6611   dc(Assembler::ZVA, base);
6612   subs(cnt, cnt, zva_length >> 3);
6613   add(base, base, zva_length);
6614   br(Assembler::GE, loop_zva);
6615   add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
6616   bind(fini);
6617 }
6618 
6619 // base:   Address of a buffer to be filled, 8 bytes aligned.
// cnt:    Count in 8-byte units.
6621 // value:  Value to be filled with.
6622 // base will point to the end of the buffer after filling.
6623 void MacroAssembler::fill_words(Register base, Register cnt, Register value)
6624 {
6625 //  Algorithm:
6626 //
6627 //    if (cnt == 0) {
6628 //      return;
6629 //    }
6630 //    if ((p & 8) != 0) {
6631 //      *p++ = v;
6632 //    }
6633 //
6634 //    scratch1 = cnt & 14;
6635 //    cnt -= scratch1;
6636 //    p += scratch1;
6637 //    switch (scratch1 / 2) {
6638 //      do {
6639 //        cnt -= 16;
6640 //          p[-16] = v;
6641 //          p[-15] = v;
6642 //        case 7:
6643 //          p[-14] = v;
6644 //          p[-13] = v;
6645 //        case 6:
6646 //          p[-12] = v;
6647 //          p[-11] = v;
6648 //          // ...
6649 //        case 1:
6650 //          p[-2] = v;
6651 //          p[-1] = v;
6652 //        case 0:
6653 //          p += 16;
6654 //      } while (cnt);
6655 //    }
6656 //    if ((cnt & 1) == 1) {
6657 //      *p++ = v;
6658 //    }
6659 
6660   assert_different_registers(base, cnt, value, rscratch1, rscratch2);
6661 
6662   Label fini, skip, entry, loop;
6663   const int unroll = 8; // Number of stp instructions we'll unroll
6664 
6665   cbz(cnt, fini);
6666   tbz(base, 3, skip);
6667   str(value, Address(post(base, 8)));
6668   sub(cnt, cnt, 1);
6669   bind(skip);
6670 
6671   andr(rscratch1, cnt, (unroll-1) * 2);
6672   sub(cnt, cnt, rscratch1);
6673   add(base, base, rscratch1, Assembler::LSL, 3);
6674   adr(rscratch2, entry);
6675   sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
6676   br(rscratch2);
6677 
6678   bind(loop);
6679   add(base, base, unroll * 16);
6680   for (int i = -unroll; i < 0; i++)
6681     stp(value, value, Address(base, i * 16));
6682   bind(entry);
6683   subs(cnt, cnt, unroll * 2);
6684   br(Assembler::GE, loop);
6685 
6686   tbz(cnt, 0, fini);
6687   str(value, Address(post(base, 8)));
6688   bind(fini);
6689 }
6690 
6691 // Intrinsic for
6692 //
6693 // - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
6694 //     return the number of characters copied.
6695 // - java/lang/StringUTF16.compress
6696 //     return index of non-latin1 character if copy fails, otherwise 'len'.
6697 //
6698 // This version always returns the number of characters copied, and does not
6699 // clobber the 'len' register. A successful copy will complete with the post-
6700 // condition: 'res' == 'len', while an unsuccessful copy will exit with the
6701 // post-condition: 0 <= 'res' < 'len'.
6702 //
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
//       degrade performance (on Ampere Altra - Neoverse N1) to an
//       unacceptable extent, even though the footprint would be smaller.
6706 //       Using 'umaxv' in the ASCII-case comes with a small penalty but does
6707 //       avoid additional bloat.
6708 //
6709 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags
6710 void MacroAssembler::encode_iso_array(Register src, Register dst,
6711                                       Register len, Register res, bool ascii,
6712                                       FloatRegister vtmp0, FloatRegister vtmp1,
6713                                       FloatRegister vtmp2, FloatRegister vtmp3,
6714                                       FloatRegister vtmp4, FloatRegister vtmp5)
6715 {
6716   Register cnt = res;
6717   Register max = rscratch1;
6718   Register chk = rscratch2;
6719 
6720   prfm(Address(src), PLDL1STRM);
6721   movw(cnt, len);
6722 
6723 #define ASCII(insn) do { if (ascii) { insn; } } while (0)
6724 
6725   Label LOOP_32, DONE_32, FAIL_32;
6726 
6727   BIND(LOOP_32);
6728   {
6729     cmpw(cnt, 32);
6730     br(LT, DONE_32);
6731     ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64)));
6732     // Extract lower bytes.
6733     FloatRegister vlo0 = vtmp4;
6734     FloatRegister vlo1 = vtmp5;
6735     uzp1(vlo0, T16B, vtmp0, vtmp1);
6736     uzp1(vlo1, T16B, vtmp2, vtmp3);
6737     // Merge bits...
6738     orr(vtmp0, T16B, vtmp0, vtmp1);
6739     orr(vtmp2, T16B, vtmp2, vtmp3);
6740     // Extract merged upper bytes.
6741     FloatRegister vhix = vtmp0;
6742     uzp2(vhix, T16B, vtmp0, vtmp2);
6743     // ISO-check on hi-parts (all zero).
6744     //                          ASCII-check on lo-parts (no sign).
6745     FloatRegister vlox = vtmp1; // Merge lower bytes.
6746                                 ASCII(orr(vlox, T16B, vlo0, vlo1));
6747     umov(chk, vhix, D, 1);      ASCII(cm(LT, vlox, T16B, vlox));
6748     fmovd(max, vhix);           ASCII(umaxv(vlox, T16B, vlox));
6749     orr(chk, chk, max);         ASCII(umov(max, vlox, B, 0));
6750                                 ASCII(orr(chk, chk, max));
6751     cbnz(chk, FAIL_32);
6752     subw(cnt, cnt, 32);
6753     st1(vlo0, vlo1, T16B, Address(post(dst, 32)));
6754     b(LOOP_32);
6755   }
6756   BIND(FAIL_32);
6757   sub(src, src, 64);
6758   BIND(DONE_32);
6759 
6760   Label LOOP_8, SKIP_8;
6761 
6762   BIND(LOOP_8);
6763   {
6764     cmpw(cnt, 8);
6765     br(LT, SKIP_8);
6766     FloatRegister vhi = vtmp0;
6767     FloatRegister vlo = vtmp1;
6768     ld1(vtmp3, T8H, src);
6769     uzp1(vlo, T16B, vtmp3, vtmp3);
6770     uzp2(vhi, T16B, vtmp3, vtmp3);
6771     // ISO-check on hi-parts (all zero).
6772     //                          ASCII-check on lo-parts (no sign).
6773                                 ASCII(cm(LT, vtmp2, T16B, vlo));
6774     fmovd(chk, vhi);            ASCII(umaxv(vtmp2, T16B, vtmp2));
6775                                 ASCII(umov(max, vtmp2, B, 0));
6776                                 ASCII(orr(chk, chk, max));
6777     cbnz(chk, SKIP_8);
6778 
6779     strd(vlo, Address(post(dst, 8)));
6780     subw(cnt, cnt, 8);
6781     add(src, src, 16);
6782     b(LOOP_8);
6783   }
6784   BIND(SKIP_8);
6785 
6786 #undef ASCII
6787 
6788   Label LOOP, DONE;
6789 
6790   cbz(cnt, DONE);
6791   BIND(LOOP);
6792   {
6793     Register chr = rscratch1;
6794     ldrh(chr, Address(post(src, 2)));
6795     tst(chr, ascii ? 0xff80 : 0xff00);
6796     br(NE, DONE);
6797     strb(chr, Address(post(dst, 1)));
6798     subs(cnt, cnt, 1);
6799     br(GT, LOOP);
6800   }
6801   BIND(DONE);
6802   // Return index where we stopped.
6803   subw(res, len, cnt);
6804 }
6805 
6806 // Inflate byte[] array to char[].
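// Equivalent scalar code (illustrative sketch only):
//
//   for (int i = 0; i < len; i++)
//     dst[i] = (jchar)(src[i] & 0xff);
//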
6807 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6
6808 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
6809                                            FloatRegister vtmp1, FloatRegister vtmp2,
6810                                            FloatRegister vtmp3, Register tmp4) {
6811   Label big, done, after_init, to_stub;
6812 
6813   assert_different_registers(src, dst, len, tmp4, rscratch1);
6814 
6815   fmovd(vtmp1, 0.0);
6816   lsrw(tmp4, len, 3);
6817   bind(after_init);
6818   cbnzw(tmp4, big);
6819   // Short string: less than 8 bytes.
6820   {
6821     Label loop, tiny;
6822 
6823     cmpw(len, 4);
6824     br(LT, tiny);
6825     // Use SIMD to do 4 bytes.
6826     ldrs(vtmp2, post(src, 4));
6827     zip1(vtmp3, T8B, vtmp2, vtmp1);
6828     subw(len, len, 4);
6829     strd(vtmp3, post(dst, 8));
6830 
6831     cbzw(len, done);
6832 
    // Do the remaining bytes one at a time.
6834     bind(loop);
6835     ldrb(tmp4, post(src, 1));
6836     strh(tmp4, post(dst, 2));
6837     subw(len, len, 1);
6838 
6839     bind(tiny);
6840     cbnz(len, loop);
6841 
6842     b(done);
6843   }
6844 
6845   if (SoftwarePrefetchHintDistance >= 0) {
6846     bind(to_stub);
6847       RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
6848       assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
6849       address tpc = trampoline_call(stub);
6850       if (tpc == nullptr) {
6851         DEBUG_ONLY(reset_labels(big, done));
6852         postcond(pc() == badAddress);
6853         return nullptr;
6854       }
6855       b(after_init);
6856   }
6857 
6858   // Unpack the bytes 8 at a time.
6859   bind(big);
6860   {
6861     Label loop, around, loop_last, loop_start;
6862 
6863     if (SoftwarePrefetchHintDistance >= 0) {
6864       const int large_loop_threshold = (64 + 16)/8;
6865       ldrd(vtmp2, post(src, 8));
6866       andw(len, len, 7);
6867       cmp(tmp4, (u1)large_loop_threshold);
6868       br(GE, to_stub);
6869       b(loop_start);
6870 
6871       bind(loop);
6872       ldrd(vtmp2, post(src, 8));
6873       bind(loop_start);
6874       subs(tmp4, tmp4, 1);
6875       br(EQ, loop_last);
6876       zip1(vtmp2, T16B, vtmp2, vtmp1);
6877       ldrd(vtmp3, post(src, 8));
6878       st1(vtmp2, T8H, post(dst, 16));
6879       subs(tmp4, tmp4, 1);
6880       zip1(vtmp3, T16B, vtmp3, vtmp1);
6881       st1(vtmp3, T8H, post(dst, 16));
6882       br(NE, loop);
6883       b(around);
6884       bind(loop_last);
6885       zip1(vtmp2, T16B, vtmp2, vtmp1);
6886       st1(vtmp2, T8H, post(dst, 16));
6887       bind(around);
6888       cbz(len, done);
6889     } else {
6890       andw(len, len, 7);
6891       bind(loop);
6892       ldrd(vtmp2, post(src, 8));
6893       sub(tmp4, tmp4, 1);
6894       zip1(vtmp3, T16B, vtmp2, vtmp1);
6895       st1(vtmp3, T8H, post(dst, 16));
6896       cbnz(tmp4, loop);
6897     }
6898   }
6899 
6900   // Do the tail of up to 8 bytes.
6901   add(src, src, len);
6902   ldrd(vtmp3, Address(src, -8));
6903   add(dst, dst, len, ext::uxtw, 1);
6904   zip1(vtmp3, T16B, vtmp3, vtmp1);
6905   strq(vtmp3, Address(dst, -16));
6906 
6907   bind(done);
6908   postcond(pc() != badAddress);
6909   return pc();
6910 }
6911 
6912 // Compress char[] array to byte[].
6913 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Returns the array length if every element in the array can be encoded;
// otherwise, returns the index of the first non-latin1 (> 0xff) character.
6916 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
6917                                          Register res,
6918                                          FloatRegister tmp0, FloatRegister tmp1,
6919                                          FloatRegister tmp2, FloatRegister tmp3,
6920                                          FloatRegister tmp4, FloatRegister tmp5) {
6921   encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
6922 }
6923 
// java.lang.Math.round(double a)
6925 // Returns the closest long to the argument, with ties rounding to
6926 // positive infinity.  This requires some fiddling for corner
6927 // cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
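//
// A worked example of the corner case handled below: Math.round(-2.5)
// must be -2 (ties round towards positive infinity), but fcvtas (ties
// away from zero) would produce -3.  For small negative inputs we
// therefore compute floor(src + 0.5) instead: -2.5 + 0.5 == -2.0,
// whose floor is -2.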
6928 void MacroAssembler::java_round_double(Register dst, FloatRegister src,
6929                                        FloatRegister ftmp) {
6930   Label DONE;
6931   BLOCK_COMMENT("java_round_double: { ");
6932   fmovd(rscratch1, src);
  // Use RoundToNearestTiesAway unless src is small and negative.
6934   fcvtasd(dst, src);
6935   // Test if src >= 0 || abs(src) >= 0x1.0p52
6936   eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit
6937   mov(rscratch2, julong_cast(0x1.0p52));
6938   cmp(rscratch1, rscratch2);
6939   br(HS, DONE); {
6940     // src < 0 && abs(src) < 0x1.0p52
6941     // src may have a fractional part, so add 0.5
6942     fmovd(ftmp, 0.5);
6943     faddd(ftmp, src, ftmp);
6944     // Convert double to jlong, use RoundTowardsNegative
6945     fcvtmsd(dst, ftmp);
6946   }
6947   bind(DONE);
6948   BLOCK_COMMENT("} java_round_double");
6949 }
6950 
6951 void MacroAssembler::java_round_float(Register dst, FloatRegister src,
6952                                       FloatRegister ftmp) {
6953   Label DONE;
6954   BLOCK_COMMENT("java_round_float: { ");
6955   fmovs(rscratch1, src);
  // Use RoundToNearestTiesAway unless src is small and negative.
6957   fcvtassw(dst, src);
6958   // Test if src >= 0 || abs(src) >= 0x1.0p23
6959   eor(rscratch1, rscratch1, 0x80000000); // flip sign bit
6960   mov(rscratch2, jint_cast(0x1.0p23f));
6961   cmp(rscratch1, rscratch2);
6962   br(HS, DONE); {
6963     // src < 0 && |src| < 0x1.0p23
6964     // src may have a fractional part, so add 0.5
6965     fmovs(ftmp, 0.5f);
6966     fadds(ftmp, src, ftmp);
6967     // Convert float to jint, use RoundTowardsNegative
6968     fcvtmssw(dst, ftmp);
6969   }
6970   bind(DONE);
6971   BLOCK_COMMENT("} java_round_float");
6972 }
6973 
6974 // get_thread() can be called anywhere inside generated code so we
6975 // need to save whatever non-callee save context might get clobbered
6976 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
6977 // the call setup code.
6978 //
// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
// On other systems, the helper is an ordinary C function, so all argument
// and temporary registers (r0-r17) may be clobbered by the call.
6981 //
6982 void MacroAssembler::get_thread(Register dst) {
6983   RegSet saved_regs =
6984     LINUX_ONLY(RegSet::range(r0, r1)  + lr - dst)
6985     NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
6986 
6987   protect_return_address();
6988   push(saved_regs, sp);
6989 
6990   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
6991   blr(lr);
6992   if (dst != c_rarg0) {
6993     mov(dst, c_rarg0);
6994   }
6995 
6996   pop(saved_regs, sp);
6997   authenticate_return_address();
6998 }
6999 
7000 #ifdef COMPILER2
7001 // C2 compiled method's prolog code
// Moved here from aarch64.ad to support the Valhalla code below
7003 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
7004   if (C->clinit_barrier_on_entry()) {
7005     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
7006 
7007     Label L_skip_barrier;
7008 
7009     mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
7010     clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
7011     far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
7012     bind(L_skip_barrier);
7013   }
7014 
7015   if (C->max_vector_size() > 0) {
7016     reinitialize_ptrue();
7017   }
7018 
7019   int bangsize = C->output()->bang_size_in_bytes();
7020   if (C->output()->need_stack_bang(bangsize))
7021     generate_stack_overflow_check(bangsize);
7022 
7023   // n.b. frame size includes space for return pc and rfp
7024   const long framesize = C->output()->frame_size_in_bytes();
7025   build_frame(framesize);
7026 
7027   if (C->needs_stack_repair()) {
7028     save_stack_increment(sp_inc, framesize);
7029   }
7030 
7031   if (VerifyStackAtCalls) {
7032     Unimplemented();
7033   }
7034 }
7035 #endif // COMPILER2
7036 
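// Buffer inline-type field values returned in registers into a heap
// allocation.  The return convention assumed by the code below: r0 holds
// either an oop of an already-buffered inline type, or
// (InlineKlass* | 0x01) while the field values live in the return
// registers (r1-r7 and, for floating-point fields, v-registers), in which
// case a buffer is allocated here and filled in by the klass's pack
// handler.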
7037 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) {
  assert(InlineTypeReturnedAsFields, "inline types should be returned as fields");
7039   // An inline type might be returned. If fields are in registers we
7040   // need to allocate an inline type instance and initialize it with
7041   // the value of the fields.
7042   Label skip;
  // We only need to allocate a new buffered inline type if the fields were
  // returned in registers (tagged r0); otherwise r0 already holds a buffered oop
7044   tbz(r0, 0, skip);
7045   int call_offset = -1;
7046 
  // Be careful not to clobber r1-r7, which hold the returned fields
7048   // Also do not use callee-saved registers as these may be live in the interpreter
7049   Register tmp1 = r13, tmp2 = r14, klass = r15, r0_preserved = r12;
7050 
  // The following code is similar to allocate_instance but has some slight differences,
  // e.g. the object size is never zero and is sometimes a constant, and storing the
  // klass pointer after allocation is unnecessary when vk != nullptr.
  // allocate_instance cannot take advantage of these.
7054   Label slow_case;
7055   // 1. Try to allocate a new buffered inline instance either from TLAB or eden space
  mov(r0_preserved, r0); // save r0 for slow_case since *_allocate may corrupt it when allocation fails
7057 
7058   if (vk != nullptr) {
7059     // Called from C1, where the return type is statically known.
7060     movptr(klass, (intptr_t)vk->get_InlineKlass());
7061     jint lh = vk->layout_helper();
7062     assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
7063     if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) {
7064       tlab_allocate(r0, noreg, lh, tmp1, tmp2, slow_case);
7065     } else {
7066       b(slow_case);
7067     }
7068   } else {
    // Called from the interpreter. r0 contains ((the InlineKlass* of the return type) | 0x01)
7070     andr(klass, r0, -2);
7071     if (UseTLAB) {
7072       ldrw(tmp2, Address(klass, Klass::layout_helper_offset()));
7073       tst(tmp2, Klass::_lh_instance_slow_path_bit);
7074       br(Assembler::NE, slow_case);
7075       tlab_allocate(r0, tmp2, 0, tmp1, tmp2, slow_case);
7076     } else {
7077       b(slow_case);
7078     }
7079   }
7080   if (UseTLAB) {
7081     // 2. Initialize buffered inline instance header
7082     Register buffer_obj = r0;
7083     if (UseCompactObjectHeaders) {
7084       ldr(rscratch1, Address(klass, Klass::prototype_header_offset()));
7085       str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
7086     } else {
7087       mov(rscratch1, (intptr_t)markWord::inline_type_prototype().value());
7088       str(rscratch1, Address(buffer_obj, oopDesc::mark_offset_in_bytes()));
7089       store_klass_gap(buffer_obj, zr);
7090       if (vk == nullptr) {
7091         // store_klass corrupts klass, so save it for later use (interpreter case only).
7092         mov(tmp1, klass);
7093       }
7094       store_klass(buffer_obj, klass);
7095     }
7096     // 3. Initialize its fields with an inline class specific handler
7097     if (vk != nullptr) {
7098       far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
7099     } else {
7100       // tmp1 holds klass preserved above
7101       ldr(tmp1, Address(tmp1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
7102       ldr(tmp1, Address(tmp1, InlineKlass::pack_handler_offset()));
7103       blr(tmp1);
7104     }
7105 
7106     membar(Assembler::StoreStore);
7107     b(skip);
7108   } else {
7109     // Must have already branched to slow_case above.
7110     DEBUG_ONLY(should_not_reach_here());
7111   }
7112   bind(slow_case);
7113   // We failed to allocate a new inline type, fall back to a runtime
7114   // call. Some oop field may be live in some registers but we can't
7115   // tell. That runtime call will take care of preserving them
7116   // across a GC if there's one.
7117   mov(r0, r0_preserved);
7118 
7119   if (from_interpreter) {
7120     super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf());
7121   } else {
7122     far_call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf()));
7123     call_offset = offset();
7124   }
7125   membar(Assembler::StoreStore);
7126 
7127   bind(skip);
7128   return call_offset;
7129 }
7130 
7131 // Move a value between registers/stack slots and update the reg_state
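//
// The reg_state array tracks one state per VMReg; the protocol used below:
//
//   reg_readonly : still holds a source value that is needed; not writable
//   reg_writable : free to receive a value
//   reg_written  : already holds its final value
//
// A successful move marks the source reg_writable and the destination
// reg_written; returning false means the destination is not yet writable
// and the caller must retry this move later.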
7132 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) {
7133   assert(from->is_valid() && to->is_valid(), "source and destination must be valid");
7134   if (reg_state[to->value()] == reg_written) {
7135     return true; // Already written
7136   }
7137 
7138   if (from != to && bt != T_VOID) {
7139     if (reg_state[to->value()] == reg_readonly) {
7140       return false; // Not yet writable
7141     }
7142     if (from->is_reg()) {
7143       if (to->is_reg()) {
7144         if (from->is_Register() && to->is_Register()) {
7145           mov(to->as_Register(), from->as_Register());
7146         } else if (from->is_FloatRegister() && to->is_FloatRegister()) {
7147           fmovd(to->as_FloatRegister(), from->as_FloatRegister());
7148         } else {
7149           ShouldNotReachHere();
7150         }
7151       } else {
7152         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
7153         Address to_addr = Address(sp, st_off);
7154         if (from->is_FloatRegister()) {
7155           if (bt == T_DOUBLE) {
7156              strd(from->as_FloatRegister(), to_addr);
7157           } else {
7158              assert(bt == T_FLOAT, "must be float");
7159              strs(from->as_FloatRegister(), to_addr);
7160           }
7161         } else {
7162           str(from->as_Register(), to_addr);
7163         }
7164       }
7165     } else {
7166       Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size);
7167       if (to->is_reg()) {
7168         if (to->is_FloatRegister()) {
7169           if (bt == T_DOUBLE) {
7170             ldrd(to->as_FloatRegister(), from_addr);
7171           } else {
7172             assert(bt == T_FLOAT, "must be float");
7173             ldrs(to->as_FloatRegister(), from_addr);
7174           }
7175         } else {
7176           ldr(to->as_Register(), from_addr);
7177         }
7178       } else {
7179         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size;
7180         ldr(rscratch1, from_addr);
7181         str(rscratch1, Address(sp, st_off));
7182       }
7183     }
7184   }
7185 
7186   // Update register states
7187   reg_state[from->value()] = reg_writable;
7188   reg_state[to->value()] = reg_written;
7189   return true;
7190 }
7191 
7192 // Calculate the extra stack space required for packing or unpacking inline
7193 // args and adjust the stack pointer
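//
// After this returns, the stack looks roughly like this (cf. the diagram
// in remove_frame above):
//
//   | Arguments from caller     |
//   |---------------------------|  <-- caller's SP
//   | Saved LR                  |
//   | Saved FP                  |
//   |---------------------------|
//   | Extension space for       |
//   |   inline arg (un)packing  |
//   |---------------------------|  <-- SP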
7194 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) {
7195   int sp_inc = args_on_stack * VMRegImpl::stack_slot_size;
7196   sp_inc = align_up(sp_inc, StackAlignmentInBytes);
7197   assert(sp_inc > 0, "sanity");
7198 
7199   // Save a copy of the FP and LR here for deoptimization patching and frame walking
7200   stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
7201 
7202   // Adjust the stack pointer. This will be repaired on return by MacroAssembler::remove_frame
7203   if (sp_inc < (1 << 9)) {
7204     sub(sp, sp, sp_inc);   // Fits in an immediate
7205   } else {
7206     mov(rscratch1, sp_inc);
7207     sub(sp, sp, rscratch1);
7208   }
7209 
7210   return sp_inc + 2 * wordSize;  // Account for the FP/LR space
7211 }
7212 
7213 // Read all fields from an inline type oop and store the values in registers/stack slots
7214 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
7215                                           VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
7216                                           RegState reg_state[]) {
7217   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
7218   assert(from->is_valid(), "source must be valid");
7219   bool progress = false;
7220 #ifdef ASSERT
7221   const int start_offset = offset();
7222 #endif
7223 
7224   Label L_null, L_notNull;
7225   // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for)
7226   Register tmp1 = r10;
7227   Register tmp2 = r11;
7228   Register fromReg = noreg;
7229   ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1);
7230   bool done = true;
7231   bool mark_done = true;
7232   VMReg toReg;
7233   BasicType bt;
7234   // Check if argument requires a null check
7235   bool null_check = false;
7236   VMReg nullCheckReg;
7237   while (stream.next(nullCheckReg, bt)) {
7238     if (sig->at(stream.sig_index())._offset == -1) {
7239       null_check = true;
7240       break;
7241     }
7242   }
7243   stream.reset(sig_index, to_index);
7244   while (stream.next(toReg, bt)) {
7245     assert(toReg->is_valid(), "destination must be valid");
7246     int idx = (int)toReg->value();
7247     if (reg_state[idx] == reg_readonly) {
7248       if (idx != from->value()) {
7249         mark_done = false;
7250       }
7251       done = false;
7252       continue;
7253     } else if (reg_state[idx] == reg_written) {
7254       continue;
7255     }
7256     assert(reg_state[idx] == reg_writable, "must be writable");
7257     reg_state[idx] = reg_written;
7258     progress = true;
7259 
7260     if (fromReg == noreg) {
7261       if (from->is_reg()) {
7262         fromReg = from->as_Register();
7263       } else {
7264         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size;
7265         ldr(tmp1, Address(sp, st_off));
7266         fromReg = tmp1;
7267       }
7268       if (null_check) {
7269         // Nullable inline type argument, emit null check
7270         cbz(fromReg, L_null);
7271       }
7272     }
7273     int off = sig->at(stream.sig_index())._offset;
7274     if (off == -1) {
      assert(null_check, "missing null check");
7276       if (toReg->is_stack()) {
7277         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7278         mov(tmp2, 1);
7279         str(tmp2, Address(sp, st_off));
7280       } else {
7281         mov(toReg->as_Register(), 1);
7282       }
7283       continue;
7284     }
7285     assert(off > 0, "offset in object should be positive");
7286     Address fromAddr = Address(fromReg, off);
7287     if (!toReg->is_FloatRegister()) {
7288       Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register();
7289       if (is_reference_type(bt)) {
7290         load_heap_oop(dst, fromAddr, rscratch1, rscratch2);
7291       } else {
7292         bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
7293         load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
7294       }
7295       if (toReg->is_stack()) {
7296         int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7297         str(dst, Address(sp, st_off));
7298       }
7299     } else if (bt == T_DOUBLE) {
7300       ldrd(toReg->as_FloatRegister(), fromAddr);
7301     } else {
7302       assert(bt == T_FLOAT, "must be float");
7303       ldrs(toReg->as_FloatRegister(), fromAddr);
7304     }
7305   }
7306   if (progress && null_check) {
7307     if (done) {
7308       b(L_notNull);
7309       bind(L_null);
7310       // Set IsInit field to zero to signal that the argument is null.
7311       // Also set all oop fields to zero to make the GC happy.
7312       stream.reset(sig_index, to_index);
7313       while (stream.next(toReg, bt)) {
7314         if (sig->at(stream.sig_index())._offset == -1 ||
7315             bt == T_OBJECT || bt == T_ARRAY) {
7316           if (toReg->is_stack()) {
7317             int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7318             str(zr, Address(sp, st_off));
7319           } else {
7320             mov(toReg->as_Register(), zr);
7321           }
7322         }
7323       }
7324       bind(L_notNull);
7325     } else {
7326       bind(L_null);
7327     }
7328   }
7329 
7330   sig_index = stream.sig_index();
7331   to_index = stream.regs_index();
7332 
7333   if (mark_done && reg_state[from->value()] != reg_written) {
7334     // This is okay because no one else will write to that slot
7335     reg_state[from->value()] = reg_writable;
7336   }
7337   from_index--;
7338   assert(progress || (start_offset == offset()), "should not emit code");
7339   return done;
7340 }
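
// As an illustrative sketch (register numbers are arbitrary), for a nullable
// scalarized argument the helper above emits code of roughly this shape:
//
//   cbz   fromReg, L_null         // null oop: skip the field loads
//   ldr   w10, [fromReg, #off]    // load each declared field ...
//   mov   w11, #1                 // ... and set IsInit to 1 (non-null)
//   b     L_notNull
// L_null:
//   mov   w11, #0                 // IsInit = 0 marks a null argument
//   mov   x12, xzr                // oop fields are zeroed for the GC
// L_notNull: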
7341 
7342 // Pack fields back into an inline type oop
7343 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
7344                                         VMRegPair* from, int from_count, int& from_index, VMReg to,
7345                                         RegState reg_state[], Register val_array) {
7346   assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter");
7347   assert(to->is_valid(), "destination must be valid");
7348 
7349   if (reg_state[to->value()] == reg_written) {
7350     skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7351     return true; // Already written
7352   }
7353 
7354   // The GC barrier expanded by store_heap_oop below may call into the
7355   // runtime so use callee-saved registers for any values that need to be
7356   // preserved. The GC barrier assembler should take care of saving the
7357   // Java argument registers.
  // TODO 8284443 Isn't it an issue if the code below uses r14 as a tmp while it contains a spilled value?
7359   // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for).
7360   Register val_obj_tmp = r21;
7361   Register from_reg_tmp = r22;
7362   Register tmp1 = r14;
7363   Register tmp2 = r13;
7364   Register tmp3 = r12;
7365   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
7366 
7367   assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array);
7368 
7369   if (reg_state[to->value()] == reg_readonly) {
7370     if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) {
7371       skip_unpacked_fields(sig, sig_index, from, from_count, from_index);
7372       return false; // Not yet writable
7373     }
7374     val_obj = val_obj_tmp;
7375   }
7376 
7377   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT);
7378   load_heap_oop(val_obj, Address(val_array, index), tmp1, tmp2);
7379 
7380   ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index);
7381   VMReg fromReg;
7382   BasicType bt;
7383   Label L_null;
7384   while (stream.next(fromReg, bt)) {
7385     assert(fromReg->is_valid(), "source must be valid");
7386     reg_state[fromReg->value()] = reg_writable;
7387 
7388     int off = sig->at(stream.sig_index())._offset;
7389     if (off == -1) {
7390       // Nullable inline type argument, emit null check
7391       Label L_notNull;
7392       if (fromReg->is_stack()) {
7393         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
7394         ldrb(tmp2, Address(sp, ld_off));
7395         cbnz(tmp2, L_notNull);
7396       } else {
7397         cbnz(fromReg->as_Register(), L_notNull);
7398       }
7399       mov(val_obj, 0);
7400       b(L_null);
7401       bind(L_notNull);
7402       continue;
7403     }
7404 
7405     assert(off > 0, "offset in object should be positive");
7406     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
7407 
7408     // Pack the scalarized field into the value object.
7409     Address dst(val_obj, off);
7410 
7411     if (!fromReg->is_FloatRegister()) {
7412       Register src;
7413       if (fromReg->is_stack()) {
7414         src = from_reg_tmp;
7415         int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size;
7416         load_sized_value(src, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
7417       } else {
7418         src = fromReg->as_Register();
7419       }
7420       assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array);
7421       if (is_reference_type(bt)) {
7422         store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
7423       } else {
7424         store_sized_value(dst, src, size_in_bytes);
7425       }
7426     } else if (bt == T_DOUBLE) {
7427       strd(fromReg->as_FloatRegister(), dst);
7428     } else {
7429       assert(bt == T_FLOAT, "must be float");
7430       strs(fromReg->as_FloatRegister(), dst);
7431     }
7432   }
7433   bind(L_null);
7434   sig_index = stream.sig_index();
7435   from_index = stream.regs_index();
7436 
7437   assert(reg_state[to->value()] == reg_writable, "must have already been read");
7438   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state);
  assert(success, "to register must be writable");
7440 
7441   return true;
7442 }
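
// Sketch of the per-argument null handling emitted above (illustrative;
// shown for the case where the IsInit value lives on the stack):
//
//   ldrb  tmp2, [sp, #ld_off]   // load the IsInit value of the argument
//   cbnz  tmp2, L_notNull
//   mov   val_obj, #0           // null argument: pass a null oop ...
//   b     L_null                // ... and skip all remaining field stores
// L_notNull:
//   ...                         // store each scalarized field into val_obj
// L_null: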
7443 
7444 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
7445   return (reg->is_FloatRegister()) ? v8->as_VMReg() : r14->as_VMReg();
7446 }
7447 
7448 void MacroAssembler::cache_wb(Address line) {
7449   assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset");
7450   assert(line.index() == noreg, "index should be noreg");
7451   assert(line.offset() == 0, "offset should be 0");
  // We would also like to assert this:
  // assert(line._ext.shift == 0, "shift should be zero");
7454   if (VM_Version::supports_dcpop()) {
7455     // writeback using clear virtual address to point of persistence
7456     dc(Assembler::CVAP, line.base());
7457   } else {
7458     // no need to generate anything as Unsafe.writebackMemory should
7459     // never invoke this stub
7460   }
7461 }
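
// On a CPU with DCPOP support, cache_wb therefore reduces to a single
// instruction, e.g. for a line base in x0:
//
//   dc cvap, x0    // clean data cache by VA to the point of persistence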
7462 
7463 void MacroAssembler::cache_wbsync(bool is_pre) {
7464   // we only need a barrier post sync
7465   if (!is_pre) {
7466     membar(Assembler::AnyAny);
7467   }
7468 }
7469 
7470 void MacroAssembler::verify_sve_vector_length(Register tmp) {
7471   if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) {
7472     return;
7473   }
7474   // Make sure that native code does not change SVE vector length.
7475   Label verify_ok;
7476   movw(tmp, zr);
7477   sve_inc(tmp, B);
7478   subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
7479   br(EQ, verify_ok);
7480   stop("Error: SVE vector length has changed since jvm startup");
7481   bind(verify_ok);
7482 }
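
// Concrete example: on a CPU with a 256-bit SVE vector length, sve_inc(tmp, B)
// adds 32 (the number of byte elements per vector) to tmp, so the check
// passes only if that matches the vector length recorded at JVM startup.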
7483 
7484 void MacroAssembler::verify_ptrue() {
7485   Label verify_ok;
7486   if (!UseSVE) {
7487     return;
7488   }
7489   sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
7490   sve_dec(rscratch1, B);
7491   cbz(rscratch1, verify_ok);
7492   stop("Error: the preserved predicate register (p7) elements are not all true");
7493   bind(verify_ok);
7494 }
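
// The check works because sve_cntp counts the true lanes of p7 while
// sve_dec subtracts one full vector's worth of byte elements. For example,
// with a 128-bit vector an all-true p7 yields 16 - 16 == 0; any false lane
// leaves a non-zero count and triggers the stop().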
7495 
7496 void MacroAssembler::safepoint_isb() {
7497   isb();
7498 #ifndef PRODUCT
7499   if (VerifyCrossModifyFence) {
7500     // Clear the thread state.
7501     strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
7502   }
7503 #endif
7504 }
7505 
7506 #ifndef PRODUCT
7507 void MacroAssembler::verify_cross_modify_fence_not_required() {
7508   if (VerifyCrossModifyFence) {
7509     // Check if thread needs a cross modify fence.
7510     ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
7511     Label fence_not_required;
7512     cbz(rscratch1, fence_not_required);
7513     // If it does then fail.
7514     lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
7515     mov(c_rarg0, rthread);
7516     blr(rscratch1);
7517     bind(fence_not_required);
7518   }
7519 }
7520 #endif
7521 
7522 void MacroAssembler::spin_wait() {
7523   for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
7524     switch (VM_Version::spin_wait_desc().inst()) {
7525       case SpinWait::NOP:
7526         nop();
7527         break;
7528       case SpinWait::ISB:
7529         isb();
7530         break;
7531       case SpinWait::YIELD:
7532         yield();
7533         break;
7534       default:
7535         ShouldNotReachHere();
7536     }
7537   }
7538 }
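
// For example, running with -XX:OnSpinWaitInst=isb -XX:OnSpinWaitInstCount=2
// (the flags backing VM_Version::spin_wait_desc() on this port) makes
// spin_wait emit:
//
//   isb
//   isb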
7539 
7540 // Stack frame creation/removal
7541 
7542 void MacroAssembler::enter(bool strip_ret_addr) {
7543   if (strip_ret_addr) {
7544     // Addresses can only be signed once. If there are multiple nested frames being created
7545     // in the same function, then the return address needs stripping first.
7546     strip_return_address();
7547   }
7548   protect_return_address();
7549   stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
7550   mov(rfp, sp);
7551 }
7552 
7553 void MacroAssembler::leave() {
7554   mov(sp, rfp);
7555   ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
7556   authenticate_return_address();
7557 }
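
// Together, enter() and leave() emit the standard AArch64 frame sequence,
// roughly (ROP-protection instructions omitted when disabled):
//
//   enter:   stp rfp, lr, [sp, #-16]!
//            mov rfp, sp
//
//   leave:   mov sp, rfp
//            ldp rfp, lr, [sp], #16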
7558 
7559 // ROP Protection
7560 // Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
7561 // destroying stack frames or whenever directly loading/storing the LR to memory.
7562 // If ROP protection is not set then these functions are no-ops.
7563 // For more details on PAC see pauth_aarch64.hpp.
7564 
7565 // Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
7566 // Uses value zero as the modifier.
7567 //
7568 void MacroAssembler::protect_return_address() {
7569   if (VM_Version::use_rop_protection()) {
7570     check_return_address();
7571     paciaz();
7572   }
7573 }
7574 
// Sign the return address in the given register. Use before updating the LR in the existing stack
// frame for the current function.
// Uses value zero as the modifier.
//
7579 void MacroAssembler::protect_return_address(Register return_reg) {
7580   if (VM_Version::use_rop_protection()) {
7581     check_return_address(return_reg);
7582     paciza(return_reg);
7583   }
7584 }
7585 
7586 // Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
7587 // Uses value zero as the modifier.
7588 //
7589 void MacroAssembler::authenticate_return_address() {
7590   if (VM_Version::use_rop_protection()) {
7591     autiaz();
7592     check_return_address();
7593   }
7594 }
7595 
// Authenticate the return address in the given register. Use before updating the LR in the existing
// stack frame for the current function.
// Uses value zero as the modifier.
//
7600 void MacroAssembler::authenticate_return_address(Register return_reg) {
7601   if (VM_Version::use_rop_protection()) {
7602     autiza(return_reg);
7603     check_return_address(return_reg);
7604   }
7605 }
7606 
7607 // Strip any PAC data from LR without performing any authentication. Use with caution - only if
7608 // there is no guaranteed way of authenticating the LR.
7609 //
7610 void MacroAssembler::strip_return_address() {
7611   if (VM_Version::use_rop_protection()) {
7612     xpaclri();
7613   }
7614 }
7615 
7616 #ifndef PRODUCT
// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used, i.e. when the program returns to the invalid LR. At that point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
//
7624 void MacroAssembler::check_return_address(Register return_reg) {
7625   if (VM_Version::use_rop_protection()) {
7626     ldr(zr, Address(return_reg));
7627   }
7628 }
7629 #endif
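
// The load in check_return_address is a cheap validity probe: a pointer
// still carrying PAC bits (or corrupted by a failed authentication) falls
// outside the valid address range, so ldr(zr, Address(return_reg)) faults
// right there rather than at the eventual return through the bad LR.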
7630 
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no ABI restrictions. Since we must observe ABI restrictions
// (like the space reserved for the saved rfp and lr) the slots must be
// biased by the following value.
7635 static int reg2offset_in(VMReg r) {
7636   // Account for saved rfp and lr
7637   // This should really be in_preserve_stack_slots
7638   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
7639 }
7640 
7641 static int reg2offset_out(VMReg r) {
7642   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
7643 }
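
// Example: the first incoming stack slot (reg2stack() == 0) is addressed at
// rfp + 16, i.e. biased by 4 slots of 4 bytes to skip the saved rfp and lr,
// while the first outgoing slot is at sp + 0 because
// SharedRuntime::out_preserve_stack_slots() is 0 on this port.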
7644 
// On 64-bit platforms we store integer-like items to the stack as 64-bit
// items (AArch64 ABI), even though Java only stores 32 bits for an int
// parameter. So this routine performs a 32->64 move with sign extension.
7649 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
7650   if (src.first()->is_stack()) {
7651     if (dst.first()->is_stack()) {
7652       // stack to stack
7653       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7654       str(tmp, Address(sp, reg2offset_out(dst.first())));
7655     } else {
7656       // stack to reg
7657       ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7658     }
7659   } else if (dst.first()->is_stack()) {
7660     // reg to stack
7661     str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7662   } else {
7663     if (dst.first() != src.first()) {
7664       sxtw(dst.first()->as_Register(), src.first()->as_Register());
7665     }
7666   }
7667 }
7668 
// An oop arg. We must pass a handle, not the oop itself.
7670 void MacroAssembler::object_move(
7671                         OopMap* map,
7672                         int oop_handle_offset,
7673                         int framesize_in_slots,
7674                         VMRegPair src,
7675                         VMRegPair dst,
7676                         bool is_receiver,
7677                         int* receiver_offset) {
7678 
  // We must pass a handle. First figure out the location we use as the handle.
7680 
7681   Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();
7682 
  // See if the oop is null; if it is, we need no handle.
7684 
7685   if (src.first()->is_stack()) {
7686 
7687     // Oop is already on the stack as an argument
7688     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
7689     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
7690     if (is_receiver) {
7691       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
7692     }
7693 
7694     ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
7695     lea(rHandle, Address(rfp, reg2offset_in(src.first())));
7696     // conditionally move a null
7697     cmp(rscratch1, zr);
7698     csel(rHandle, zr, rHandle, Assembler::EQ);
7699   } else {
7700 
    // The oop is in a register. We must store it to the space we reserved
    // on the stack for oop handles, and pass a handle if the oop is non-null.
7703 
7704     const Register rOop = src.first()->as_Register();
7705     int oop_slot;
7706     if (rOop == j_rarg0)
7707       oop_slot = 0;
7708     else if (rOop == j_rarg1)
7709       oop_slot = 1;
7710     else if (rOop == j_rarg2)
7711       oop_slot = 2;
7712     else if (rOop == j_rarg3)
7713       oop_slot = 3;
7714     else if (rOop == j_rarg4)
7715       oop_slot = 4;
7716     else if (rOop == j_rarg5)
7717       oop_slot = 5;
7718     else if (rOop == j_rarg6)
7719       oop_slot = 6;
7720     else {
7721       assert(rOop == j_rarg7, "wrong register");
7722       oop_slot = 7;
7723     }
7724 
7725     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
7726     int offset = oop_slot*VMRegImpl::stack_slot_size;
7727 
7728     map->set_oop(VMRegImpl::stack2reg(oop_slot));
7729     // Store oop in handle area, may be null
7730     str(rOop, Address(sp, offset));
7731     if (is_receiver) {
7732       *receiver_offset = offset;
7733     }
7734 
7735     cmp(rOop, zr);
7736     lea(rHandle, Address(sp, offset));
7737     // conditionally move a null
7738     csel(rHandle, zr, rHandle, Assembler::EQ);
7739   }
7740 
  // If the arg is on the stack then place it there, otherwise it is already in the correct register.
7742   if (dst.first()->is_stack()) {
7743     str(rHandle, Address(sp, reg2offset_out(dst.first())));
7744   }
7745 }
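
// The net effect matches the JNI handle convention, roughly:
//
//   rHandle = (oop == nullptr) ? nullptr : &oop_slot;
//
// i.e. a null jobject represents a null reference, while any non-null
// jobject points at a GC-visible stack slot recorded in the oop map.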
7746 
// A float arg. The stack-to-stack case moves the bits through an integer tmp register.
7748 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
7749  if (src.first()->is_stack()) {
7750     if (dst.first()->is_stack()) {
7751       ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
7752       strw(tmp, Address(sp, reg2offset_out(dst.first())));
7753     } else {
7754       ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7755     }
7756   } else if (src.first() != dst.first()) {
7757     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7758       fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7759     else
7760       strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7761   }
7762 }
7763 
7764 // A long move
7765 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
7766   if (src.first()->is_stack()) {
7767     if (dst.first()->is_stack()) {
7768       // stack to stack
7769       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7770       str(tmp, Address(sp, reg2offset_out(dst.first())));
7771     } else {
7772       // stack to reg
7773       ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
7774     }
7775   } else if (dst.first()->is_stack()) {
7776     // reg to stack
    // No sign extension is needed; a Java long is already 64 bits in the register.
7779     str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
7780   } else {
7781     if (dst.first() != src.first()) {
7782       mov(dst.first()->as_Register(), src.first()->as_Register());
7783     }
7784   }
7785 }
7786 
7787 
7788 // A double move
7789 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
7790  if (src.first()->is_stack()) {
7791     if (dst.first()->is_stack()) {
7792       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
7793       str(tmp, Address(sp, reg2offset_out(dst.first())));
7794     } else {
7795       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
7796     }
7797   } else if (src.first() != dst.first()) {
7798     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
7799       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
7800     else
7801       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
7802   }
7803 }
7804 
7805 // Implements lightweight-locking.
7806 //
7807 //  - obj: the object to be locked
7808 //  - t1, t2, t3: temporary registers, will be destroyed
//  - slow: branched to if locking fails; the absolute offset may be larger than 32KB (imm14 encoding).
7810 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
7811   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
7812   assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
7813 
7814   Label push;
7815   const Register top = t1;
7816   const Register mark = t2;
7817   const Register t = t3;
7818 
7819   // Preload the markWord. It is important that this is the first
7820   // instruction emitted as it is part of C1's null check semantics.
7821   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7822 
7823   if (UseObjectMonitorTable) {
7824     // Clear cache in case fast locking succeeds.
7825     str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
7826   }
7827 
7828   // Check if the lock-stack is full.
7829   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7830   cmpw(top, (unsigned)LockStack::end_offset());
7831   br(Assembler::GE, slow);
7832 
7833   // Check for recursion.
7834   subw(t, top, oopSize);
7835   ldr(t, Address(rthread, t));
7836   cmp(obj, t);
7837   br(Assembler::EQ, push);
7838 
7839   // Check header for monitor (0b10).
7840   tst(mark, markWord::monitor_value);
7841   br(Assembler::NE, slow);
7842 
7843   // Try to lock. Transition lock bits 0b01 => 0b00
7844   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7845   orr(mark, mark, markWord::unlocked_value);
7846   if (EnableValhalla) {
7847     // Mask inline_type bit such that we go to the slow path if object is an inline type
7848     andr(mark, mark, ~((int) markWord::inline_type_bit_in_place));
7849   }
7850   eor(t, mark, markWord::unlocked_value);
7851   cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
7852           /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
7853   br(Assembler::NE, slow);
7854 
7855   bind(push);
7856   // After successful lock, push object on lock-stack.
7857   str(obj, Address(rthread, top));
7858   addw(top, top, oopSize);
7859   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7860 }
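
// Sketch of the mark word lock bits used above (as defined by markWord):
//
//   0b01  unlocked
//   0b00  fast-locked (lightweight)
//   0b10  inflated (object monitor)
//   0b11  marked (GC forwarding)
//
// The cmpxchg expects the mark with lock bits 0b01 and installs the same
// mark with the lock bits cleared to 0b00. With EnableValhalla the
// inline_type bit is masked out of the expected value, so a value object's
// mark never matches and locking always takes the slow path.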
7861 
7862 // Implements lightweight-unlocking.
7863 //
7864 // - obj: the object to be unlocked
7865 // - t1, t2, t3: temporary registers
// - slow: branched to if unlocking fails; the absolute offset may be larger than 32KB (imm14 encoding).
7867 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
7868   assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
7869   // cmpxchg clobbers rscratch1.
7870   assert_different_registers(obj, t1, t2, t3, rscratch1);
7871 
7872 #ifdef ASSERT
7873   {
7874     // Check for lock-stack underflow.
7875     Label stack_ok;
7876     ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
7877     cmpw(t1, (unsigned)LockStack::start_offset());
7878     br(Assembler::GE, stack_ok);
7879     STOP("Lock-stack underflow");
7880     bind(stack_ok);
7881   }
7882 #endif
7883 
7884   Label unlocked, push_and_slow;
7885   const Register top = t1;
7886   const Register mark = t2;
7887   const Register t = t3;
7888 
7889   // Check if obj is top of lock-stack.
7890   ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7891   subw(top, top, oopSize);
7892   ldr(t, Address(rthread, top));
7893   cmp(obj, t);
7894   br(Assembler::NE, slow);
7895 
7896   // Pop lock-stack.
7897   DEBUG_ONLY(str(zr, Address(rthread, top));)
7898   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7899 
7900   // Check if recursive.
7901   subw(t, top, oopSize);
7902   ldr(t, Address(rthread, t));
7903   cmp(obj, t);
7904   br(Assembler::EQ, unlocked);
7905 
7906   // Not recursive. Check header for monitor (0b10).
7907   ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
7908   tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
7909 
7910 #ifdef ASSERT
7911   // Check header not unlocked (0b01).
7912   Label not_unlocked;
7913   tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
7914   stop("lightweight_unlock already unlocked");
7915   bind(not_unlocked);
7916 #endif
7917 
7918   // Try to unlock. Transition lock bits 0b00 => 0b01
7919   assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
7920   orr(t, mark, markWord::unlocked_value);
7921   cmpxchg(obj, mark, t, Assembler::xword,
7922           /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
7923   br(Assembler::EQ, unlocked);
7924 
7925   bind(push_and_slow);
7926   // Restore lock-stack and handle the unlock in runtime.
7927   DEBUG_ONLY(str(obj, Address(rthread, top));)
7928   addw(top, top, oopSize);
7929   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
7930   b(slow);
7931 
7932   bind(unlocked);
7933 }
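
// The unlock transition mirrors the lock: the cmpxchg above expects the
// fast-locked mark (lock bits 0b00) and installs "mark | unlocked_value"
// (0b01). If the mark is inflated or changes concurrently, the lock-stack
// entry is pushed back and the runtime handles the unlock.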