1 /*
   2  * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
  27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
  28 
  29 #include "asm/assembler.inline.hpp"
  30 #include "metaprogramming/enableIf.hpp"
  31 #include "oops/compressedOops.hpp"
  32 #include "runtime/vm_version.hpp"
  33 #include "utilities/powerOfTwo.hpp"
  34 
  35 // MacroAssembler extends Assembler by frequently used macros.
  36 //
  37 // Instructions for which a 'better' code sequence exists depending
  38 // on arguments should also go in here.
  39 
  40 class MacroAssembler: public Assembler {
  41   friend class LIR_Assembler;
  42 
  43  public:
  44   using Assembler::mov;
  45   using Assembler::movi;
  46 
  47  protected:
  48 
  49   // Support for VM calls
  50   //
  51   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  52   // may customize this version by overriding it for its purposes (e.g., to save/restore
  53   // additional registers when doing a VM call).
  54   virtual void call_VM_leaf_base(
  55     address entry_point,               // the entry point
  56     int     number_of_arguments,        // the number of arguments to pop after the call
  57     Label *retaddr = NULL
  58   );
  59 
  60   virtual void call_VM_leaf_base(
  61     address entry_point,               // the entry point
  62     int     number_of_arguments,        // the number of arguments to pop after the call
  63     Label &retaddr) {
  64     call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  65   }
  66 
  67   // This is the base routine called by the different versions of call_VM. The interpreter
  68   // may customize this version by overriding it for its purposes (e.g., to save/restore
  69   // additional registers when doing a VM call).
  70   //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  75   virtual void call_VM_base(           // returns the register containing the thread upon return
  76     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  77     Register java_thread,              // the thread if computed before     ; use noreg otherwise
  78     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  79     address  entry_point,              // the entry point
  80     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  81     bool     check_exceptions          // whether to check for pending exceptions after return
  82   );
  83 
  84   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  85 
  86  public:
  87 
  88   enum KlassDecodeMode {
  89     KlassDecodeNone,
  90     KlassDecodeZero,
  91     KlassDecodeXor,
  92     KlassDecodeMovk
  93   };
  94 
  95   // Return the current narrow Klass pointer decode mode. Initialized on first call.
  96   static KlassDecodeMode klass_decode_mode();
  97 
  98   // Given an arbitrary base address, return the KlassDecodeMode that would be used. Return KlassDecodeNone
  99   // if base address is not valid for encoding.
 100   static KlassDecodeMode klass_decode_mode_for_base(address base);
 101 
  // Returns a static string describing the given KlassDecodeMode
 103   static const char* describe_klass_decode_mode(KlassDecodeMode mode);
 104 
 105  private:
 106 
 107   static KlassDecodeMode _klass_decode_mode;
 108 
 109  public:
 110   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
 111 
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);
 117 
 118   void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
 119 
 120   // Helper functions for statistics gathering.
 121   // Unconditional atomic increment.
 122   void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
 123   void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
 124     lea(tmp1, counter_addr);
 125     atomic_incw(tmp1, tmp2, tmp3);
 126   }
 127   // Load Effective Address
 128   void lea(Register r, const Address &a) {
 129     InstructionMark im(this);
 130     code_section()->relocate(inst_mark(), a.rspec());
 131     a.lea(this, r);
 132   }
 133 
 134   /* Sometimes we get misaligned loads and stores, usually from Unsafe
 135      accesses, and these can exceed the offset range. */
 136   Address legitimize_address(const Address &a, int size, Register scratch) {
 137     if (a.getMode() == Address::base_plus_offset) {
 138       if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
 139         block_comment("legitimize_address {");
 140         lea(scratch, a);
 141         block_comment("} legitimize_address");
 142         return Address(scratch);
 143       }
 144     }
 145     return a;
 146   }
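
  // Illustrative use (hypothetical base register and offset): rewrite an
  // out-of-range base+offset form before an 8-byte access.
  //   Address a = legitimize_address(Address(r10, 1 << 20), 8, rscratch2);
  //   ldr(rscratch1, a);   // the returned Address is safe to use directly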
 147 
 148   void addmw(Address a, Register incr, Register scratch) {
 149     ldrw(scratch, a);
 150     addw(scratch, scratch, incr);
 151     strw(scratch, a);
 152   }
 153 
 154   // Add constant to memory word
 155   void addmw(Address a, int imm, Register scratch) {
 156     ldrw(scratch, a);
 157     if (imm > 0)
 158       addw(scratch, scratch, (unsigned)imm);
 159     else
 160       subw(scratch, scratch, (unsigned)-imm);
 161     strw(scratch, a);
 162   }
 163 
 164   void bind(Label& L) {
 165     Assembler::bind(L);
 166     code()->clear_last_insn();
 167   }
 168 
 169   void membar(Membar_mask_bits order_constraint);
 170 
 171   using Assembler::ldr;
 172   using Assembler::str;
 173   using Assembler::ldrw;
 174   using Assembler::strw;
 175 
 176   void ldr(Register Rx, const Address &adr);
 177   void ldrw(Register Rw, const Address &adr);
 178   void str(Register Rx, const Address &adr);
 179   void strw(Register Rx, const Address &adr);
 180 
 181   // Frame creation and destruction shared between JITs.
 182   void build_frame(int framesize);
 183   void remove_frame(int framesize);
 184 
 185   virtual void _call_Unimplemented(address call_site) {
 186     mov(rscratch2, call_site);
 187   }
 188 
// Microsoft's MSVC team considers __FUNCSIG__ to be approximately equivalent to
// __PRETTY_FUNCTION__, modulo the calling convention.
// Also, from a Clang patch: "It is very similar to GCC's PRETTY_FUNCTION,
// except it prints the calling convention."
// https://reviews.llvm.org/D3311
 192 
 193 #ifdef _WIN64
 194 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
 195 #else
 196 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
 197 #endif
 198 
 199   // aliases defined in AARCH64 spec
 200 
 201   template<class T>
 202   inline void cmpw(Register Rd, T imm)  { subsw(zr, Rd, imm); }
 203 
 204   inline void cmp(Register Rd, unsigned char imm8)  { subs(zr, Rd, imm8); }
 205   inline void cmp(Register Rd, unsigned imm) = delete;
 206 
 207   inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
 208   inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }
 209 
 210   void cset(Register Rd, Assembler::Condition cond) {
 211     csinc(Rd, zr, zr, ~cond);
 212   }
 213   void csetw(Register Rd, Assembler::Condition cond) {
 214     csincw(Rd, zr, zr, ~cond);
 215   }
 216 
 217   void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
 218     csneg(Rd, Rn, Rn, ~cond);
 219   }
 220   void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
 221     csnegw(Rd, Rn, Rn, ~cond);
 222   }
 223 
 224   inline void movw(Register Rd, Register Rn) {
 225     if (Rd == sp || Rn == sp) {
 226       addw(Rd, Rn, 0U);
 227     } else {
 228       orrw(Rd, zr, Rn);
 229     }
 230   }
 231   inline void mov(Register Rd, Register Rn) {
 232     assert(Rd != r31_sp && Rn != r31_sp, "should be");
 233     if (Rd == Rn) {
 234     } else if (Rd == sp || Rn == sp) {
 235       add(Rd, Rn, 0U);
 236     } else {
 237       orr(Rd, zr, Rn);
 238     }
 239   }
 240 
 241   inline void moviw(Register Rd, unsigned imm) { orrw(Rd, zr, imm); }
 242   inline void movi(Register Rd, unsigned imm) { orr(Rd, zr, imm); }
 243 
 244   inline void tstw(Register Rd, Register Rn) { andsw(zr, Rd, Rn); }
 245   inline void tst(Register Rd, Register Rn) { ands(zr, Rd, Rn); }
 246 
 247   inline void tstw(Register Rd, uint64_t imm) { andsw(zr, Rd, imm); }
 248   inline void tst(Register Rd, uint64_t imm) { ands(zr, Rd, imm); }
 249 
 250   inline void bfiw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 251     bfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
 252   }
 253   inline void bfi(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 254     bfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
 255   }
 256 
 257   inline void bfxilw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 258     bfmw(Rd, Rn, lsb, (lsb + width - 1));
 259   }
 260   inline void bfxil(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 261     bfm(Rd, Rn, lsb , (lsb + width - 1));
 262   }
 263 
 264   inline void sbfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 265     sbfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
 266   }
 267   inline void sbfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 268     sbfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
 269   }
 270 
 271   inline void sbfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 272     sbfmw(Rd, Rn, lsb, (lsb + width - 1));
 273   }
 274   inline void sbfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 275     sbfm(Rd, Rn, lsb , (lsb + width - 1));
 276   }
 277 
 278   inline void ubfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 279     ubfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
 280   }
 281   inline void ubfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 282     ubfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
 283   }
 284 
 285   inline void ubfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 286     ubfmw(Rd, Rn, lsb, (lsb + width - 1));
 287   }
 288   inline void ubfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
 289     ubfm(Rd, Rn, lsb , (lsb + width - 1));
 290   }
 291 
 292   inline void asrw(Register Rd, Register Rn, unsigned imm) {
 293     sbfmw(Rd, Rn, imm, 31);
 294   }
 295 
 296   inline void asr(Register Rd, Register Rn, unsigned imm) {
 297     sbfm(Rd, Rn, imm, 63);
 298   }
 299 
 300   inline void lslw(Register Rd, Register Rn, unsigned imm) {
 301     ubfmw(Rd, Rn, ((32 - imm) & 31), (31 - imm));
 302   }
 303 
 304   inline void lsl(Register Rd, Register Rn, unsigned imm) {
 305     ubfm(Rd, Rn, ((64 - imm) & 63), (63 - imm));
 306   }
 307 
 308   inline void lsrw(Register Rd, Register Rn, unsigned imm) {
 309     ubfmw(Rd, Rn, imm, 31);
 310   }
 311 
 312   inline void lsr(Register Rd, Register Rn, unsigned imm) {
 313     ubfm(Rd, Rn, imm, 63);
 314   }
 315 
 316   inline void rorw(Register Rd, Register Rn, unsigned imm) {
 317     extrw(Rd, Rn, Rn, imm);
 318   }
 319 
 320   inline void ror(Register Rd, Register Rn, unsigned imm) {
 321     extr(Rd, Rn, Rn, imm);
 322   }
 323 
 324   inline void sxtbw(Register Rd, Register Rn) {
 325     sbfmw(Rd, Rn, 0, 7);
 326   }
 327   inline void sxthw(Register Rd, Register Rn) {
 328     sbfmw(Rd, Rn, 0, 15);
 329   }
 330   inline void sxtb(Register Rd, Register Rn) {
 331     sbfm(Rd, Rn, 0, 7);
 332   }
 333   inline void sxth(Register Rd, Register Rn) {
 334     sbfm(Rd, Rn, 0, 15);
 335   }
 336   inline void sxtw(Register Rd, Register Rn) {
 337     sbfm(Rd, Rn, 0, 31);
 338   }
 339 
 340   inline void uxtbw(Register Rd, Register Rn) {
 341     ubfmw(Rd, Rn, 0, 7);
 342   }
 343   inline void uxthw(Register Rd, Register Rn) {
 344     ubfmw(Rd, Rn, 0, 15);
 345   }
 346   inline void uxtb(Register Rd, Register Rn) {
 347     ubfm(Rd, Rn, 0, 7);
 348   }
 349   inline void uxth(Register Rd, Register Rn) {
 350     ubfm(Rd, Rn, 0, 15);
 351   }
 352   inline void uxtw(Register Rd, Register Rn) {
 353     ubfm(Rd, Rn, 0, 31);
 354   }
 355 
 356   inline void cmnw(Register Rn, Register Rm) {
 357     addsw(zr, Rn, Rm);
 358   }
 359   inline void cmn(Register Rn, Register Rm) {
 360     adds(zr, Rn, Rm);
 361   }
 362 
 363   inline void cmpw(Register Rn, Register Rm) {
 364     subsw(zr, Rn, Rm);
 365   }
 366   inline void cmp(Register Rn, Register Rm) {
 367     subs(zr, Rn, Rm);
 368   }
 369 
 370   inline void negw(Register Rd, Register Rn) {
 371     subw(Rd, zr, Rn);
 372   }
 373 
 374   inline void neg(Register Rd, Register Rn) {
 375     sub(Rd, zr, Rn);
 376   }
 377 
 378   inline void negsw(Register Rd, Register Rn) {
 379     subsw(Rd, zr, Rn);
 380   }
 381 
 382   inline void negs(Register Rd, Register Rn) {
 383     subs(Rd, zr, Rn);
 384   }
 385 
 386   inline void cmnw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 387     addsw(zr, Rn, Rm, kind, shift);
 388   }
 389   inline void cmn(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 390     adds(zr, Rn, Rm, kind, shift);
 391   }
 392 
 393   inline void cmpw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 394     subsw(zr, Rn, Rm, kind, shift);
 395   }
 396   inline void cmp(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
 397     subs(zr, Rn, Rm, kind, shift);
 398   }
 399 
 400   inline void negw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 401     subw(Rd, zr, Rn, kind, shift);
 402   }
 403 
 404   inline void neg(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 405     sub(Rd, zr, Rn, kind, shift);
 406   }
 407 
 408   inline void negsw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 409     subsw(Rd, zr, Rn, kind, shift);
 410   }
 411 
 412   inline void negs(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
 413     subs(Rd, zr, Rn, kind, shift);
 414   }
 415 
 416   inline void mnegw(Register Rd, Register Rn, Register Rm) {
 417     msubw(Rd, Rn, Rm, zr);
 418   }
 419   inline void mneg(Register Rd, Register Rn, Register Rm) {
 420     msub(Rd, Rn, Rm, zr);
 421   }
 422 
 423   inline void mulw(Register Rd, Register Rn, Register Rm) {
 424     maddw(Rd, Rn, Rm, zr);
 425   }
 426   inline void mul(Register Rd, Register Rn, Register Rm) {
 427     madd(Rd, Rn, Rm, zr);
 428   }
 429 
 430   inline void smnegl(Register Rd, Register Rn, Register Rm) {
 431     smsubl(Rd, Rn, Rm, zr);
 432   }
 433   inline void smull(Register Rd, Register Rn, Register Rm) {
 434     smaddl(Rd, Rn, Rm, zr);
 435   }
 436 
 437   inline void umnegl(Register Rd, Register Rn, Register Rm) {
 438     umsubl(Rd, Rn, Rm, zr);
 439   }
 440   inline void umull(Register Rd, Register Rn, Register Rm) {
 441     umaddl(Rd, Rn, Rm, zr);
 442   }
 443 
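  // The extra nop() emitted by the wrapper below before a multiply-accumulate
  // whose accumulator is a real register (Ra != zr) works around a Cortex-A53
  // multiply-accumulate erratum (hence the VM_Version::supports_a53mac() guard):
  // it keeps the multiply-accumulate from directly following a memory access.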
 444 #define WRAP(INSN)                                                            \
 445   void INSN(Register Rd, Register Rn, Register Rm, Register Ra) {             \
 446     if (VM_Version::supports_a53mac() && Ra != zr)                            \
 447       nop();                                                                  \
 448     Assembler::INSN(Rd, Rn, Rm, Ra);                                          \
 449   }
 450 
 451   WRAP(madd) WRAP(msub) WRAP(maddw) WRAP(msubw)
 452   WRAP(smaddl) WRAP(smsubl) WRAP(umaddl) WRAP(umsubl)
 453 #undef WRAP
 454 
 455 
 456   // macro assembly operations needed for aarch64
 457 
  // First, two private routines for loading 32 bit or 64 bit constants
 459 private:
 460 
 461   void mov_immediate64(Register dst, uint64_t imm64);
 462   void mov_immediate32(Register dst, uint32_t imm32);
 463 
 464   int push(unsigned int bitset, Register stack);
 465   int pop(unsigned int bitset, Register stack);
 466 
 467   int push_fp(unsigned int bitset, Register stack);
 468   int pop_fp(unsigned int bitset, Register stack);
 469 
 470   int push_p(unsigned int bitset, Register stack);
 471   int pop_p(unsigned int bitset, Register stack);
 472 
 473   void mov(Register dst, Address a);
 474 
 475 public:
 476   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
 477   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
 478 
 479   void push_fp(FloatRegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
 480   void pop_fp(FloatRegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }
 481 
 482   static RegSet call_clobbered_gp_registers();
 483 
 484   void push_p(PRegSet regs, Register stack) { if (regs.bits()) push_p(regs.bits(), stack); }
 485   void pop_p(PRegSet regs, Register stack) { if (regs.bits()) pop_p(regs.bits(), stack); }
 486 
 487   // Push and pop everything that might be clobbered by a native
 488   // runtime call except rscratch1 and rscratch2.  (They are always
 489   // scratch, so we don't have to protect them.)  Only save the lower
 490   // 64 bits of each vector register. Additional registers can be excluded
 491   // in a passed RegSet.
 492   void push_call_clobbered_registers_except(RegSet exclude);
 493   void pop_call_clobbered_registers_except(RegSet exclude);
 494 
 495   void push_call_clobbered_registers() {
 496     push_call_clobbered_registers_except(RegSet());
 497   }
 498   void pop_call_clobbered_registers() {
 499     pop_call_clobbered_registers_except(RegSet());
 500   }
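
  // Illustrative use (assuming r0 holds a live result that must survive):
  //   push_call_clobbered_registers_except(RegSet::of(r0));
  //   ... call into the native runtime ...
  //   pop_call_clobbered_registers_except(RegSet::of(r0));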
 501 
 502 
 503   // now mov instructions for loading absolute addresses and 32 or
 504   // 64 bit integers
 505 
 506   inline void mov(Register dst, address addr)             { mov_immediate64(dst, (uint64_t)addr); }
 507 
 508   template<typename T, ENABLE_IF(std::is_integral<T>::value)>
 509   inline void mov(Register dst, T o)                      { mov_immediate64(dst, (uint64_t)o); }
 510 
 511   inline void movw(Register dst, uint32_t imm32)          { mov_immediate32(dst, imm32); }
 512 
 513   void mov(Register dst, RegisterOrConstant src) {
 514     if (src.is_register())
 515       mov(dst, src.as_register());
 516     else
 517       mov(dst, src.as_constant());
 518   }
 519 
 520   void movptr(Register r, uintptr_t imm64);
 521 
 522   void mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64);
 523 
 524   void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
 525     orr(Vd, T, Vn, Vn);
 526   }
 527 
 528 
 529 public:
 530 
  // Generalized Test Bit And Branch, including a "far" variety which
  // can reach targets more than 32KiB away (beyond the range of tbz/tbnz).
 533   void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool isfar = false) {
 534     assert(cond == EQ || cond == NE, "must be");
 535 
 536     if (isfar)
 537       cond = ~cond;
 538 
 539     void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
 540     if (cond == Assembler::EQ)
 541       branch = &Assembler::tbz;
 542     else
 543       branch = &Assembler::tbnz;
 544 
 545     if (isfar) {
 546       Label L;
 547       (this->*branch)(Rt, bitpos, L);
 548       b(dest);
 549       bind(L);
 550     } else {
 551       (this->*branch)(Rt, bitpos, dest);
 552     }
 553   }
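
  // Illustrative use (far_target is a hypothetical label): branch, possibly
  // beyond the +/-32KiB tbz/tbnz range, when bit 3 of r2 is set.
  //   tbr(Assembler::NE, r2, 3, far_target, /*isfar*/ true);
  // With isfar the condition is inverted and a short tbz/tbnz skips over an
  // unconditional b() to the distant target.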
 554 
 555   // macro instructions for accessing and updating floating point
 556   // status register
 557   //
 558   // FPSR : op1 == 011
 559   //        CRn == 0100
 560   //        CRm == 0100
 561   //        op2 == 001
 562 
 563   inline void get_fpsr(Register reg)
 564   {
 565     mrs(0b11, 0b0100, 0b0100, 0b001, reg);
 566   }
 567 
 568   inline void set_fpsr(Register reg)
 569   {
 570     msr(0b011, 0b0100, 0b0100, 0b001, reg);
 571   }
 572 
 573   inline void clear_fpsr()
 574   {
 575     msr(0b011, 0b0100, 0b0100, 0b001, zr);
 576   }
 577 
 578   // DCZID_EL0: op1 == 011
 579   //            CRn == 0000
 580   //            CRm == 0000
 581   //            op2 == 111
 582   inline void get_dczid_el0(Register reg)
 583   {
 584     mrs(0b011, 0b0000, 0b0000, 0b111, reg);
 585   }
 586 
 587   // CTR_EL0:   op1 == 011
 588   //            CRn == 0000
 589   //            CRm == 0000
 590   //            op2 == 001
 591   inline void get_ctr_el0(Register reg)
 592   {
 593     mrs(0b011, 0b0000, 0b0000, 0b001, reg);
 594   }
 595 
 596   // idiv variant which deals with MINLONG as dividend and -1 as divisor
 597   int corrected_idivl(Register result, Register ra, Register rb,
 598                       bool want_remainder, Register tmp = rscratch1);
 599   int corrected_idivq(Register result, Register ra, Register rb,
 600                       bool want_remainder, Register tmp = rscratch1);
 601 
 602   // Support for NULL-checks
 603   //
 604   // Generates code that causes a NULL OS exception if the content of reg is NULL.
 605   // If the accessed location is M[reg + offset] and the offset is known, provide the
 606   // offset. No explicit code generation is needed if the offset is within a certain
 607   // range (0 <= offset <= page_size).
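  //
  // Illustrative use (field_offset is a hypothetical small, compile-time
  // constant): no check code is emitted because the load itself will fault
  // if r0 is NULL.
  //   null_check(r0, field_offset);
  //   ldr(r1, Address(r0, field_offset));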
 608 
 609   virtual void null_check(Register reg, int offset = -1);
 610   static bool needs_explicit_null_check(intptr_t offset);
 611   static bool uses_implicit_null_check(void* address);
 612 
 613   static address target_addr_for_insn(address insn_addr, unsigned insn);
 614   static address target_addr_for_insn_or_null(address insn_addr, unsigned insn);
 615   static address target_addr_for_insn(address insn_addr) {
 616     unsigned insn = *(unsigned*)insn_addr;
 617     return target_addr_for_insn(insn_addr, insn);
 618   }
 619   static address target_addr_for_insn_or_null(address insn_addr) {
 620     unsigned insn = *(unsigned*)insn_addr;
 621     return target_addr_for_insn_or_null(insn_addr, insn);
 622   }
 623 
 624   // Required platform-specific helpers for Label::patch_instructions.
 625   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 626   static int pd_patch_instruction_size(address branch, address target);
 627   static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
 628     pd_patch_instruction_size(branch, target);
 629   }
 630   static address pd_call_destination(address branch) {
 631     return target_addr_for_insn(branch);
 632   }
 633 #ifndef PRODUCT
 634   static void pd_print_patched_instruction(address branch);
 635 #endif
 636 
 637   static int patch_oop(address insn_addr, address o);
 638   static int patch_narrow_klass(address insn_addr, narrowKlass n);
 639 
 640   address emit_trampoline_stub(int insts_call_instruction_offset, address target);
 641   void emit_static_call_stub();
 642 
  // The following methods return the offset of the appropriate move instruction
 644 
 645   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 646   int load_unsigned_byte(Register dst, Address src);
 647   int load_unsigned_short(Register dst, Address src);
 648 
 649   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 650   int load_signed_byte(Register dst, Address src);
 651   int load_signed_short(Register dst, Address src);
 652 
 653   int load_signed_byte32(Register dst, Address src);
 654   int load_signed_short32(Register dst, Address src);
 655 
 656   // Support for sign-extension (hi:lo = extend_sign(lo))
 657   void extend_sign(Register hi, Register lo);
 658 
 659   // Load and store values by size and signed-ness
 660   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 661   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 662 
 663   // Support for inc/dec with optimal instruction selection depending on value
 664 
 665   // x86_64 aliases an unqualified register/address increment and
 666   // decrement to call incrementq and decrementq but also supports
 667   // explicitly sized calls to incrementq/decrementq or
 668   // incrementl/decrementl
 669 
  // For aarch64 the proper convention is to use
  // increment/decrement for 64 bit operations and
  // incrementw/decrementw for 32 bit operations. So when porting
  // x86_64 code we can leave calls to increment/decrement as is,
  // replace incrementq/decrementq with increment/decrement and
  // replace incrementl/decrementl with incrementw/decrementw.

  // n.b. increment/decrement calls with an Address destination
  // need a scratch register to load the value to be
  // incremented. increment/decrement calls which add or subtract a
  // constant value greater than 2^12 need a second scratch
  // register to hold the constant. So a register increment/decrement
  // may trash rscratch2, and an address increment/decrement may trash
  // both rscratch1 and rscratch2.
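  //
  // Illustrative uses (register choices are arbitrary):
  //   increment(r19, 1);                 // 64 bit register increment
  //   incrementw(Address(r20, 12), 4);   // 32 bit in-memory increment; uses scratch registers
  //   decrement(r21, 1 << 13);           // constant > 2^12, needs rscratch2 for the constant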
 684 
 685   void decrementw(Address dst, int value = 1);
 686   void decrementw(Register reg, int value = 1);
 687 
 688   void decrement(Register reg, int value = 1);
 689   void decrement(Address dst, int value = 1);
 690 
 691   void incrementw(Address dst, int value = 1);
 692   void incrementw(Register reg, int value = 1);
 693 
 694   void increment(Register reg, int value = 1);
 695   void increment(Address dst, int value = 1);
 696 
 697 
 698   // Alignment
 699   void align(int modulus);
 700 
 701   // Stack frame creation/removal
 702   void enter(bool strip_ret_addr = false);
 703   void leave();
 704 
 705   // ROP Protection
 706   void protect_return_address();
 707   void protect_return_address(Register return_reg, Register temp_reg);
 708   void authenticate_return_address(Register return_reg = lr);
 709   void authenticate_return_address(Register return_reg, Register temp_reg);
 710   void strip_return_address();
 711   void check_return_address(Register return_reg=lr) PRODUCT_RETURN;
 712 
 713   // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
 714   // The pointer will be loaded into the thread register.
 715   void get_thread(Register thread);
 716 
 717 
 718   // Support for VM calls
 719   //
 720   // It is imperative that all calls into the VM are handled via the call_VM macros.
 721   // They make sure that the stack linkage is setup correctly. call_VM's correspond
 722   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
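  //
  // Illustrative use (SomeRuntime::some_entry is a placeholder for a real
  // runtime entry point; r0 receives the oop result, if any):
  //   call_VM(r0, CAST_FROM_FN_PTR(address, SomeRuntime::some_entry), c_rarg1);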
 723 
 724 
 725   void call_VM(Register oop_result,
 726                address entry_point,
 727                bool check_exceptions = true);
 728   void call_VM(Register oop_result,
 729                address entry_point,
 730                Register arg_1,
 731                bool check_exceptions = true);
 732   void call_VM(Register oop_result,
 733                address entry_point,
 734                Register arg_1, Register arg_2,
 735                bool check_exceptions = true);
 736   void call_VM(Register oop_result,
 737                address entry_point,
 738                Register arg_1, Register arg_2, Register arg_3,
 739                bool check_exceptions = true);
 740 
 741   // Overloadings with last_Java_sp
 742   void call_VM(Register oop_result,
 743                Register last_java_sp,
 744                address entry_point,
 745                int number_of_arguments = 0,
 746                bool check_exceptions = true);
 747   void call_VM(Register oop_result,
 748                Register last_java_sp,
 749                address entry_point,
 750                Register arg_1, bool
 751                check_exceptions = true);
 752   void call_VM(Register oop_result,
 753                Register last_java_sp,
 754                address entry_point,
 755                Register arg_1, Register arg_2,
 756                bool check_exceptions = true);
 757   void call_VM(Register oop_result,
 758                Register last_java_sp,
 759                address entry_point,
 760                Register arg_1, Register arg_2, Register arg_3,
 761                bool check_exceptions = true);
 762 
 763   void get_vm_result  (Register oop_result, Register thread);
 764   void get_vm_result_2(Register metadata_result, Register thread);
 765 
 766   // These always tightly bind to MacroAssembler::call_VM_base
 767   // bypassing the virtual implementation
 768   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
 769   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
 770   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
 771   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
 772   void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
 773 
 774   void call_VM_leaf(address entry_point,
 775                     int number_of_arguments = 0);
 776   void call_VM_leaf(address entry_point,
 777                     Register arg_1);
 778   void call_VM_leaf(address entry_point,
 779                     Register arg_1, Register arg_2);
 780   void call_VM_leaf(address entry_point,
 781                     Register arg_1, Register arg_2, Register arg_3);
 782 
 783   // These always tightly bind to MacroAssembler::call_VM_leaf_base
 784   // bypassing the virtual implementation
 785   void super_call_VM_leaf(address entry_point);
 786   void super_call_VM_leaf(address entry_point, Register arg_1);
 787   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
 788   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
 789   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
 790 
 791   // last Java Frame (fills frame anchor)
 792   void set_last_Java_frame(Register last_java_sp,
 793                            Register last_java_fp,
 794                            address last_java_pc,
 795                            Register scratch);
 796 
 797   void set_last_Java_frame(Register last_java_sp,
 798                            Register last_java_fp,
 799                            Label &last_java_pc,
 800                            Register scratch);
 801 
 802   void set_last_Java_frame(Register last_java_sp,
 803                            Register last_java_fp,
 804                            Register last_java_pc,
 805                            Register scratch);
 806 
 807   void reset_last_Java_frame(Register thread);
 808 
 809   // thread in the default location (rthread)
 810   void reset_last_Java_frame(bool clear_fp);
 811 
 812   // Stores
 813   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
 814   void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)
 815 
 816   void resolve_jobject(Register value, Register thread, Register tmp);
 817 
 818   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 819   void c2bool(Register x);
 820 
 821   void load_method_holder_cld(Register rresult, Register rmethod);
 822   void load_method_holder(Register holder, Register method);
 823 
 824   // oop manipulations
 825   void load_nklass(Register dst, Register src);
 826   void load_klass(Register dst, Register src);
 827   void cmp_klass(Register oop, Register trial_klass, Register tmp);
 828 
 829   void resolve_weak_handle(Register result, Register tmp);
 830   void resolve_oop_handle(Register result, Register tmp = r5);
 831   void load_mirror(Register dst, Register method, Register tmp = r5);
 832 
 833   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 834                       Register tmp1, Register tmp_thread);
 835 
 836   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
 837                        Register tmp1, Register tmp_thread);
 838 
 839   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 840                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 841 
 842   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 843                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 844   void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
 845                       Register tmp_thread = noreg, DecoratorSet decorators = 0);
 846 
 847   // currently unimplemented
 848   // Used for storing NULL. All other oop constants should be
 849   // stored using routines that take a jobject.
 850   void store_heap_oop_null(Address dst);
 851 
 852   // This dummy is to prevent a call to store_heap_oop from
 853   // converting a zero (like NULL) into a Register by giving
 854   // the compiler two choices it can't resolve
 855 
 856   void store_heap_oop(Address dst, void* dummy);
 857 
 858   void encode_heap_oop(Register d, Register s);
 859   void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
 860   void decode_heap_oop(Register d, Register s);
 861   void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
 862   void encode_heap_oop_not_null(Register r);
 863   void decode_heap_oop_not_null(Register r);
 864   void encode_heap_oop_not_null(Register dst, Register src);
 865   void decode_heap_oop_not_null(Register dst, Register src);
 866 
 867   void set_narrow_oop(Register dst, jobject obj);
 868 
 869   void encode_klass_not_null(Register r);
 870   void decode_klass_not_null(Register r);
 871   void encode_klass_not_null(Register dst, Register src);
 872   void decode_klass_not_null(Register dst, Register src);
 873 
 874   void set_narrow_klass(Register dst, Klass* k);
 875 
  // If the heap base register is used, reinitialize it with the correct value
 877   void reinit_heapbase();
 878 
 879   DEBUG_ONLY(void verify_heapbase(const char* msg);)
 880 
 881   void push_CPU_state(bool save_vectors = false, bool use_sve = false,
 882                       int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
 883   void pop_CPU_state(bool restore_vectors = false, bool use_sve = false,
 884                      int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0);
 885 
  // Round reg up to a multiple of modulus (which must be a power of two)
 887   void round_to(Register reg, int modulus);
 888 
 889   // java.lang.Math::round intrinsics
 890   void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
 891   void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
 892 
 893   // allocation
 894   void eden_allocate(
 895     Register obj,                      // result: pointer to object after successful allocation
 896     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 897     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 898     Register t1,                       // temp register
 899     Label&   slow_case                 // continuation point if fast allocation fails
 900   );
 901   void tlab_allocate(
 902     Register obj,                      // result: pointer to object after successful allocation
 903     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 904     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 905     Register t1,                       // temp register
 906     Register t2,                       // temp register
 907     Label&   slow_case                 // continuation point if fast allocation fails
 908   );
 909   void verify_tlab();
 910 
 911   // interface method calling
 912   void lookup_interface_method(Register recv_klass,
 913                                Register intf_klass,
 914                                RegisterOrConstant itable_index,
 915                                Register method_result,
 916                                Register scan_temp,
 917                                Label& no_such_interface,
 918                    bool return_method = true);
 919 
 920   // virtual method calling
 921   // n.b. x86 allows RegisterOrConstant for vtable_index
 922   void lookup_virtual_method(Register recv_klass,
 923                              RegisterOrConstant vtable_index,
 924                              Register method_result);
 925 
 926   // Test sub_klass against super_klass, with fast and slow paths.
 927 
 928   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 929   // One of the three labels can be NULL, meaning take the fall-through.
 930   // If super_check_offset is -1, the value is loaded up from super_klass.
 931   // No registers are killed, except temp_reg.
 932   void check_klass_subtype_fast_path(Register sub_klass,
 933                                      Register super_klass,
 934                                      Register temp_reg,
 935                                      Label* L_success,
 936                                      Label* L_failure,
 937                                      Label* L_slow_path,
 938                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
 939 
 940   // The rest of the type check; must be wired to a corresponding fast path.
 941   // It does not repeat the fast path logic, so don't use it standalone.
 942   // The temp_reg and temp2_reg can be noreg, if no temps are available.
 943   // Updates the sub's secondary super cache as necessary.
 944   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
 945   void check_klass_subtype_slow_path(Register sub_klass,
 946                                      Register super_klass,
 947                                      Register temp_reg,
 948                                      Register temp2_reg,
 949                                      Label* L_success,
 950                                      Label* L_failure,
 951                                      bool set_cond_codes = false);
 952 
 953   // Simplified, combined version, good for typical uses.
 954   // Falls through on failure.
 955   void check_klass_subtype(Register sub_klass,
 956                            Register super_klass,
 957                            Register temp_reg,
 958                            Label& L_success);
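
  // Illustrative use of the combined form (labels and register choices are
  // hypothetical):
  //   Label is_subtype;
  //   check_klass_subtype(r4 /*sub_klass*/, r0 /*super_klass*/, rscratch1, is_subtype);
  //   // falling through here means the check failed
  //   b(failure);
  //   bind(is_subtype);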
 959 
 960   void clinit_barrier(Register klass,
 961                       Register thread,
 962                       Label* L_fast_path = NULL,
 963                       Label* L_slow_path = NULL);
 964 
 965   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
 966 
 967   void verify_sve_vector_length();
 968   void reinitialize_ptrue() {
 969     if (UseSVE > 0) {
 970       sve_ptrue(ptrue, B);
 971     }
 972   }
 973   void verify_ptrue();
 974 
 975   // Debugging
 976 
 977   // only if +VerifyOops
 978   void _verify_oop(Register reg, const char* s, const char* file, int line);
 979   void _verify_oop_addr(Address addr, const char * s, const char* file, int line);
 980 
 981   void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
 982     if (VerifyOops) {
 983       _verify_oop(reg, s, file, line);
 984     }
 985   }
 986   void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
 987     if (VerifyOops) {
 988       _verify_oop_addr(reg, s, file, line);
 989     }
 990   }
 991 
 992 // TODO: verify method and klass metadata (compare against vptr?)
 993   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
 994   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
 995 
 996 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
 997 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
 998 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
 999 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
1000 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
1001 
1002   // only if +VerifyFPU
1003   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
1004 
1005   // prints msg, dumps registers and stops execution
1006   void stop(const char* msg);
1007 
1008   static void debug64(char* msg, int64_t pc, int64_t regs[]);
1009 
1010   void untested()                                { stop("untested"); }
1011 
1012   void unimplemented(const char* what = "");
1013 
1014   void should_not_reach_here()                   { stop("should not reach here"); }
1015 
1016   // Stack overflow checking
1017   void bang_stack_with_offset(int offset) {
1018     // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
1020     sub(rscratch2, sp, offset);
1021     str(zr, Address(rscratch2));
1022   }
1023 
  // Writes to successive stack pages until the given offset is reached, to
  // check for stack overflow plus shadow pages.  Also clobbers tmp.
1026   void bang_stack_size(Register size, Register tmp);
1027 
1028   // Check for reserved stack access in method being exited (for JIT)
1029   void reserved_stack_check();
1030 
1031   // Arithmetics
1032 
1033   void addptr(const Address &dst, int32_t src);
1034   void cmpptr(Register src1, Address src2);
1035 
1036   void cmpoop(Register obj1, Register obj2);
1037 
1038   // Various forms of CAS
1039 
1040   void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
1041                           Label &succeed, Label *fail);
1042   void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
1043                   Label &succeed, Label *fail);
1044 
1045   void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
1046                   Label &succeed, Label *fail);
1047 
1048   void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
1049   void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
1050   void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
1051   void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);
1052 
1053   void atomic_xchg(Register prev, Register newv, Register addr);
1054   void atomic_xchgw(Register prev, Register newv, Register addr);
1055   void atomic_xchgl(Register prev, Register newv, Register addr);
1056   void atomic_xchglw(Register prev, Register newv, Register addr);
1057   void atomic_xchgal(Register prev, Register newv, Register addr);
1058   void atomic_xchgalw(Register prev, Register newv, Register addr);
1059 
1060   void orptr(Address adr, RegisterOrConstant src) {
1061     ldr(rscratch1, adr);
1062     if (src.is_register())
1063       orr(rscratch1, rscratch1, src.as_register());
1064     else
1065       orr(rscratch1, rscratch1, src.as_constant());
1066     str(rscratch1, adr);
1067   }
1068 
1069   // A generic CAS; success or failure is in the EQ flag.
1070   // Clobbers rscratch1
1071   void cmpxchg(Register addr, Register expected, Register new_val,
1072                enum operand_size size,
1073                bool acquire, bool release, bool weak,
1074                Register result);
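
  // Illustrative use (hypothetical registers and label; a strong, sequentially
  // consistent CAS of a 64-bit word):
  //   cmpxchg(r0 /*addr*/, r1 /*expected*/, r2 /*new_val*/, Assembler::xword,
  //           /*acquire*/ true, /*release*/ true, /*weak*/ false, r3 /*result*/);
  //   br(Assembler::NE, cas_failed);   // EQ is set on success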
1075 
1076 private:
1077   void compare_eq(Register rn, Register rm, enum operand_size size);
1078 
1079 #ifdef ASSERT
  // Template shorthand for cleaning up after a failed trampoline call
  // generation (see trampoline_call() below), when a set of Labels must
  // be reset before returning.
1083   template<typename Label, typename... More>
1084   void reset_labels(Label &lbl, More&... more) {
1085     lbl.reset(); reset_labels(more...);
1086   }
1087   template<typename Label>
1088   void reset_labels(Label &lbl) {
1089     lbl.reset();
1090   }
1091 #endif
1092 
1093 public:
1094   // Calls
1095 
1096   address trampoline_call(Address entry, CodeBuffer* cbuf = NULL);
1097 
1098   static bool far_branches() {
1099     return ReservedCodeCacheSize > branch_range;
1100   }
1101 
  // Check if branches to the non-nmethod section require a far jump
1103   static bool codestub_branch_needs_far_jump() {
1104     return CodeCache::max_distance_to_non_nmethod() > branch_range;
1105   }
1106 
1107   // Jumps that can reach anywhere in the code cache.
1108   // Trashes tmp.
1109   void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
1110   int far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
1111 
1112   static int far_codestub_branch_size() {
1113     if (codestub_branch_needs_far_jump()) {
1114       return 3 * 4;  // adrp, add, br
1115     } else {
1116       return 4;
1117     }
1118   }
1119 
1120   // Emit the CompiledIC call idiom
1121   address ic_call(address entry, jint method_index = 0);
1122 
1123 public:
1124 
1125   // Data
1126 
1127   void mov_metadata(Register dst, Metadata* obj);
1128   Address allocate_metadata_address(Metadata* obj);
1129   Address constant_oop_address(jobject obj);
1130 
1131   void movoop(Register dst, jobject obj, bool immediate = false);
1132 
1133   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1134   void kernel_crc32(Register crc, Register buf, Register len,
1135         Register table0, Register table1, Register table2, Register table3,
1136         Register tmp, Register tmp2, Register tmp3);
1137   // CRC32 code for java.util.zip.CRC32C::updateBytes() intrinsic.
1138   void kernel_crc32c(Register crc, Register buf, Register len,
1139         Register table0, Register table1, Register table2, Register table3,
1140         Register tmp, Register tmp2, Register tmp3);
1141 
1142   // Stack push and pop individual 64 bit registers
1143   void push(Register src);
1144   void pop(Register dst);
1145 
1146   void repne_scan(Register addr, Register value, Register count,
1147                   Register scratch);
1148   void repne_scanw(Register addr, Register value, Register count,
1149                    Register scratch);
1150 
1151   typedef void (MacroAssembler::* add_sub_imm_insn)(Register Rd, Register Rn, unsigned imm);
1152   typedef void (MacroAssembler::* add_sub_reg_insn)(Register Rd, Register Rn, Register Rm, enum shift_kind kind, unsigned shift);
1153 
1154   // If a constant does not fit in an immediate field, generate some
1155   // number of MOV instructions and then perform the operation
1156   void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
1157                              add_sub_imm_insn insn1,
1158                              add_sub_reg_insn insn2);
  // Separate version which sets the flags
1160   void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
1161                              add_sub_imm_insn insn1,
1162                              add_sub_reg_insn insn2);
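
  // For example, add(r0, r1, 0x12345678) cannot encode the constant as an
  // arithmetic immediate; roughly speaking, the wrapper materializes the
  // constant in a register with one or more MOV instructions and then emits
  // the register-register form of the add.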
1163 
1164 #define WRAP(INSN)                                                      \
1165   void INSN(Register Rd, Register Rn, unsigned imm) {                   \
1166     wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
1167   }                                                                     \
1168                                                                         \
1169   void INSN(Register Rd, Register Rn, Register Rm,                      \
1170              enum shift_kind kind, unsigned shift = 0) {                \
1171     Assembler::INSN(Rd, Rn, Rm, kind, shift);                           \
1172   }                                                                     \
1173                                                                         \
1174   void INSN(Register Rd, Register Rn, Register Rm) {                    \
1175     Assembler::INSN(Rd, Rn, Rm);                                        \
1176   }                                                                     \
1177                                                                         \
1178   void INSN(Register Rd, Register Rn, Register Rm,                      \
1179            ext::operation option, int amount = 0) {                     \
1180     Assembler::INSN(Rd, Rn, Rm, option, amount);                        \
1181   }
1182 
1183   WRAP(add) WRAP(addw) WRAP(sub) WRAP(subw)
1184 
1185 #undef WRAP
1186 #define WRAP(INSN)                                                      \
1187   void INSN(Register Rd, Register Rn, unsigned imm) {                   \
1188     wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
1189   }                                                                     \
1190                                                                         \
1191   void INSN(Register Rd, Register Rn, Register Rm,                      \
1192              enum shift_kind kind, unsigned shift = 0) {                \
1193     Assembler::INSN(Rd, Rn, Rm, kind, shift);                           \
1194   }                                                                     \
1195                                                                         \
1196   void INSN(Register Rd, Register Rn, Register Rm) {                    \
1197     Assembler::INSN(Rd, Rn, Rm);                                        \
1198   }                                                                     \
1199                                                                         \
1200   void INSN(Register Rd, Register Rn, Register Rm,                      \
1201            ext::operation option, int amount = 0) {                     \
1202     Assembler::INSN(Rd, Rn, Rm, option, amount);                        \
1203   }
1204 
1205   WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)
1206 
1207   void add(Register Rd, Register Rn, RegisterOrConstant increment);
1208   void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1209   void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1210   void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1211 
1212   void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
1213 
1214   void tableswitch(Register index, jint lowbound, jint highbound,
1215                    Label &jumptable, Label &jumptable_end, int stride = 1) {
1216     adr(rscratch1, jumptable);
1217     subsw(rscratch2, index, lowbound);
1218     subsw(zr, rscratch2, highbound - lowbound);
1219     br(Assembler::HS, jumptable_end);
1220     add(rscratch1, rscratch1, rscratch2,
1221         ext::sxtw, exact_log2(stride * Assembler::instruction_size));
1222     br(rscratch1);
1223   }
1224 
1225   // Form an address from base + offset in Rd.  Rd may or may not
1226   // actually be used: you must use the Address that is returned.  It
1227   // is up to you to ensure that the shift provided matches the size
1228   // of your data.
1229   Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
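
  // Illustrative use (hypothetical registers and offset), for an 8-byte
  // access, so shift == 3:
  //   Address a = form_address(rscratch2, r10, large_offset, 3);
  //   ldr(r11, a);   // rscratch2 is only used if the offset could not be encoded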
1230 
1231   // Return true iff an address is within the 48-bit AArch64 address
1232   // space.
1233   bool is_valid_AArch64_address(address a) {
1234     return ((uint64_t)a >> 48) == 0;
1235   }
1236 
1237   // Load the base of the cardtable byte map into reg.
1238   void load_byte_map_base(Register reg);
1239 
1240   // Prolog generator routines to support switch between x86 code and
1241   // generated ARM code
1242 
1243   // routine to generate an x86 prolog for a stub function which
1244   // bootstraps into the generated ARM code which directly follows the
1245   // stub
1246   //
1247 
1248   public:
1249 
1250   void ldr_constant(Register dest, const Address &const_addr) {
1251     if (NearCpool) {
1252       ldr(dest, const_addr);
1253     } else {
1254       uint64_t offset;
1255       adrp(dest, InternalAddress(const_addr.target()), offset);
1256       ldr(dest, Address(dest, offset));
1257     }
1258   }
1259 
1260   address read_polling_page(Register r, relocInfo::relocType rtype);
1261   void get_polling_page(Register dest, relocInfo::relocType rtype);
1262 
1263   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1264   void update_byte_crc32(Register crc, Register val, Register table);
1265   void update_word_crc32(Register crc, Register v, Register tmp,
1266         Register table0, Register table1, Register table2, Register table3,
1267         bool upper = false);
1268 
1269   address count_positives(Register ary1, Register len, Register result);
1270 
1271   address arrays_equals(Register a1, Register a2, Register result, Register cnt1,
1272                         Register tmp1, Register tmp2, Register tmp3, int elem_size);
1273 
1274   void string_equals(Register a1, Register a2, Register result, Register cnt1,
1275                      int elem_size);
1276 
1277   void fill_words(Register base, Register cnt, Register value);
1278   void zero_words(Register base, uint64_t cnt);
1279   address zero_words(Register ptr, Register cnt);
1280   void zero_dcache_blocks(Register base, Register cnt);
1281 
1282   static const int zero_words_block_size;
1283 
1284   address byte_array_inflate(Register src, Register dst, Register len,
1285                              FloatRegister vtmp1, FloatRegister vtmp2,
1286                              FloatRegister vtmp3, Register tmp4);
1287 
1288   void char_array_compress(Register src, Register dst, Register len,
1289                            Register res,
1290                            FloatRegister vtmp0, FloatRegister vtmp1,
1291                            FloatRegister vtmp2, FloatRegister vtmp3);
1292 
1293   void encode_iso_array(Register src, Register dst,
1294                         Register len, Register res, bool ascii,
1295                         FloatRegister vtmp0, FloatRegister vtmp1,
1296                         FloatRegister vtmp2, FloatRegister vtmp3);
1297 
1298   void fast_log(FloatRegister vtmp0, FloatRegister vtmp1, FloatRegister vtmp2,
1299                 FloatRegister vtmp3, FloatRegister vtmp4, FloatRegister vtmp5,
1300                 FloatRegister tmpC1, FloatRegister tmpC2, FloatRegister tmpC3,
1301                 FloatRegister tmpC4, Register tmp1, Register tmp2,
1302                 Register tmp3, Register tmp4, Register tmp5);
1303   void generate_dsin_dcos(bool isCos, address npio2_hw, address two_over_pi,
1304       address pio2, address dsin_coef, address dcos_coef);
1305  private:
1306   // begin trigonometric functions support block
1307   void generate__ieee754_rem_pio2(address npio2_hw, address two_over_pi, address pio2);
1308   void generate__kernel_rem_pio2(address two_over_pi, address pio2);
1309   void generate_kernel_sin(FloatRegister x, bool iyIsOne, address dsin_coef);
1310   void generate_kernel_cos(FloatRegister x, address dcos_coef);
1311   // end trigonometric functions support block
1312   void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
1313                        Register src1, Register src2);
1314   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
1315     add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2);
1316   }
1317   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1318                              Register y, Register y_idx, Register z,
1319                              Register carry, Register product,
1320                              Register idx, Register kdx);
1321   void multiply_128_x_128_loop(Register y, Register z,
1322                                Register carry, Register carry2,
1323                                Register idx, Register jdx,
1324                                Register yz_idx1, Register yz_idx2,
1325                                Register tmp, Register tmp3, Register tmp4,
1326                                Register tmp7, Register product_hi);
1327   void kernel_crc32_using_crc32(Register crc, Register buf,
1328         Register len, Register tmp0, Register tmp1, Register tmp2,
1329         Register tmp3);
1330   void kernel_crc32c_using_crc32c(Register crc, Register buf,
1331         Register len, Register tmp0, Register tmp1, Register tmp2,
1332         Register tmp3);
1333 
1334   void ghash_modmul (FloatRegister result,
1335                      FloatRegister result_lo, FloatRegister result_hi, FloatRegister b,
1336                      FloatRegister a, FloatRegister vzr, FloatRegister a1_xor_a0, FloatRegister p,
1337                      FloatRegister t1, FloatRegister t2, FloatRegister t3);
1338   void ghash_load_wide(int index, Register data, FloatRegister result, FloatRegister state);
1339 public:
1340   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
1341                        Register zlen, Register tmp1, Register tmp2, Register tmp3,
1342                        Register tmp4, Register tmp5, Register tmp6, Register tmp7);
1343   void mul_add(Register out, Register in, Register offs, Register len, Register k);
1344   void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
1345                       FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
1346                       FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3);
1347   void ghash_multiply_wide(int index,
1348                            FloatRegister result_lo, FloatRegister result_hi,
1349                            FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
1350                            FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3);
1351   void ghash_reduce(FloatRegister result, FloatRegister lo, FloatRegister hi,
1352                     FloatRegister p, FloatRegister z, FloatRegister t1);
1353   void ghash_reduce_wide(int index, FloatRegister result, FloatRegister lo, FloatRegister hi,
1354                     FloatRegister p, FloatRegister z, FloatRegister t1);
1355   void ghash_processBlocks_wide(address p, Register state, Register subkeyH,
1356                                 Register data, Register blocks, int unrolls);
1357 
1358 
1359   void aesenc_loadkeys(Register key, Register keylen);
1360   void aesecb_encrypt(Register from, Register to, Register keylen,
1361                       FloatRegister data = v0, int unrolls = 1);
1362   void aesecb_decrypt(Register from, Register to, Register key, Register keylen);
1363   void aes_round(FloatRegister input, FloatRegister subkey);
1364 
  // Emit an ISB after a safepoint, since code may have been modified while the
  // thread was stopped.
1366   void safepoint_isb();
1367 
1368 private:
1369   // Return the effective address r + (r1 << ext) + offset.
1370   // Uses rscratch2.
1371   Address offsetted_address(Register r, Register r1, Address::extend ext,
1372                             int offset, int size);
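  // For illustration (values are hypothetical): with ext describing a left
  // shift by 2 and offset = 16, the returned Address denotes
  // r + (r1 << 2) + 16; rscratch2 may be consumed to materialize the sum when
  // the combination cannot be encoded in a single addressing mode.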
1373 
1374 private:
  // Returns an address on the stack which is reachable with a ldr/str of the
  // given size. Uses rscratch2 if the address is not directly reachable.
1377   Address spill_address(int size, int offset, Register tmp=rscratch2);
1378   Address sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp=rscratch2);
1379 
1380   bool merge_alignment_check(Register base, size_t size, int64_t cur_offset, int64_t prev_offset) const;
1381 
1382   // Check whether two loads/stores can be merged into ldp/stp.
1383   bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;
1384 
1385   // Merge current load/store with previous load/store into ldp/stp.
1386   void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store);
1387 
  // Try to merge two loads/stores into ldp/stp. Returns true on success,
  // false otherwise.
1389   bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store);
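  // For illustration (registers and offsets are hypothetical): two adjacent
  // 8-byte stores to consecutive stack slots, e.g.
  //   str(r10, Address(sp, 16));
  //   str(r11, Address(sp, 24));
  // are candidates for merging into a single
  //   stp(r10, r11, Address(sp, 16));
  // provided the alignment and offset checks above succeed.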
1390 
1391 public:
1392   void spill(Register Rx, bool is64, int offset) {
1393     if (is64) {
1394       str(Rx, spill_address(8, offset));
1395     } else {
1396       strw(Rx, spill_address(4, offset));
1397     }
1398   }
1399   void spill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
1400     str(Vx, T, spill_address(1 << (int)T, offset));
1401   }
1402 
1403   void spill_sve_vector(FloatRegister Zx, int offset, int vector_reg_size_in_bytes) {
1404     sve_str(Zx, sve_spill_address(vector_reg_size_in_bytes, offset));
1405   }
1406   void spill_sve_predicate(PRegister pr, int offset, int predicate_reg_size_in_bytes) {
1407     sve_str(pr, sve_spill_address(predicate_reg_size_in_bytes, offset));
1408   }
1409 
1410   void unspill(Register Rx, bool is64, int offset) {
1411     if (is64) {
1412       ldr(Rx, spill_address(8, offset));
1413     } else {
1414       ldrw(Rx, spill_address(4, offset));
1415     }
1416   }
1417   void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
1418     ldr(Vx, T, spill_address(1 << (int)T, offset));
1419   }
1420 
1421   void unspill_sve_vector(FloatRegister Zx, int offset, int vector_reg_size_in_bytes) {
1422     sve_ldr(Zx, sve_spill_address(vector_reg_size_in_bytes, offset));
1423   }
1424   void unspill_sve_predicate(PRegister pr, int offset, int predicate_reg_size_in_bytes) {
1425     sve_ldr(pr, sve_spill_address(predicate_reg_size_in_bytes, offset));
1426   }
1427 
1428   void spill_copy128(int src_offset, int dst_offset,
1429                      Register tmp1=rscratch1, Register tmp2=rscratch2) {
    // ldp/stp of X registers encode an 8-byte-scaled immediate offset, so the
    // paired form is only usable for 8-byte-aligned offsets below 512; larger
    // or unaligned offsets fall back to two spill/unspill round trips.
    if (src_offset < 512 && (src_offset & 7) == 0 &&
        dst_offset < 512 && (dst_offset & 7) == 0) {
1432       ldp(tmp1, tmp2, Address(sp, src_offset));
1433       stp(tmp1, tmp2, Address(sp, dst_offset));
1434     } else {
1435       unspill(tmp1, true, src_offset);
1436       spill(tmp1, true, dst_offset);
1437       unspill(tmp1, true, src_offset+8);
1438       spill(tmp1, true, dst_offset+8);
1439     }
1440   }
1441   void spill_copy_sve_vector_stack_to_stack(int src_offset, int dst_offset,
1442                                             int sve_vec_reg_size_in_bytes) {
1443     assert(sve_vec_reg_size_in_bytes % 16 == 0, "unexpected sve vector reg size");
1444     for (int i = 0; i < sve_vec_reg_size_in_bytes / 16; i++) {
1445       spill_copy128(src_offset, dst_offset);
1446       src_offset += 16;
1447       dst_offset += 16;
1448     }
1449   }
1450   void spill_copy_sve_predicate_stack_to_stack(int src_offset, int dst_offset,
1451                                                int sve_predicate_reg_size_in_bytes) {
    // ptrue is clobbered as a temporary for the copy, so restore it to
    // all-true afterwards.
    sve_ldr(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, src_offset));
    sve_str(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, dst_offset));
    reinitialize_ptrue();
1455   }
  // Data cache writeback support (used, for example, by the stubs backing the
  // Unsafe writeback intrinsics): write back the cache line containing `line`,
  // and emit the pre-/post-writeback ordering barrier.
  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);
1458 
1459   // Code for java.lang.Thread::onSpinWait() intrinsic.
1460   void spin_wait();
1461 
1462 private:
  // Check that the current thread doesn't need a cross-modify fence.
1464   void verify_cross_modify_fence_not_required() PRODUCT_RETURN;
1465 
1466 };
1467 
1468 #ifdef ASSERT
1469 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
1470 #endif
1471 
1472 /**
1473  * class SkipIfEqual:
1474  *
1475  * Instantiating this class will result in assembly code being output that will
1476  * jump around any code emitted between the creation of the instance and it's
1477  * automatic destruction at the end of a scope block, depending on the value of
1478  * the flag passed to the constructor, which will be checked at run-time.
1479  */
1480 class SkipIfEqual {
1481  private:
1482   MacroAssembler* _masm;
1483   Label _label;
1484 
1485  public:
1486    SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1487    ~SkipIfEqual();
1488 };
1489 
struct tableswitch {
  Register _reg;
  int _insn_index;
  jint _first_key;
  jint _last_key;
  Label _after;
  Label _branches;
};
1496 
1497 #endif // CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP