1 /* 2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP 27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP 28 29 #include "asm/assembler.inline.hpp" 30 #include "code/vmreg.hpp" 31 #include "metaprogramming/enableIf.hpp" 32 #include "oops/compressedOops.hpp" 33 #include "oops/compressedKlass.hpp" 34 #include "runtime/vm_version.hpp" 35 #include "utilities/macros.hpp" 36 #include "utilities/powerOfTwo.hpp" 37 #include "runtime/signature.hpp" 38 39 40 class ciInlineKlass; 41 42 class OopMap; 43 44 // MacroAssembler extends Assembler by frequently used macros. 45 // 46 // Instructions for which a 'better' code sequence exists depending 47 // on arguments should also go in here. 48 49 class MacroAssembler: public Assembler { 50 friend class LIR_Assembler; 51 52 public: 53 using Assembler::mov; 54 using Assembler::movi; 55 56 protected: 57 58 // Support for VM calls 59 // 60 // This is the base routine called by the different versions of call_VM_leaf. The interpreter 61 // may customize this version by overriding it for its purposes (e.g., to save/restore 62 // additional registers when doing a VM call). 63 virtual void call_VM_leaf_base( 64 address entry_point, // the entry point 65 int number_of_arguments, // the number of arguments to pop after the call 66 Label *retaddr = nullptr 67 ); 68 69 virtual void call_VM_leaf_base( 70 address entry_point, // the entry point 71 int number_of_arguments, // the number of arguments to pop after the call 72 Label &retaddr) { 73 call_VM_leaf_base(entry_point, number_of_arguments, &retaddr); 74 } 75 76 // This is the base routine called by the different versions of call_VM. The interpreter 77 // may customize this version by overriding it for its purposes (e.g., to save/restore 78 // additional registers when doing a VM call). 79 // 80 // If no java_thread register is specified (noreg) than rthread will be used instead. call_VM_base 81 // returns the register which contains the thread upon return. If a thread register has been 82 // specified, the return value will correspond to that register. If no last_java_sp is specified 83 // (noreg) than rsp will be used instead. 84 virtual void call_VM_base( // returns the register containing the thread upon return 85 Register oop_result, // where an oop-result ends up if any; use noreg otherwise 86 Register java_thread, // the thread if computed before ; use noreg otherwise 87 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise 88 address entry_point, // the entry point 89 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call 90 bool check_exceptions // whether to check for pending exceptions after return 91 ); 92 93 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); 94 95 enum KlassDecodeMode { 96 KlassDecodeNone, 97 KlassDecodeZero, 98 KlassDecodeXor, 99 KlassDecodeMovk 100 }; 101 102 KlassDecodeMode klass_decode_mode(); 103 104 private: 105 static KlassDecodeMode _klass_decode_mode; 106 107 public: 108 MacroAssembler(CodeBuffer* code) : Assembler(code) {} 109 110 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. 111 // The implementation is only non-empty for the InterpreterMacroAssembler, 112 // as only the interpreter handles PopFrame and ForceEarlyReturn requests. 113 virtual void check_and_handle_popframe(Register java_thread); 114 virtual void check_and_handle_earlyret(Register java_thread); 115 116 void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1); 117 void rt_call(address dest, Register tmp = rscratch1); 118 119 // Load Effective Address 120 void lea(Register r, const Address &a) { 121 InstructionMark im(this); 122 a.lea(this, r); 123 } 124 125 /* Sometimes we get misaligned loads and stores, usually from Unsafe 126 accesses, and these can exceed the offset range. */ 127 Address legitimize_address(const Address &a, int size, Register scratch) { 128 if (a.getMode() == Address::base_plus_offset) { 129 if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) { 130 block_comment("legitimize_address {"); 131 lea(scratch, a); 132 block_comment("} legitimize_address"); 133 return Address(scratch); 134 } 135 } 136 return a; 137 } 138 139 void addmw(Address a, Register incr, Register scratch) { 140 ldrw(scratch, a); 141 addw(scratch, scratch, incr); 142 strw(scratch, a); 143 } 144 145 // Add constant to memory word 146 void addmw(Address a, int imm, Register scratch) { 147 ldrw(scratch, a); 148 if (imm > 0) 149 addw(scratch, scratch, (unsigned)imm); 150 else 151 subw(scratch, scratch, (unsigned)-imm); 152 strw(scratch, a); 153 } 154 155 void bind(Label& L) { 156 Assembler::bind(L); 157 code()->clear_last_insn(); 158 } 159 160 void membar(Membar_mask_bits order_constraint); 161 162 using Assembler::ldr; 163 using Assembler::str; 164 using Assembler::ldrw; 165 using Assembler::strw; 166 167 void ldr(Register Rx, const Address &adr); 168 void ldrw(Register Rw, const Address &adr); 169 void str(Register Rx, const Address &adr); 170 void strw(Register Rx, const Address &adr); 171 172 // Frame creation and destruction shared between JITs. 173 void build_frame(int framesize); 174 void remove_frame(int framesize); 175 176 virtual void _call_Unimplemented(address call_site) { 177 mov(rscratch2, call_site); 178 } 179 180 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__ 181 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention." 182 // https://reviews.llvm.org/D3311 183 184 #ifdef _WIN64 185 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__) 186 #else 187 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__) 188 #endif 189 190 // aliases defined in AARCH64 spec 191 192 template<class T> 193 inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); } 194 195 inline void cmp(Register Rd, unsigned char imm8) { subs(zr, Rd, imm8); } 196 inline void cmp(Register Rd, unsigned imm) = delete; 197 198 template<class T> 199 inline void cmnw(Register Rd, T imm) { addsw(zr, Rd, imm); } 200 201 inline void cmn(Register Rd, unsigned char imm8) { adds(zr, Rd, imm8); } 202 inline void cmn(Register Rd, unsigned imm) = delete; 203 204 void cset(Register Rd, Assembler::Condition cond) { 205 csinc(Rd, zr, zr, ~cond); 206 } 207 void csetw(Register Rd, Assembler::Condition cond) { 208 csincw(Rd, zr, zr, ~cond); 209 } 210 211 void cneg(Register Rd, Register Rn, Assembler::Condition cond) { 212 csneg(Rd, Rn, Rn, ~cond); 213 } 214 void cnegw(Register Rd, Register Rn, Assembler::Condition cond) { 215 csnegw(Rd, Rn, Rn, ~cond); 216 } 217 218 inline void movw(Register Rd, Register Rn) { 219 if (Rd == sp || Rn == sp) { 220 Assembler::addw(Rd, Rn, 0U); 221 } else { 222 orrw(Rd, zr, Rn); 223 } 224 } 225 inline void mov(Register Rd, Register Rn) { 226 assert(Rd != r31_sp && Rn != r31_sp, "should be"); 227 if (Rd == Rn) { 228 } else if (Rd == sp || Rn == sp) { 229 Assembler::add(Rd, Rn, 0U); 230 } else { 231 orr(Rd, zr, Rn); 232 } 233 } 234 235 inline void moviw(Register Rd, unsigned imm) { orrw(Rd, zr, imm); } 236 inline void movi(Register Rd, unsigned imm) { orr(Rd, zr, imm); } 237 238 inline void tstw(Register Rd, Register Rn) { andsw(zr, Rd, Rn); } 239 inline void tst(Register Rd, Register Rn) { ands(zr, Rd, Rn); } 240 241 inline void tstw(Register Rd, uint64_t imm) { andsw(zr, Rd, imm); } 242 inline void tst(Register Rd, uint64_t imm) { ands(zr, Rd, imm); } 243 244 inline void bfiw(Register Rd, Register Rn, unsigned lsb, unsigned width) { 245 bfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1)); 246 } 247 inline void bfi(Register Rd, Register Rn, unsigned lsb, unsigned width) { 248 bfm(Rd, Rn, ((64 - lsb) & 63), (width - 1)); 249 } 250 251 inline void bfxilw(Register Rd, Register Rn, unsigned lsb, unsigned width) { 252 bfmw(Rd, Rn, lsb, (lsb + width - 1)); 253 } 254 inline void bfxil(Register Rd, Register Rn, unsigned lsb, unsigned width) { 255 bfm(Rd, Rn, lsb , (lsb + width - 1)); 256 } 257 258 inline void sbfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) { 259 sbfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1)); 260 } 261 inline void sbfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) { 262 sbfm(Rd, Rn, ((64 - lsb) & 63), (width - 1)); 263 } 264 265 inline void sbfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) { 266 sbfmw(Rd, Rn, lsb, (lsb + width - 1)); 267 } 268 inline void sbfx(Register Rd, Register Rn, unsigned lsb, unsigned width) { 269 sbfm(Rd, Rn, lsb , (lsb + width - 1)); 270 } 271 272 inline void ubfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) { 273 ubfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1)); 274 } 275 inline void ubfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) { 276 ubfm(Rd, Rn, ((64 - lsb) & 63), (width - 1)); 277 } 278 279 inline void ubfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) { 280 ubfmw(Rd, Rn, lsb, (lsb + width - 1)); 281 } 282 inline void ubfx(Register Rd, Register Rn, unsigned lsb, unsigned width) { 283 ubfm(Rd, Rn, lsb , (lsb + width - 1)); 284 } 285 286 inline void asrw(Register Rd, Register Rn, unsigned imm) { 287 sbfmw(Rd, Rn, imm, 31); 288 } 289 290 inline void asr(Register Rd, Register Rn, unsigned imm) { 291 sbfm(Rd, Rn, imm, 63); 292 } 293 294 inline void lslw(Register Rd, Register Rn, unsigned imm) { 295 ubfmw(Rd, Rn, ((32 - imm) & 31), (31 - imm)); 296 } 297 298 inline void lsl(Register Rd, Register Rn, unsigned imm) { 299 ubfm(Rd, Rn, ((64 - imm) & 63), (63 - imm)); 300 } 301 302 inline void lsrw(Register Rd, Register Rn, unsigned imm) { 303 ubfmw(Rd, Rn, imm, 31); 304 } 305 306 inline void lsr(Register Rd, Register Rn, unsigned imm) { 307 ubfm(Rd, Rn, imm, 63); 308 } 309 310 inline void rorw(Register Rd, Register Rn, unsigned imm) { 311 extrw(Rd, Rn, Rn, imm); 312 } 313 314 inline void ror(Register Rd, Register Rn, unsigned imm) { 315 extr(Rd, Rn, Rn, imm); 316 } 317 318 inline void sxtbw(Register Rd, Register Rn) { 319 sbfmw(Rd, Rn, 0, 7); 320 } 321 inline void sxthw(Register Rd, Register Rn) { 322 sbfmw(Rd, Rn, 0, 15); 323 } 324 inline void sxtb(Register Rd, Register Rn) { 325 sbfm(Rd, Rn, 0, 7); 326 } 327 inline void sxth(Register Rd, Register Rn) { 328 sbfm(Rd, Rn, 0, 15); 329 } 330 inline void sxtw(Register Rd, Register Rn) { 331 sbfm(Rd, Rn, 0, 31); 332 } 333 334 inline void uxtbw(Register Rd, Register Rn) { 335 ubfmw(Rd, Rn, 0, 7); 336 } 337 inline void uxthw(Register Rd, Register Rn) { 338 ubfmw(Rd, Rn, 0, 15); 339 } 340 inline void uxtb(Register Rd, Register Rn) { 341 ubfm(Rd, Rn, 0, 7); 342 } 343 inline void uxth(Register Rd, Register Rn) { 344 ubfm(Rd, Rn, 0, 15); 345 } 346 inline void uxtw(Register Rd, Register Rn) { 347 ubfm(Rd, Rn, 0, 31); 348 } 349 350 inline void cmnw(Register Rn, Register Rm) { 351 addsw(zr, Rn, Rm); 352 } 353 inline void cmn(Register Rn, Register Rm) { 354 adds(zr, Rn, Rm); 355 } 356 357 inline void cmpw(Register Rn, Register Rm) { 358 subsw(zr, Rn, Rm); 359 } 360 inline void cmp(Register Rn, Register Rm) { 361 subs(zr, Rn, Rm); 362 } 363 364 inline void negw(Register Rd, Register Rn) { 365 subw(Rd, zr, Rn); 366 } 367 368 inline void neg(Register Rd, Register Rn) { 369 sub(Rd, zr, Rn); 370 } 371 372 inline void negsw(Register Rd, Register Rn) { 373 subsw(Rd, zr, Rn); 374 } 375 376 inline void negs(Register Rd, Register Rn) { 377 subs(Rd, zr, Rn); 378 } 379 380 inline void cmnw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) { 381 addsw(zr, Rn, Rm, kind, shift); 382 } 383 inline void cmn(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) { 384 adds(zr, Rn, Rm, kind, shift); 385 } 386 387 inline void cmpw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) { 388 subsw(zr, Rn, Rm, kind, shift); 389 } 390 inline void cmp(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) { 391 subs(zr, Rn, Rm, kind, shift); 392 } 393 394 inline void negw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) { 395 subw(Rd, zr, Rn, kind, shift); 396 } 397 398 inline void neg(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) { 399 sub(Rd, zr, Rn, kind, shift); 400 } 401 402 inline void negsw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) { 403 subsw(Rd, zr, Rn, kind, shift); 404 } 405 406 inline void negs(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) { 407 subs(Rd, zr, Rn, kind, shift); 408 } 409 410 inline void mnegw(Register Rd, Register Rn, Register Rm) { 411 msubw(Rd, Rn, Rm, zr); 412 } 413 inline void mneg(Register Rd, Register Rn, Register Rm) { 414 msub(Rd, Rn, Rm, zr); 415 } 416 417 inline void mulw(Register Rd, Register Rn, Register Rm) { 418 maddw(Rd, Rn, Rm, zr); 419 } 420 inline void mul(Register Rd, Register Rn, Register Rm) { 421 madd(Rd, Rn, Rm, zr); 422 } 423 424 inline void smnegl(Register Rd, Register Rn, Register Rm) { 425 smsubl(Rd, Rn, Rm, zr); 426 } 427 inline void smull(Register Rd, Register Rn, Register Rm) { 428 smaddl(Rd, Rn, Rm, zr); 429 } 430 431 inline void umnegl(Register Rd, Register Rn, Register Rm) { 432 umsubl(Rd, Rn, Rm, zr); 433 } 434 inline void umull(Register Rd, Register Rn, Register Rm) { 435 umaddl(Rd, Rn, Rm, zr); 436 } 437 438 #define WRAP(INSN) \ 439 void INSN(Register Rd, Register Rn, Register Rm, Register Ra) { \ 440 if (VM_Version::supports_a53mac() && Ra != zr) \ 441 nop(); \ 442 Assembler::INSN(Rd, Rn, Rm, Ra); \ 443 } 444 445 WRAP(madd) WRAP(msub) WRAP(maddw) WRAP(msubw) 446 WRAP(smaddl) WRAP(smsubl) WRAP(umaddl) WRAP(umsubl) 447 #undef WRAP 448 449 450 // macro assembly operations needed for aarch64 451 452 // first two private routines for loading 32 bit or 64 bit constants 453 private: 454 455 void mov_immediate64(Register dst, uint64_t imm64); 456 void mov_immediate32(Register dst, uint32_t imm32); 457 458 int push(unsigned int bitset, Register stack); 459 int pop(unsigned int bitset, Register stack); 460 461 int push_fp(unsigned int bitset, Register stack); 462 int pop_fp(unsigned int bitset, Register stack); 463 464 int push_p(unsigned int bitset, Register stack); 465 int pop_p(unsigned int bitset, Register stack); 466 467 void mov(Register dst, Address a); 468 469 public: 470 void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); } 471 void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); } 472 473 void push_fp(FloatRegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); } 474 void pop_fp(FloatRegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); } 475 476 static RegSet call_clobbered_gp_registers(); 477 478 void push_p(PRegSet regs, Register stack) { if (regs.bits()) push_p(regs.bits(), stack); } 479 void pop_p(PRegSet regs, Register stack) { if (regs.bits()) pop_p(regs.bits(), stack); } 480 481 // Push and pop everything that might be clobbered by a native 482 // runtime call except rscratch1 and rscratch2. (They are always 483 // scratch, so we don't have to protect them.) Only save the lower 484 // 64 bits of each vector register. Additional registers can be excluded 485 // in a passed RegSet. 486 void push_call_clobbered_registers_except(RegSet exclude); 487 void pop_call_clobbered_registers_except(RegSet exclude); 488 489 void push_call_clobbered_registers() { 490 push_call_clobbered_registers_except(RegSet()); 491 } 492 void pop_call_clobbered_registers() { 493 pop_call_clobbered_registers_except(RegSet()); 494 } 495 496 497 // now mov instructions for loading absolute addresses and 32 or 498 // 64 bit integers 499 500 inline void mov(Register dst, address addr) { mov_immediate64(dst, (uint64_t)addr); } 501 502 template<typename T, ENABLE_IF(std::is_integral<T>::value)> 503 inline void mov(Register dst, T o) { mov_immediate64(dst, (uint64_t)o); } 504 505 inline void movw(Register dst, uint32_t imm32) { mov_immediate32(dst, imm32); } 506 507 void mov(Register dst, RegisterOrConstant src) { 508 if (src.is_register()) 509 mov(dst, src.as_register()); 510 else 511 mov(dst, src.as_constant()); 512 } 513 514 void movptr(Register r, uintptr_t imm64); 515 516 void mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64); 517 518 void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { 519 orr(Vd, T, Vn, Vn); 520 } 521 522 void flt_to_flt16(Register dst, FloatRegister src, FloatRegister tmp) { 523 fcvtsh(tmp, src); 524 smov(dst, tmp, H, 0); 525 } 526 527 void flt16_to_flt(FloatRegister dst, Register src, FloatRegister tmp) { 528 mov(tmp, H, 0, src); 529 fcvths(dst, tmp); 530 } 531 532 // Generalized Test Bit And Branch, including a "far" variety which 533 // spans more than 32KiB. 534 void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool isfar = false) { 535 assert(cond == EQ || cond == NE, "must be"); 536 537 if (isfar) 538 cond = ~cond; 539 540 void (Assembler::* branch)(Register Rt, int bitpos, Label &L); 541 if (cond == Assembler::EQ) 542 branch = &Assembler::tbz; 543 else 544 branch = &Assembler::tbnz; 545 546 if (isfar) { 547 Label L; 548 (this->*branch)(Rt, bitpos, L); 549 b(dest); 550 bind(L); 551 } else { 552 (this->*branch)(Rt, bitpos, dest); 553 } 554 } 555 556 // macro instructions for accessing and updating floating point 557 // status register 558 // 559 // FPSR : op1 == 011 560 // CRn == 0100 561 // CRm == 0100 562 // op2 == 001 563 564 inline void get_fpsr(Register reg) 565 { 566 mrs(0b11, 0b0100, 0b0100, 0b001, reg); 567 } 568 569 inline void set_fpsr(Register reg) 570 { 571 msr(0b011, 0b0100, 0b0100, 0b001, reg); 572 } 573 574 inline void clear_fpsr() 575 { 576 msr(0b011, 0b0100, 0b0100, 0b001, zr); 577 } 578 579 // DCZID_EL0: op1 == 011 580 // CRn == 0000 581 // CRm == 0000 582 // op2 == 111 583 inline void get_dczid_el0(Register reg) 584 { 585 mrs(0b011, 0b0000, 0b0000, 0b111, reg); 586 } 587 588 // CTR_EL0: op1 == 011 589 // CRn == 0000 590 // CRm == 0000 591 // op2 == 001 592 inline void get_ctr_el0(Register reg) 593 { 594 mrs(0b011, 0b0000, 0b0000, 0b001, reg); 595 } 596 597 inline void get_nzcv(Register reg) { 598 mrs(0b011, 0b0100, 0b0010, 0b000, reg); 599 } 600 601 inline void set_nzcv(Register reg) { 602 msr(0b011, 0b0100, 0b0010, 0b000, reg); 603 } 604 605 // idiv variant which deals with MINLONG as dividend and -1 as divisor 606 int corrected_idivl(Register result, Register ra, Register rb, 607 bool want_remainder, Register tmp = rscratch1); 608 int corrected_idivq(Register result, Register ra, Register rb, 609 bool want_remainder, Register tmp = rscratch1); 610 611 // Support for null-checks 612 // 613 // Generates code that causes a null OS exception if the content of reg is null. 614 // If the accessed location is M[reg + offset] and the offset is known, provide the 615 // offset. No explicit code generation is needed if the offset is within a certain 616 // range (0 <= offset <= page_size). 617 618 virtual void null_check(Register reg, int offset = -1); 619 static bool needs_explicit_null_check(intptr_t offset); 620 static bool uses_implicit_null_check(void* address); 621 622 // markWord tests, kills markWord reg 623 void test_markword_is_inline_type(Register markword, Label& is_inline_type); 624 625 // inlineKlass queries, kills temp_reg 626 void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type); 627 void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type); 628 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type); 629 630 // Get the default value oop for the given InlineKlass 631 void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj); 632 // The empty value oop, for the given InlineKlass ("empty" as in no instance fields) 633 // get_default_value_oop with extra assertion for empty inline klass 634 void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj); 635 636 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free); 637 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free); 638 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat); 639 640 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays 641 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label); 642 void test_flat_array_oop(Register klass, Register temp_reg, Label& is_flat_array); 643 void test_non_flat_array_oop(Register oop, Register temp_reg, Label&is_non_flat_array); 644 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array); 645 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array); 646 647 // Check array klass layout helper for flat or null-free arrays... 648 void test_flat_array_layout(Register lh, Label& is_flat_array); 649 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array); 650 void test_null_free_array_layout(Register lh, Label& is_null_free_array); 651 void test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array); 652 653 static address target_addr_for_insn(address insn_addr, unsigned insn); 654 static address target_addr_for_insn_or_null(address insn_addr, unsigned insn); 655 static address target_addr_for_insn(address insn_addr) { 656 unsigned insn = *(unsigned*)insn_addr; 657 return target_addr_for_insn(insn_addr, insn); 658 } 659 static address target_addr_for_insn_or_null(address insn_addr) { 660 unsigned insn = *(unsigned*)insn_addr; 661 return target_addr_for_insn_or_null(insn_addr, insn); 662 } 663 664 // Required platform-specific helpers for Label::patch_instructions. 665 // They _shadow_ the declarations in AbstractAssembler, which are undefined. 666 static int pd_patch_instruction_size(address branch, address target); 667 static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) { 668 pd_patch_instruction_size(branch, target); 669 } 670 static address pd_call_destination(address branch) { 671 return target_addr_for_insn(branch); 672 } 673 #ifndef PRODUCT 674 static void pd_print_patched_instruction(address branch); 675 #endif 676 677 static int patch_oop(address insn_addr, address o); 678 static int patch_narrow_klass(address insn_addr, narrowKlass n); 679 680 // Return whether code is emitted to a scratch blob. 681 virtual bool in_scratch_emit_size() { 682 return false; 683 } 684 address emit_trampoline_stub(int insts_call_instruction_offset, address target); 685 static int max_trampoline_stub_size(); 686 void emit_static_call_stub(); 687 static int static_call_stub_size(); 688 689 // The following 4 methods return the offset of the appropriate move instruction 690 691 // Support for fast byte/short loading with zero extension (depending on particular CPU) 692 int load_unsigned_byte(Register dst, Address src); 693 int load_unsigned_short(Register dst, Address src); 694 695 // Support for fast byte/short loading with sign extension (depending on particular CPU) 696 int load_signed_byte(Register dst, Address src); 697 int load_signed_short(Register dst, Address src); 698 699 int load_signed_byte32(Register dst, Address src); 700 int load_signed_short32(Register dst, Address src); 701 702 // Support for sign-extension (hi:lo = extend_sign(lo)) 703 void extend_sign(Register hi, Register lo); 704 705 // Load and store values by size and signed-ness 706 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed); 707 void store_sized_value(Address dst, Register src, size_t size_in_bytes); 708 709 // Support for inc/dec with optimal instruction selection depending on value 710 711 // x86_64 aliases an unqualified register/address increment and 712 // decrement to call incrementq and decrementq but also supports 713 // explicitly sized calls to incrementq/decrementq or 714 // incrementl/decrementl 715 716 // for aarch64 the proper convention would be to use 717 // increment/decrement for 64 bit operations and 718 // incrementw/decrementw for 32 bit operations. so when porting 719 // x86_64 code we can leave calls to increment/decrement as is, 720 // replace incrementq/decrementq with increment/decrement and 721 // replace incrementl/decrementl with incrementw/decrementw. 722 723 // n.b. increment/decrement calls with an Address destination will 724 // need to use a scratch register to load the value to be 725 // incremented. increment/decrement calls which add or subtract a 726 // constant value greater than 2^12 will need to use a 2nd scratch 727 // register to hold the constant. so, a register increment/decrement 728 // may trash rscratch2 and an address increment/decrement trash 729 // rscratch and rscratch2 730 731 void decrementw(Address dst, int value = 1); 732 void decrementw(Register reg, int value = 1); 733 734 void decrement(Register reg, int value = 1); 735 void decrement(Address dst, int value = 1); 736 737 void incrementw(Address dst, int value = 1); 738 void incrementw(Register reg, int value = 1); 739 740 void increment(Register reg, int value = 1); 741 void increment(Address dst, int value = 1); 742 743 744 // Alignment 745 void align(int modulus); 746 747 // nop 748 void post_call_nop(); 749 750 // Stack frame creation/removal 751 void enter(bool strip_ret_addr = false); 752 void leave(); 753 754 // ROP Protection 755 void protect_return_address(); 756 void protect_return_address(Register return_reg, Register temp_reg); 757 void authenticate_return_address(Register return_reg = lr); 758 void authenticate_return_address(Register return_reg, Register temp_reg); 759 void strip_return_address(); 760 void check_return_address(Register return_reg=lr) PRODUCT_RETURN; 761 762 // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) 763 // The pointer will be loaded into the thread register. 764 void get_thread(Register thread); 765 766 // support for argument shuffling 767 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rscratch1); 768 void float_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1); 769 void long_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1); 770 void double_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1); 771 void object_move( 772 OopMap* map, 773 int oop_handle_offset, 774 int framesize_in_slots, 775 VMRegPair src, 776 VMRegPair dst, 777 bool is_receiver, 778 int* receiver_offset); 779 780 781 // Support for VM calls 782 // 783 // It is imperative that all calls into the VM are handled via the call_VM macros. 784 // They make sure that the stack linkage is setup correctly. call_VM's correspond 785 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. 786 787 788 void call_VM(Register oop_result, 789 address entry_point, 790 bool check_exceptions = true); 791 void call_VM(Register oop_result, 792 address entry_point, 793 Register arg_1, 794 bool check_exceptions = true); 795 void call_VM(Register oop_result, 796 address entry_point, 797 Register arg_1, Register arg_2, 798 bool check_exceptions = true); 799 void call_VM(Register oop_result, 800 address entry_point, 801 Register arg_1, Register arg_2, Register arg_3, 802 bool check_exceptions = true); 803 804 // Overloadings with last_Java_sp 805 void call_VM(Register oop_result, 806 Register last_java_sp, 807 address entry_point, 808 int number_of_arguments = 0, 809 bool check_exceptions = true); 810 void call_VM(Register oop_result, 811 Register last_java_sp, 812 address entry_point, 813 Register arg_1, bool 814 check_exceptions = true); 815 void call_VM(Register oop_result, 816 Register last_java_sp, 817 address entry_point, 818 Register arg_1, Register arg_2, 819 bool check_exceptions = true); 820 void call_VM(Register oop_result, 821 Register last_java_sp, 822 address entry_point, 823 Register arg_1, Register arg_2, Register arg_3, 824 bool check_exceptions = true); 825 826 void get_vm_result (Register oop_result, Register thread); 827 void get_vm_result_2(Register metadata_result, Register thread); 828 829 // These always tightly bind to MacroAssembler::call_VM_base 830 // bypassing the virtual implementation 831 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); 832 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); 833 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); 834 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); 835 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); 836 837 void call_VM_leaf(address entry_point, 838 int number_of_arguments = 0); 839 void call_VM_leaf(address entry_point, 840 Register arg_1); 841 void call_VM_leaf(address entry_point, 842 Register arg_1, Register arg_2); 843 void call_VM_leaf(address entry_point, 844 Register arg_1, Register arg_2, Register arg_3); 845 846 // These always tightly bind to MacroAssembler::call_VM_leaf_base 847 // bypassing the virtual implementation 848 void super_call_VM_leaf(address entry_point); 849 void super_call_VM_leaf(address entry_point, Register arg_1); 850 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); 851 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); 852 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); 853 854 // last Java Frame (fills frame anchor) 855 void set_last_Java_frame(Register last_java_sp, 856 Register last_java_fp, 857 address last_java_pc, 858 Register scratch); 859 860 void set_last_Java_frame(Register last_java_sp, 861 Register last_java_fp, 862 Label &last_java_pc, 863 Register scratch); 864 865 void set_last_Java_frame(Register last_java_sp, 866 Register last_java_fp, 867 Register last_java_pc, 868 Register scratch); 869 870 void reset_last_Java_frame(Register thread); 871 872 // thread in the default location (rthread) 873 void reset_last_Java_frame(bool clear_fp); 874 875 // Stores 876 void store_check(Register obj); // store check for obj - register is destroyed afterwards 877 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) 878 879 void resolve_jobject(Register value, Register tmp1, Register tmp2); 880 void resolve_global_jobject(Register value, Register tmp1, Register tmp2); 881 882 // C 'boolean' to Java boolean: x == 0 ? 0 : 1 883 void c2bool(Register x); 884 885 void load_method_holder_cld(Register rresult, Register rmethod); 886 void load_method_holder(Register holder, Register method); 887 888 // oop manipulations 889 void load_metadata(Register dst, Register src); 890 891 void load_klass(Register dst, Register src); 892 void store_klass(Register dst, Register src); 893 void cmp_klass(Register oop, Register trial_klass, Register tmp); 894 895 void resolve_weak_handle(Register result, Register tmp1, Register tmp2); 896 void resolve_oop_handle(Register result, Register tmp1, Register tmp2); 897 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2); 898 899 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 900 Register tmp1, Register tmp2); 901 902 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 903 Register tmp1, Register tmp2, Register tmp3); 904 905 void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass); 906 907 // inline type data payload offsets... 908 void first_field_offset(Register inline_klass, Register offset); 909 void data_for_oop(Register oop, Register data, Register inline_klass); 910 // get data payload ptr a flat value array at index, kills rcx and index 911 void data_for_value_array_index(Register array, Register array_klass, 912 Register index, Register data); 913 914 void load_heap_oop(Register dst, Address src, Register tmp1, 915 Register tmp2, DecoratorSet decorators = 0); 916 917 void load_heap_oop_not_null(Register dst, Address src, Register tmp1, 918 Register tmp2, DecoratorSet decorators = 0); 919 void store_heap_oop(Address dst, Register val, Register tmp1, 920 Register tmp2, Register tmp3, DecoratorSet decorators = 0); 921 922 // currently unimplemented 923 // Used for storing null. All other oop constants should be 924 // stored using routines that take a jobject. 925 void store_heap_oop_null(Address dst); 926 927 void load_prototype_header(Register dst, Register src); 928 929 void store_klass_gap(Register dst, Register src); 930 931 // This dummy is to prevent a call to store_heap_oop from 932 // converting a zero (like null) into a Register by giving 933 // the compiler two choices it can't resolve 934 935 void store_heap_oop(Address dst, void* dummy); 936 937 void encode_heap_oop(Register d, Register s); 938 void encode_heap_oop(Register r) { encode_heap_oop(r, r); } 939 void decode_heap_oop(Register d, Register s); 940 void decode_heap_oop(Register r) { decode_heap_oop(r, r); } 941 void encode_heap_oop_not_null(Register r); 942 void decode_heap_oop_not_null(Register r); 943 void encode_heap_oop_not_null(Register dst, Register src); 944 void decode_heap_oop_not_null(Register dst, Register src); 945 946 void set_narrow_oop(Register dst, jobject obj); 947 948 void encode_klass_not_null(Register r); 949 void decode_klass_not_null(Register r); 950 void encode_klass_not_null(Register dst, Register src); 951 void decode_klass_not_null(Register dst, Register src); 952 953 void set_narrow_klass(Register dst, Klass* k); 954 955 // if heap base register is used - reinit it with the correct value 956 void reinit_heapbase(); 957 958 DEBUG_ONLY(void verify_heapbase(const char* msg);) 959 960 void push_CPU_state(bool save_vectors = false, bool use_sve = false, 961 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0); 962 void pop_CPU_state(bool restore_vectors = false, bool use_sve = false, 963 int sve_vector_size_in_bytes = 0, int total_predicate_in_bytes = 0); 964 965 void push_cont_fastpath(Register java_thread); 966 void pop_cont_fastpath(Register java_thread); 967 968 // Round up to a power of two 969 void round_to(Register reg, int modulus); 970 971 // java.lang.Math::round intrinsics 972 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp); 973 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp); 974 975 // allocation 976 977 // Object / value buffer allocation... 978 // Allocate instance of klass, assumes klass initialized by caller 979 // new_obj prefers to be rax 980 // Kills t1 and t2, perserves klass, return allocation in new_obj (rsi on LP64) 981 void allocate_instance(Register klass, Register new_obj, 982 Register t1, Register t2, 983 bool clear_fields, Label& alloc_failed); 984 985 void tlab_allocate( 986 Register obj, // result: pointer to object after successful allocation 987 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 988 int con_size_in_bytes, // object size in bytes if known at compile time 989 Register t1, // temp register 990 Register t2, // temp register 991 Label& slow_case // continuation point if fast allocation fails 992 ); 993 void verify_tlab(); 994 995 // For field "index" within "klass", return inline_klass ... 996 void get_inline_type_field_klass(Register klass, Register index, Register inline_klass); 997 998 // interface method calling 999 void lookup_interface_method(Register recv_klass, 1000 Register intf_klass, 1001 RegisterOrConstant itable_index, 1002 Register method_result, 1003 Register scan_temp, 1004 Label& no_such_interface, 1005 bool return_method = true); 1006 1007 // virtual method calling 1008 // n.b. x86 allows RegisterOrConstant for vtable_index 1009 void lookup_virtual_method(Register recv_klass, 1010 RegisterOrConstant vtable_index, 1011 Register method_result); 1012 1013 // Test sub_klass against super_klass, with fast and slow paths. 1014 1015 // The fast path produces a tri-state answer: yes / no / maybe-slow. 1016 // One of the three labels can be null, meaning take the fall-through. 1017 // If super_check_offset is -1, the value is loaded up from super_klass. 1018 // No registers are killed, except temp_reg. 1019 void check_klass_subtype_fast_path(Register sub_klass, 1020 Register super_klass, 1021 Register temp_reg, 1022 Label* L_success, 1023 Label* L_failure, 1024 Label* L_slow_path, 1025 RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); 1026 1027 // The rest of the type check; must be wired to a corresponding fast path. 1028 // It does not repeat the fast path logic, so don't use it standalone. 1029 // The temp_reg and temp2_reg can be noreg, if no temps are available. 1030 // Updates the sub's secondary super cache as necessary. 1031 // If set_cond_codes, condition codes will be Z on success, NZ on failure. 1032 void check_klass_subtype_slow_path(Register sub_klass, 1033 Register super_klass, 1034 Register temp_reg, 1035 Register temp2_reg, 1036 Label* L_success, 1037 Label* L_failure, 1038 bool set_cond_codes = false); 1039 1040 // Simplified, combined version, good for typical uses. 1041 // Falls through on failure. 1042 void check_klass_subtype(Register sub_klass, 1043 Register super_klass, 1044 Register temp_reg, 1045 Label& L_success); 1046 1047 void clinit_barrier(Register klass, 1048 Register thread, 1049 Label* L_fast_path = nullptr, 1050 Label* L_slow_path = nullptr); 1051 1052 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); 1053 1054 void verify_sve_vector_length(Register tmp = rscratch1); 1055 void reinitialize_ptrue() { 1056 if (UseSVE > 0) { 1057 sve_ptrue(ptrue, B); 1058 } 1059 } 1060 void verify_ptrue(); 1061 1062 // Debugging 1063 1064 // only if +VerifyOops 1065 void _verify_oop(Register reg, const char* s, const char* file, int line); 1066 void _verify_oop_addr(Address addr, const char * s, const char* file, int line); 1067 1068 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) { 1069 if (VerifyOops) { 1070 _verify_oop(reg, s, file, line); 1071 } 1072 } 1073 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) { 1074 if (VerifyOops) { 1075 _verify_oop_addr(reg, s, file, line); 1076 } 1077 } 1078 1079 // TODO: verify method and klass metadata (compare against vptr?) 1080 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} 1081 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} 1082 1083 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__) 1084 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__) 1085 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__) 1086 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) 1087 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) 1088 1089 // only if +VerifyFPU 1090 void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); 1091 1092 // prints msg, dumps registers and stops execution 1093 void stop(const char* msg); 1094 1095 static void debug64(char* msg, int64_t pc, int64_t regs[]); 1096 1097 void untested() { stop("untested"); } 1098 1099 void unimplemented(const char* what = ""); 1100 1101 void should_not_reach_here() { stop("should not reach here"); } 1102 1103 void _assert_asm(Condition cc, const char* msg); 1104 #define assert_asm0(cc, msg) _assert_asm(cc, FILE_AND_LINE ": " msg) 1105 #define assert_asm(masm, command, cc, msg) DEBUG_ONLY(command; (masm)->_assert_asm(cc, FILE_AND_LINE ": " #command " " #cc ": " msg)) 1106 1107 // Stack overflow checking 1108 void bang_stack_with_offset(int offset) { 1109 // stack grows down, caller passes positive offset 1110 assert(offset > 0, "must bang with negative offset"); 1111 sub(rscratch2, sp, offset); 1112 str(zr, Address(rscratch2)); 1113 } 1114 1115 // Writes to stack successive pages until offset reached to check for 1116 // stack overflow + shadow pages. Also, clobbers tmp 1117 void bang_stack_size(Register size, Register tmp); 1118 1119 // Check for reserved stack access in method being exited (for JIT) 1120 void reserved_stack_check(); 1121 1122 // Arithmetics 1123 1124 void addptr(const Address &dst, int32_t src); 1125 void cmpptr(Register src1, Address src2); 1126 1127 void cmpoop(Register obj1, Register obj2); 1128 1129 // Various forms of CAS 1130 1131 void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, 1132 Label &succeed, Label *fail); 1133 void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 1134 Label &succeed, Label *fail); 1135 1136 void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 1137 Label &succeed, Label *fail); 1138 1139 void atomic_add(Register prev, RegisterOrConstant incr, Register addr); 1140 void atomic_addw(Register prev, RegisterOrConstant incr, Register addr); 1141 void atomic_addal(Register prev, RegisterOrConstant incr, Register addr); 1142 void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr); 1143 1144 void atomic_xchg(Register prev, Register newv, Register addr); 1145 void atomic_xchgw(Register prev, Register newv, Register addr); 1146 void atomic_xchgl(Register prev, Register newv, Register addr); 1147 void atomic_xchglw(Register prev, Register newv, Register addr); 1148 void atomic_xchgal(Register prev, Register newv, Register addr); 1149 void atomic_xchgalw(Register prev, Register newv, Register addr); 1150 1151 void orptr(Address adr, RegisterOrConstant src) { 1152 ldr(rscratch1, adr); 1153 if (src.is_register()) 1154 orr(rscratch1, rscratch1, src.as_register()); 1155 else 1156 orr(rscratch1, rscratch1, src.as_constant()); 1157 str(rscratch1, adr); 1158 } 1159 1160 // A generic CAS; success or failure is in the EQ flag. 1161 // Clobbers rscratch1 1162 void cmpxchg(Register addr, Register expected, Register new_val, 1163 enum operand_size size, 1164 bool acquire, bool release, bool weak, 1165 Register result); 1166 1167 #ifdef ASSERT 1168 // Template short-hand support to clean-up after a failed call to trampoline 1169 // call generation (see trampoline_call() below), when a set of Labels must 1170 // be reset (before returning). 1171 template<typename Label, typename... More> 1172 void reset_labels(Label &lbl, More&... more) { 1173 lbl.reset(); reset_labels(more...); 1174 } 1175 template<typename Label> 1176 void reset_labels(Label &lbl) { 1177 lbl.reset(); 1178 } 1179 #endif 1180 1181 private: 1182 void compare_eq(Register rn, Register rm, enum operand_size size); 1183 1184 public: 1185 // AArch64 OpenJDK uses four different types of calls: 1186 // - direct call: bl pc_relative_offset 1187 // This is the shortest and the fastest, but the offset has the range: 1188 // +/-128MB for the release build, +/-2MB for the debug build. 1189 // 1190 // - far call: adrp reg, pc_relative_offset; add; bl reg 1191 // This is longer than a direct call. The offset has 1192 // the range +/-4GB. As the code cache size is limited to 4GB, 1193 // far calls can reach anywhere in the code cache. If a jump is 1194 // needed rather than a call, a far jump 'b reg' can be used instead. 1195 // All instructions are embedded at a call site. 1196 // 1197 // - trampoline call: 1198 // This is only available in C1/C2-generated code (nmethod). It is a combination 1199 // of a direct call, which is used if the destination of a call is in range, 1200 // and a register-indirect call. It has the advantages of reaching anywhere in 1201 // the AArch64 address space and being patchable at runtime when the generated 1202 // code is being executed by other threads. 1203 // 1204 // [Main code section] 1205 // bl trampoline 1206 // [Stub code section] 1207 // trampoline: 1208 // ldr reg, pc + 8 1209 // br reg 1210 // <64-bit destination address> 1211 // 1212 // If the destination is in range when the generated code is moved to the code 1213 // cache, 'bl trampoline' is replaced with 'bl destination' and the trampoline 1214 // is not used. 1215 // The optimization does not remove the trampoline from the stub section. 1216 // This is necessary because the trampoline may well be redirected later when 1217 // code is patched, and the new destination may not be reachable by a simple BR 1218 // instruction. 1219 // 1220 // - indirect call: move reg, address; blr reg 1221 // This too can reach anywhere in the address space, but it cannot be 1222 // patched while code is running, so it must only be modified at a safepoint. 1223 // This form of call is most suitable for targets at fixed addresses, which 1224 // will never be patched. 1225 // 1226 // The patching we do conforms to the "Concurrent modification and 1227 // execution of instructions" section of the Arm Architectural 1228 // Reference Manual, which only allows B, BL, BRK, HVC, ISB, NOP, SMC, 1229 // or SVC instructions to be modified while another thread is 1230 // executing them. 1231 // 1232 // To patch a trampoline call when the BL can't reach, we first modify 1233 // the 64-bit destination address in the trampoline, then modify the 1234 // BL to point to the trampoline, then flush the instruction cache to 1235 // broadcast the change to all executing threads. See 1236 // NativeCall::set_destination_mt_safe for the details. 1237 // 1238 // There is a benign race in that the other thread might observe the 1239 // modified BL before it observes the modified 64-bit destination 1240 // address. That does not matter because the destination method has been 1241 // invalidated, so there will be a trap at its start. 1242 // For this to work, the destination address in the trampoline is 1243 // always updated, even if we're not using the trampoline. 1244 1245 // Emit a direct call if the entry address will always be in range, 1246 // otherwise a trampoline call. 1247 // Supported entry.rspec(): 1248 // - relocInfo::runtime_call_type 1249 // - relocInfo::opt_virtual_call_type 1250 // - relocInfo::static_call_type 1251 // - relocInfo::virtual_call_type 1252 // 1253 // Return: the call PC or null if CodeCache is full. 1254 address trampoline_call(Address entry); 1255 1256 static bool far_branches() { 1257 return ReservedCodeCacheSize > branch_range; 1258 } 1259 1260 // Check if branches to the non nmethod section require a far jump 1261 static bool codestub_branch_needs_far_jump() { 1262 return CodeCache::max_distance_to_non_nmethod() > branch_range; 1263 } 1264 1265 // Emit a direct call/jump if the entry address will always be in range, 1266 // otherwise a far call/jump. 1267 // The address must be inside the code cache. 1268 // Supported entry.rspec(): 1269 // - relocInfo::external_word_type 1270 // - relocInfo::runtime_call_type 1271 // - relocInfo::none 1272 // In the case of a far call/jump, the entry address is put in the tmp register. 1273 // The tmp register is invalidated. 1274 // 1275 // Far_jump returns the amount of the emitted code. 1276 void far_call(Address entry, Register tmp = rscratch1); 1277 int far_jump(Address entry, Register tmp = rscratch1); 1278 1279 static int far_codestub_branch_size() { 1280 if (codestub_branch_needs_far_jump()) { 1281 return 3 * 4; // adrp, add, br 1282 } else { 1283 return 4; 1284 } 1285 } 1286 1287 // Emit the CompiledIC call idiom 1288 address ic_call(address entry, jint method_index = 0); 1289 1290 public: 1291 1292 // Data 1293 1294 void mov_metadata(Register dst, Metadata* obj); 1295 Address allocate_metadata_address(Metadata* obj); 1296 Address constant_oop_address(jobject obj); 1297 1298 void movoop(Register dst, jobject obj); 1299 1300 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 1301 void kernel_crc32(Register crc, Register buf, Register len, 1302 Register table0, Register table1, Register table2, Register table3, 1303 Register tmp, Register tmp2, Register tmp3); 1304 // CRC32 code for java.util.zip.CRC32C::updateBytes() intrinsic. 1305 void kernel_crc32c(Register crc, Register buf, Register len, 1306 Register table0, Register table1, Register table2, Register table3, 1307 Register tmp, Register tmp2, Register tmp3); 1308 1309 // Stack push and pop individual 64 bit registers 1310 void push(Register src); 1311 void pop(Register dst); 1312 1313 void repne_scan(Register addr, Register value, Register count, 1314 Register scratch); 1315 void repne_scanw(Register addr, Register value, Register count, 1316 Register scratch); 1317 1318 typedef void (MacroAssembler::* add_sub_imm_insn)(Register Rd, Register Rn, unsigned imm); 1319 typedef void (MacroAssembler::* add_sub_reg_insn)(Register Rd, Register Rn, Register Rm, enum shift_kind kind, unsigned shift); 1320 1321 // If a constant does not fit in an immediate field, generate some 1322 // number of MOV instructions and then perform the operation 1323 void wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 1324 add_sub_imm_insn insn1, 1325 add_sub_reg_insn insn2, bool is32); 1326 // Separate vsn which sets the flags 1327 void wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 1328 add_sub_imm_insn insn1, 1329 add_sub_reg_insn insn2, bool is32); 1330 1331 #define WRAP(INSN, is32) \ 1332 void INSN(Register Rd, Register Rn, uint64_t imm) { \ 1333 wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN, is32); \ 1334 } \ 1335 \ 1336 void INSN(Register Rd, Register Rn, Register Rm, \ 1337 enum shift_kind kind, unsigned shift = 0) { \ 1338 Assembler::INSN(Rd, Rn, Rm, kind, shift); \ 1339 } \ 1340 \ 1341 void INSN(Register Rd, Register Rn, Register Rm) { \ 1342 Assembler::INSN(Rd, Rn, Rm); \ 1343 } \ 1344 \ 1345 void INSN(Register Rd, Register Rn, Register Rm, \ 1346 ext::operation option, int amount = 0) { \ 1347 Assembler::INSN(Rd, Rn, Rm, option, amount); \ 1348 } 1349 1350 WRAP(add, false) WRAP(addw, true) WRAP(sub, false) WRAP(subw, true) 1351 1352 #undef WRAP 1353 #define WRAP(INSN, is32) \ 1354 void INSN(Register Rd, Register Rn, uint64_t imm) { \ 1355 wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN, is32); \ 1356 } \ 1357 \ 1358 void INSN(Register Rd, Register Rn, Register Rm, \ 1359 enum shift_kind kind, unsigned shift = 0) { \ 1360 Assembler::INSN(Rd, Rn, Rm, kind, shift); \ 1361 } \ 1362 \ 1363 void INSN(Register Rd, Register Rn, Register Rm) { \ 1364 Assembler::INSN(Rd, Rn, Rm); \ 1365 } \ 1366 \ 1367 void INSN(Register Rd, Register Rn, Register Rm, \ 1368 ext::operation option, int amount = 0) { \ 1369 Assembler::INSN(Rd, Rn, Rm, option, amount); \ 1370 } 1371 1372 WRAP(adds, false) WRAP(addsw, true) WRAP(subs, false) WRAP(subsw, true) 1373 1374 void add(Register Rd, Register Rn, RegisterOrConstant increment); 1375 void addw(Register Rd, Register Rn, RegisterOrConstant increment); 1376 void sub(Register Rd, Register Rn, RegisterOrConstant decrement); 1377 void subw(Register Rd, Register Rn, RegisterOrConstant decrement); 1378 1379 void adrp(Register reg1, const Address &dest, uint64_t &byte_offset); 1380 1381 void verified_entry(Compile* C, int sp_inc); 1382 1383 // Inline type specific methods 1384 #include "asm/macroAssembler_common.hpp" 1385 1386 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true); 1387 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]); 1388 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, 1389 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index, 1390 RegState reg_state[]); 1391 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index, 1392 VMRegPair* from, int from_count, int& from_index, VMReg to, 1393 RegState reg_state[], Register val_array); 1394 int extend_stack_for_inline_args(int args_on_stack); 1395 void remove_frame(int initial_framesize, bool needs_stack_repair); 1396 VMReg spill_reg_for(VMReg reg); 1397 void save_stack_increment(int sp_inc, int frame_size); 1398 1399 void tableswitch(Register index, jint lowbound, jint highbound, 1400 Label &jumptable, Label &jumptable_end, int stride = 1) { 1401 adr(rscratch1, jumptable); 1402 subsw(rscratch2, index, lowbound); 1403 subsw(zr, rscratch2, highbound - lowbound); 1404 br(Assembler::HS, jumptable_end); 1405 add(rscratch1, rscratch1, rscratch2, 1406 ext::sxtw, exact_log2(stride * Assembler::instruction_size)); 1407 br(rscratch1); 1408 } 1409 1410 // Form an address from base + offset in Rd. Rd may or may not 1411 // actually be used: you must use the Address that is returned. It 1412 // is up to you to ensure that the shift provided matches the size 1413 // of your data. 1414 Address form_address(Register Rd, Register base, int64_t byte_offset, int shift); 1415 1416 // Return true iff an address is within the 48-bit AArch64 address 1417 // space. 1418 bool is_valid_AArch64_address(address a) { 1419 return ((uint64_t)a >> 48) == 0; 1420 } 1421 1422 // Load the base of the cardtable byte map into reg. 1423 void load_byte_map_base(Register reg); 1424 1425 // Prolog generator routines to support switch between x86 code and 1426 // generated ARM code 1427 1428 // routine to generate an x86 prolog for a stub function which 1429 // bootstraps into the generated ARM code which directly follows the 1430 // stub 1431 // 1432 1433 public: 1434 1435 void ldr_constant(Register dest, const Address &const_addr) { 1436 if (NearCpool) { 1437 ldr(dest, const_addr); 1438 } else { 1439 uint64_t offset; 1440 adrp(dest, InternalAddress(const_addr.target()), offset); 1441 ldr(dest, Address(dest, offset)); 1442 } 1443 } 1444 1445 address read_polling_page(Register r, relocInfo::relocType rtype); 1446 void get_polling_page(Register dest, relocInfo::relocType rtype); 1447 1448 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 1449 void update_byte_crc32(Register crc, Register val, Register table); 1450 void update_word_crc32(Register crc, Register v, Register tmp, 1451 Register table0, Register table1, Register table2, Register table3, 1452 bool upper = false); 1453 1454 address count_positives(Register ary1, Register len, Register result); 1455 1456 address arrays_equals(Register a1, Register a2, Register result, Register cnt1, 1457 Register tmp1, Register tmp2, Register tmp3, int elem_size); 1458 1459 void string_equals(Register a1, Register a2, Register result, Register cnt1, 1460 int elem_size); 1461 1462 void fill_words(Register base, Register cnt, Register value); 1463 void fill_words(Register base, uint64_t cnt, Register value); 1464 1465 address zero_words(Register base, uint64_t cnt); 1466 address zero_words(Register ptr, Register cnt); 1467 void zero_dcache_blocks(Register base, Register cnt); 1468 1469 static const int zero_words_block_size; 1470 1471 address byte_array_inflate(Register src, Register dst, Register len, 1472 FloatRegister vtmp1, FloatRegister vtmp2, 1473 FloatRegister vtmp3, Register tmp4); 1474 1475 void char_array_compress(Register src, Register dst, Register len, 1476 Register res, 1477 FloatRegister vtmp0, FloatRegister vtmp1, 1478 FloatRegister vtmp2, FloatRegister vtmp3, 1479 FloatRegister vtmp4, FloatRegister vtmp5); 1480 1481 void encode_iso_array(Register src, Register dst, 1482 Register len, Register res, bool ascii, 1483 FloatRegister vtmp0, FloatRegister vtmp1, 1484 FloatRegister vtmp2, FloatRegister vtmp3, 1485 FloatRegister vtmp4, FloatRegister vtmp5); 1486 1487 void fast_log(FloatRegister vtmp0, FloatRegister vtmp1, FloatRegister vtmp2, 1488 FloatRegister vtmp3, FloatRegister vtmp4, FloatRegister vtmp5, 1489 FloatRegister tmpC1, FloatRegister tmpC2, FloatRegister tmpC3, 1490 FloatRegister tmpC4, Register tmp1, Register tmp2, 1491 Register tmp3, Register tmp4, Register tmp5); 1492 void generate_dsin_dcos(bool isCos, address npio2_hw, address two_over_pi, 1493 address pio2, address dsin_coef, address dcos_coef); 1494 private: 1495 // begin trigonometric functions support block 1496 void generate__ieee754_rem_pio2(address npio2_hw, address two_over_pi, address pio2); 1497 void generate__kernel_rem_pio2(address two_over_pi, address pio2); 1498 void generate_kernel_sin(FloatRegister x, bool iyIsOne, address dsin_coef); 1499 void generate_kernel_cos(FloatRegister x, address dcos_coef); 1500 // end trigonometric functions support block 1501 void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo, 1502 Register src1, Register src2); 1503 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { 1504 add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2); 1505 } 1506 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 1507 Register y, Register y_idx, Register z, 1508 Register carry, Register product, 1509 Register idx, Register kdx); 1510 void multiply_128_x_128_loop(Register y, Register z, 1511 Register carry, Register carry2, 1512 Register idx, Register jdx, 1513 Register yz_idx1, Register yz_idx2, 1514 Register tmp, Register tmp3, Register tmp4, 1515 Register tmp7, Register product_hi); 1516 void kernel_crc32_using_crypto_pmull(Register crc, Register buf, 1517 Register len, Register tmp0, Register tmp1, Register tmp2, 1518 Register tmp3); 1519 void kernel_crc32_using_crc32(Register crc, Register buf, 1520 Register len, Register tmp0, Register tmp1, Register tmp2, 1521 Register tmp3); 1522 void kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 1523 Register len, Register tmp0, Register tmp1, Register tmp2, 1524 Register tmp3); 1525 void kernel_crc32c_using_crc32c(Register crc, Register buf, 1526 Register len, Register tmp0, Register tmp1, Register tmp2, 1527 Register tmp3); 1528 void kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 1529 Register len, Register tmp0, Register tmp1, Register tmp2, 1530 size_t table_offset); 1531 1532 void ghash_modmul (FloatRegister result, 1533 FloatRegister result_lo, FloatRegister result_hi, FloatRegister b, 1534 FloatRegister a, FloatRegister vzr, FloatRegister a1_xor_a0, FloatRegister p, 1535 FloatRegister t1, FloatRegister t2, FloatRegister t3); 1536 void ghash_load_wide(int index, Register data, FloatRegister result, FloatRegister state); 1537 public: 1538 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, 1539 Register zlen, Register tmp1, Register tmp2, Register tmp3, 1540 Register tmp4, Register tmp5, Register tmp6, Register tmp7); 1541 void mul_add(Register out, Register in, Register offs, Register len, Register k); 1542 void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi, 1543 FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0, 1544 FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3); 1545 void ghash_multiply_wide(int index, 1546 FloatRegister result_lo, FloatRegister result_hi, 1547 FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0, 1548 FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3); 1549 void ghash_reduce(FloatRegister result, FloatRegister lo, FloatRegister hi, 1550 FloatRegister p, FloatRegister z, FloatRegister t1); 1551 void ghash_reduce_wide(int index, FloatRegister result, FloatRegister lo, FloatRegister hi, 1552 FloatRegister p, FloatRegister z, FloatRegister t1); 1553 void ghash_processBlocks_wide(address p, Register state, Register subkeyH, 1554 Register data, Register blocks, int unrolls); 1555 1556 1557 void aesenc_loadkeys(Register key, Register keylen); 1558 void aesecb_encrypt(Register from, Register to, Register keylen, 1559 FloatRegister data = v0, int unrolls = 1); 1560 void aesecb_decrypt(Register from, Register to, Register key, Register keylen); 1561 void aes_round(FloatRegister input, FloatRegister subkey); 1562 1563 // ChaCha20 functions support block 1564 void cc20_quarter_round(FloatRegister aVec, FloatRegister bVec, 1565 FloatRegister cVec, FloatRegister dVec, FloatRegister scratch, 1566 FloatRegister tbl); 1567 void cc20_shift_lane_org(FloatRegister bVec, FloatRegister cVec, 1568 FloatRegister dVec, bool colToDiag); 1569 1570 // Place an ISB after code may have been modified due to a safepoint. 1571 void safepoint_isb(); 1572 1573 private: 1574 // Return the effective address r + (r1 << ext) + offset. 1575 // Uses rscratch2. 1576 Address offsetted_address(Register r, Register r1, Address::extend ext, 1577 int offset, int size); 1578 1579 private: 1580 // Returns an address on the stack which is reachable with a ldr/str of size 1581 // Uses rscratch2 if the address is not directly reachable 1582 Address spill_address(int size, int offset, Register tmp=rscratch2); 1583 Address sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp=rscratch2); 1584 1585 bool merge_alignment_check(Register base, size_t size, int64_t cur_offset, int64_t prev_offset) const; 1586 1587 // Check whether two loads/stores can be merged into ldp/stp. 1588 bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const; 1589 1590 // Merge current load/store with previous load/store into ldp/stp. 1591 void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store); 1592 1593 // Try to merge two loads/stores into ldp/stp. If success, returns true else false. 1594 bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store); 1595 1596 public: 1597 void spill(Register Rx, bool is64, int offset) { 1598 if (is64) { 1599 str(Rx, spill_address(8, offset)); 1600 } else { 1601 strw(Rx, spill_address(4, offset)); 1602 } 1603 } 1604 void spill(FloatRegister Vx, SIMD_RegVariant T, int offset) { 1605 str(Vx, T, spill_address(1 << (int)T, offset)); 1606 } 1607 1608 void spill_sve_vector(FloatRegister Zx, int offset, int vector_reg_size_in_bytes) { 1609 sve_str(Zx, sve_spill_address(vector_reg_size_in_bytes, offset)); 1610 } 1611 void spill_sve_predicate(PRegister pr, int offset, int predicate_reg_size_in_bytes) { 1612 sve_str(pr, sve_spill_address(predicate_reg_size_in_bytes, offset)); 1613 } 1614 1615 void unspill(Register Rx, bool is64, int offset) { 1616 if (is64) { 1617 ldr(Rx, spill_address(8, offset)); 1618 } else { 1619 ldrw(Rx, spill_address(4, offset)); 1620 } 1621 } 1622 void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) { 1623 ldr(Vx, T, spill_address(1 << (int)T, offset)); 1624 } 1625 1626 void unspill_sve_vector(FloatRegister Zx, int offset, int vector_reg_size_in_bytes) { 1627 sve_ldr(Zx, sve_spill_address(vector_reg_size_in_bytes, offset)); 1628 } 1629 void unspill_sve_predicate(PRegister pr, int offset, int predicate_reg_size_in_bytes) { 1630 sve_ldr(pr, sve_spill_address(predicate_reg_size_in_bytes, offset)); 1631 } 1632 1633 void spill_copy128(int src_offset, int dst_offset, 1634 Register tmp1=rscratch1, Register tmp2=rscratch2) { 1635 if (src_offset < 512 && (src_offset & 7) == 0 && 1636 dst_offset < 512 && (dst_offset & 7) == 0) { 1637 ldp(tmp1, tmp2, Address(sp, src_offset)); 1638 stp(tmp1, tmp2, Address(sp, dst_offset)); 1639 } else { 1640 unspill(tmp1, true, src_offset); 1641 spill(tmp1, true, dst_offset); 1642 unspill(tmp1, true, src_offset+8); 1643 spill(tmp1, true, dst_offset+8); 1644 } 1645 } 1646 void spill_copy_sve_vector_stack_to_stack(int src_offset, int dst_offset, 1647 int sve_vec_reg_size_in_bytes) { 1648 assert(sve_vec_reg_size_in_bytes % 16 == 0, "unexpected sve vector reg size"); 1649 for (int i = 0; i < sve_vec_reg_size_in_bytes / 16; i++) { 1650 spill_copy128(src_offset, dst_offset); 1651 src_offset += 16; 1652 dst_offset += 16; 1653 } 1654 } 1655 void spill_copy_sve_predicate_stack_to_stack(int src_offset, int dst_offset, 1656 int sve_predicate_reg_size_in_bytes) { 1657 sve_ldr(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, src_offset)); 1658 sve_str(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, dst_offset)); 1659 reinitialize_ptrue(); 1660 } 1661 void cache_wb(Address line); 1662 void cache_wbsync(bool is_pre); 1663 1664 // Code for java.lang.Thread::onSpinWait() intrinsic. 1665 void spin_wait(); 1666 1667 void fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow); 1668 void fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow); 1669 1670 private: 1671 // Check the current thread doesn't need a cross modify fence. 1672 void verify_cross_modify_fence_not_required() PRODUCT_RETURN; 1673 1674 }; 1675 1676 #ifdef ASSERT 1677 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; } 1678 #endif 1679 1680 /** 1681 * class SkipIfEqual: 1682 * 1683 * Instantiating this class will result in assembly code being output that will 1684 * jump around any code emitted between the creation of the instance and it's 1685 * automatic destruction at the end of a scope block, depending on the value of 1686 * the flag passed to the constructor, which will be checked at run-time. 1687 */ 1688 class SkipIfEqual { 1689 private: 1690 MacroAssembler* _masm; 1691 Label _label; 1692 1693 public: 1694 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); 1695 ~SkipIfEqual(); 1696 }; 1697 1698 struct tableswitch { 1699 Register _reg; 1700 int _insn_index; jint _first_key; jint _last_key; 1701 Label _after; 1702 Label _branches; 1703 }; 1704 1705 #endif // CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP