/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP

#include "asm/assembler.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;

 public:
  using Assembler::mov;
  using Assembler::movi;

 protected:

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
#ifdef CC_INTERP
  // c++ interpreter never wants to use interp_masm version of call_VM
#define VIRTUAL
#else
#define VIRTUAL virtual
#endif

  VIRTUAL void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments,       // the number of arguments to pop after the call
    Label*  retaddr = NULL
  );

  VIRTUAL void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments,       // the number of arguments to pop after the call
    Label&  retaddr) {
    call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  }
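  // Illustrative only (a sketch, not part of this interface): a typical leaf
  // call from a stub passes its arguments in c_rarg0..c_rargN and goes through
  // one of the call_VM_leaf wrappers declared further down, e.g.
  //
  //   __ mov(c_rarg0, rthread);
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::some_leaf_entry), 1);
  //
  // SharedRuntime::some_leaf_entry is a placeholder name used only for this
  // example; CAST_FROM_FN_PTR is the usual HotSpot macro for converting a
  // C++ function pointer to an address.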
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  VIRTUAL void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // Maximum size of class area in Metaspace when compressed
  uint64_t use_XOR_for_compressed_class_base;

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {
    use_XOR_for_compressed_class_base
      = (operand_valid_for_logical_immediate(false /*is32*/,
                                             (uint64_t)Universe::narrow_klass_base())
         && ((uint64_t)Universe::narrow_klass_base()
             > (1u << log2_intptr(CompressedClassSpaceSize))));
  }

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
  // tmp_reg must be supplied and must not be rscratch1 or rscratch2
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);


  // Helper functions for statistics gathering.
  // Unconditional atomic increment.
  void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
    lea(tmp1, counter_addr);
    atomic_incw(tmp1, tmp2, tmp3);
  }

  // Load Effective Address
  void lea(Register r, const Address &a) {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), a.rspec());
    a.lea(this, r);
  }

  void addmw(Address a, Register incr, Register scratch) {
    ldrw(scratch, a);
    addw(scratch, scratch, incr);
    strw(scratch, a);
  }

  // Add constant to memory word
  void addmw(Address a, int imm, Register scratch) {
    ldrw(scratch, a);
    if (imm > 0)
      addw(scratch, scratch, (unsigned)imm);
    else
      subw(scratch, scratch, (unsigned)-imm);
    strw(scratch, a);
  }
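  // Note (illustrative, not part of the interface): addmw is a plain
  // load/modify/store and is therefore not atomic.  For example,
  // addmw(counter, 1, rscratch1) expands to roughly
  //
  //   ldrw rscratch1, [counter]
  //   addw rscratch1, rscratch1, #1
  //   strw rscratch1, [counter]
  //
  // whereas atomic_incw above must be used when other threads may update the
  // same word concurrently.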
  // Frame creation and destruction shared between JITs.
  void build_frame(int framesize);
  void remove_frame(int framesize);

  virtual void _call_Unimplemented(address call_site) {
    mov(rscratch2, call_site);
  }

#define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)

  // aliases defined in AARCH64 spec

  template<class T>
  inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
  inline void cmp(Register Rd, unsigned imm) { subs(zr, Rd, imm); }

  inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
  inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }

  void cset(Register Rd, Assembler::Condition cond) {
    csinc(Rd, zr, zr, ~cond);
  }
  void csetw(Register Rd, Assembler::Condition cond) {
    csincw(Rd, zr, zr, ~cond);
  }

  void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
    csneg(Rd, Rn, Rn, ~cond);
  }
  void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
    csnegw(Rd, Rn, Rn, ~cond);
  }

  inline void movw(Register Rd, Register Rn) {
    if (Rd == sp || Rn == sp) {
      addw(Rd, Rn, 0U);
    } else {
      orrw(Rd, zr, Rn);
    }
  }
  inline void mov(Register Rd, Register Rn) {
    assert(Rd != r31_sp && Rn != r31_sp, "should be");
    if (Rd == Rn) {
    } else if (Rd == sp || Rn == sp) {
      add(Rd, Rn, 0U);
    } else {
      orr(Rd, zr, Rn);
    }
  }

  inline void moviw(Register Rd, unsigned imm) { orrw(Rd, zr, imm); }
  inline void movi(Register Rd, unsigned imm) { orr(Rd, zr, imm); }

  inline void tstw(Register Rd, Register Rn) { andsw(zr, Rd, Rn); }
  inline void tst(Register Rd, Register Rn) { ands(zr, Rd, Rn); }

  inline void tstw(Register Rd, uint64_t imm) { andsw(zr, Rd, imm); }
  inline void tst(Register Rd, uint64_t imm) { ands(zr, Rd, imm); }

  inline void bfiw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void bfi(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void bfxilw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void bfxil(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void sbfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void sbfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void sbfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void sbfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void ubfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void ubfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void ubfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void ubfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void asrw(Register Rd, Register Rn, unsigned imm) {
    sbfmw(Rd, Rn, imm, 31);
  }
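  // Worked example (for illustration): asrw(r0, r1, 4) emits
  // sbfmw(r0, r1, 4, 31), i.e. an arithmetic right shift of the low 32 bits
  // of r1 by 4, exactly as the AArch64 ASR (immediate) alias is defined.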
  inline void asr(Register Rd, Register Rn, unsigned imm) {
    sbfm(Rd, Rn, imm, 63);
  }

  inline void lslw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, ((32 - imm) & 31), (31 - imm));
  }

  inline void lsl(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, ((64 - imm) & 63), (63 - imm));
  }

  inline void lsrw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, imm, 31);
  }

  inline void lsr(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, imm, 63);
  }

  inline void rorw(Register Rd, Register Rn, unsigned imm) {
    extrw(Rd, Rn, Rn, imm);
  }

  inline void ror(Register Rd, Register Rn, unsigned imm) {
    extr(Rd, Rn, Rn, imm);
  }

  inline void sxtbw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 7);
  }
  inline void sxthw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 15);
  }
  inline void sxtb(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 7);
  }
  inline void sxth(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 15);
  }
  inline void sxtw(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 31);
  }

  inline void uxtbw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 7);
  }
  inline void uxthw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 15);
  }
  inline void uxtb(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 7);
  }
  inline void uxth(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 15);
  }
  inline void uxtw(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 31);
  }

  inline void cmnw(Register Rn, Register Rm) {
    addsw(zr, Rn, Rm);
  }
  inline void cmn(Register Rn, Register Rm) {
    adds(zr, Rn, Rm);
  }

  inline void cmpw(Register Rn, Register Rm) {
    subsw(zr, Rn, Rm);
  }
  inline void cmp(Register Rn, Register Rm) {
    subs(zr, Rn, Rm);
  }

  inline void negw(Register Rd, Register Rn) {
    subw(Rd, zr, Rn);
  }

  inline void neg(Register Rd, Register Rn) {
    sub(Rd, zr, Rn);
  }

  inline void negsw(Register Rd, Register Rn) {
    subsw(Rd, zr, Rn);
  }

  inline void negs(Register Rd, Register Rn) {
    subs(Rd, zr, Rn);
  }

  inline void cmnw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    addsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmn(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    adds(zr, Rn, Rm, kind, shift);
  }

  inline void cmpw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmp(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subs(zr, Rn, Rm, kind, shift);
  }

  inline void negw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subw(Rd, zr, Rn, kind, shift);
  }

  inline void neg(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    sub(Rd, zr, Rn, kind, shift);
  }

  inline void negsw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subsw(Rd, zr, Rn, kind, shift);
  }

  inline void negs(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subs(Rd, zr, Rn, kind, shift);
  }

  inline void mnegw(Register Rd, Register Rn, Register Rm) {
    msubw(Rd, Rn, Rm, zr);
  }
  inline void mneg(Register Rd, Register Rn, Register Rm) {
    msub(Rd, Rn, Rm, zr);
  }
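  // For illustration: mneg(Rd, Rn, Rm) above is msub(Rd, Rn, Rm, zr), which
  // computes Rd = zr - Rn * Rm, i.e. the negated product, matching the
  // architectural MNEG alias.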
  inline void mulw(Register Rd, Register Rn, Register Rm) {
    maddw(Rd, Rn, Rm, zr);
  }
  inline void mul(Register Rd, Register Rn, Register Rm) {
    madd(Rd, Rn, Rm, zr);
  }

  inline void smnegl(Register Rd, Register Rn, Register Rm) {
    smsubl(Rd, Rn, Rm, zr);
  }
  inline void smull(Register Rd, Register Rn, Register Rm) {
    smaddl(Rd, Rn, Rm, zr);
  }

  inline void umnegl(Register Rd, Register Rn, Register Rm) {
    umsubl(Rd, Rn, Rm, zr);
  }
  inline void umull(Register Rd, Register Rn, Register Rm) {
    umaddl(Rd, Rn, Rm, zr);
  }

#define WRAP(INSN)                                                             \
  void INSN(Register Rd, Register Rn, Register Rm, Register Ra) {             \
    if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_A53MAC) && Ra != zr)  \
      nop();                                                                   \
    Assembler::INSN(Rd, Rn, Rm, Ra);                                           \
  }

  WRAP(madd) WRAP(msub) WRAP(maddw) WRAP(msubw)
  WRAP(smaddl) WRAP(smsubl) WRAP(umaddl) WRAP(umsubl)
#undef WRAP

  // macro assembly operations needed for aarch64

  // first two private routines for loading 32 bit or 64 bit constants
 private:

  void mov_immediate64(Register dst, u_int64_t imm64);
  void mov_immediate32(Register dst, u_int32_t imm32);

  int push(unsigned int bitset, Register stack);
  int pop(unsigned int bitset, Register stack);

  void mov(Register dst, Address a);

 public:
  void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
  void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }

  // Push and pop everything that might be clobbered by a native
  // runtime call except rscratch1 and rscratch2.  (They are always
  // scratch, so we don't have to protect them.)  Only save the lower
  // 64 bits of each vector register.
  void push_call_clobbered_registers();
  void pop_call_clobbered_registers();
  void push_call_clobbered_fp_registers();
  void pop_call_clobbered_fp_registers();

  // now mov instructions for loading absolute addresses and 32 or
  // 64 bit integers

  void mov(Register dst, address addr);

  inline void mov(Register dst, u_int64_t imm64)
  {
    mov_immediate64(dst, imm64);
  }

  inline void movw(Register dst, u_int32_t imm32)
  {
    mov_immediate32(dst, imm32);
  }

  inline void mov(Register dst, long l)
  {
    mov(dst, (u_int64_t)l);
  }

  inline void mov(Register dst, int i)
  {
    mov(dst, (long)i);
  }

  void mov(Register dst, RegisterOrConstant src) {
    if (src.is_register())
      mov(dst, src.as_register());
    else
      mov(dst, src.as_constant());
  }

  void movptr(Register r, uintptr_t imm64);

  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);

  void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
    orr(Vd, T, Vn, Vn);
  }

 public:

  // Generalized Test Bit And Branch, including a "far" variety which
  // spans more than 32KiB.
  void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool far = false) {
    assert(cond == EQ || cond == NE, "must be");

    if (far)
      cond = ~cond;

    void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
    if (cond == Assembler::EQ)
      branch = &Assembler::tbz;
    else
      branch = &Assembler::tbnz;

    if (far) {
      Label L;
      (this->*branch)(Rt, bitpos, L);
      b(dest);
      bind(L);
    } else {
      (this->*branch)(Rt, bitpos, dest);
    }
  }

  // macro instructions for accessing and updating floating point
  // status register
  //
  // FPSR : op1 == 011
  //        CRn == 0100
  //        CRm == 0100
  //        op2 == 001

  inline void get_fpsr(Register reg)
  {
    mrs(0b11, 0b0100, 0b0100, 0b001, reg);
  }

  inline void set_fpsr(Register reg)
  {
    msr(0b011, 0b0100, 0b0100, 0b001, reg);
  }

  inline void clear_fpsr()
  {
    msr(0b011, 0b0100, 0b0100, 0b001, zr);
  }

  // DCZID_EL0: op1 == 011
  //            CRn == 0000
  //            CRm == 0000
  //            op2 == 111
  inline void get_dczid_el0(Register reg)
  {
    mrs(0b011, 0b0000, 0b0000, 0b111, reg);
  }

  // CTR_EL0:   op1 == 011
  //            CRn == 0000
  //            CRm == 0000
  //            op2 == 001
  inline void get_ctr_el0(Register reg)
  {
    mrs(0b011, 0b0000, 0b0000, 0b001, reg);
  }

  // idiv variant which deals with MINLONG as dividend and -1 as divisor
  int corrected_idivl(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
  int corrected_idivq(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  virtual void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);

  static address target_addr_for_insn(address insn_addr, unsigned insn);
  static address target_addr_for_insn(address insn_addr) {
    unsigned insn = *(unsigned*)insn_addr;
    return target_addr_for_insn(insn_addr, insn);
  }
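  // Illustrative sketch of the null_check support above (robj is just a
  // placeholder register for this example): a field access such as
  //
  //   __ null_check(robj, 12);
  //   __ ldrw(r0, Address(robj, 12));
  //
  // needs no explicit test, because with a NULL robj the ldrw itself faults
  // in the unmapped first page and the VM's signal handler turns that into
  // the Java exception; only offsets for which needs_explicit_null_check()
  // returns true cause real checking code to be emitted.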
  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  static int pd_patch_instruction_size(address branch, address target);
  static void pd_patch_instruction(address branch, address target) {
    pd_patch_instruction_size(branch, target);
  }
  static address pd_call_destination(address branch) {
    return target_addr_for_insn(branch);
  }
#ifndef PRODUCT
  static void pd_print_patched_instruction(address branch);
#endif

  static int patch_oop(address insn_addr, address o);

  address emit_trampoline_stub(int insts_call_instruction_offset, address target);

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  int load_signed_byte32(Register dst, Address src);
  int load_signed_short32(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  // x86_64 aliases an unqualified register/address increment and
  // decrement to call incrementq and decrementq but also supports
  // explicitly sized calls to incrementq/decrementq or
  // incrementl/decrementl

  // for aarch64 the proper convention would be to use
  // increment/decrement for 64 bit operations and
  // incrementw/decrementw for 32 bit operations. so when porting
  // x86_64 code we can leave calls to increment/decrement as is,
  // replace incrementq/decrementq with increment/decrement and
  // replace incrementl/decrementl with incrementw/decrementw.

  // n.b. increment/decrement calls with an Address destination will
  // need to use a scratch register to load the value to be
  // incremented. increment/decrement calls which add or subtract a
  // constant value greater than 2^12 will need to use a 2nd scratch
  // register to hold the constant. so, a register increment/decrement
  // may trash rscratch2, and an address increment/decrement may trash
  // rscratch1 and rscratch2.

  void decrementw(Address dst, int value = 1);
  void decrementw(Register reg, int value = 1);

  void decrement(Register reg, int value = 1);
  void decrement(Address dst, int value = 1);

  void incrementw(Address dst, int value = 1);
  void incrementw(Register reg, int value = 1);

  void increment(Register reg, int value = 1);
  void increment(Address dst, int value = 1);


  // Alignment
  void align(int modulus);

  // Stack frame creation/removal
  void enter()
  {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    mov(rfp, sp);
  }
  void leave()
  {
    mov(sp, rfp);
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
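  // Illustrative pairing (a sketch, not a required pattern): a simple stub
  // that needs a frame brackets its body with the two helpers above,
  //
  //   __ enter();              // stp rfp, lr, [sp, #-16]!; mov rfp, sp
  //   ... stub body ...
  //   __ leave();              // mov sp, rfp; ldp rfp, lr, [sp], #16
  //   __ ret(lr);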
  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label &last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Register last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(Register thread);

  // thread in the default location (rthread)
  void reset_last_Java_frame(bool clear_fp);

  // Stores
  void store_check(Register obj);                // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)

#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // INCLUDE_ALL_GCS

  // split store_check(Register obj) to enhance instruction interleaving
  void store_check_part_1(Register obj);
  void store_check_part_2(Register obj);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);
  void cmp_klass(Register oop, Register trial_klass, Register tmp);

  void load_heap_oop(Register dst, Address src);

  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);

  // currently unimplemented
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);

  void set_narrow_klass(Register dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  void push_CPU_state(bool save_vectors = false);
  void pop_CPU_state(bool restore_vectors = false);

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // allocation
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void verify_tlab();

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);


  // Debugging

  // only if +VerifyOops
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  static void debug64(char* msg, int64_t pc, int64_t regs[]);

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }

  void should_not_reach_here() { stop("should not reach here"); }

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    mov(rscratch2, -offset);
    str(zr, Address(sp, rscratch2));
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages.  Also, clobbers tmp.
  void bang_stack_size(Register size, Register tmp);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  // Arithmetics

  void addptr(const Address &dst, int32_t src);
  void cmpptr(Register src1, Address src2);

  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                  Label &suceed, Label *fail);

  void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                Label &suceed, Label *fail);

  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);

  void atomic_xchg(Register prev, Register newv, Register addr);
  void atomic_xchgw(Register prev, Register newv, Register addr);
  void atomic_xchgal(Register prev, Register newv, Register addr);
  void atomic_xchgalw(Register prev, Register newv, Register addr);

  void orptr(Address adr, RegisterOrConstant src) {
    ldr(rscratch2, adr);
    if (src.is_register())
      orr(rscratch2, rscratch2, src.as_register());
    else
      orr(rscratch2, rscratch2, src.as_constant());
    str(rscratch2, adr);
  }

  // A generic CAS; success or failure is in the EQ flag.
  void cmpxchg(Register addr, Register expected, Register new_val,
               enum operand_size size,
               bool acquire, bool release,
               Register tmp = rscratch1);

  // Calls

  address trampoline_call(Address entry, CodeBuffer *cbuf = NULL);

  static bool far_branches() {
    return ReservedCodeCacheSize > branch_range;
  }

  // Jumps that can reach anywhere in the code cache.
  // Trashes tmp.
  void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
  void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);

  static int far_branch_size() {
    if (far_branches()) {
      return 3 * 4;  // adrp, add, br
    } else {
      return 4;
    }
  }

  // Emit the CompiledIC call idiom
  address ic_call(address entry);

 public:

  // Data

  void mov_metadata(Register dst, Metadata* obj);
  Address allocate_metadata_address(Metadata* obj);
  Address constant_oop_address(jobject obj);

  void movoop(Register dst, jobject obj, bool immediate = false);
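  // Illustrative use of the generic CAS (cmpxchg) above; the register names
  // and the failure label are placeholders, and the example assumes the
  // operand_size enumerator used for a 64-bit access is xword:
  //
  //   __ cmpxchg(addr_reg, expected_reg, new_reg, Assembler::xword,
  //              /*acquire*/ true, /*release*/ true);
  //   __ br(Assembler::NE, failed);   // EQ set on success, NE on failure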
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void kernel_crc32(Register crc, Register buf, Register len,
                    Register table0, Register table1, Register table2, Register table3,
                    Register tmp, Register tmp2, Register tmp3);

#undef VIRTUAL

  // Stack push and pop individual 64 bit registers
  void push(Register src);
  void pop(Register dst);

  // push all registers onto the stack
  void pusha();
  void popa();

  void repne_scan(Register addr, Register value, Register count,
                  Register scratch);
  void repne_scanw(Register addr, Register value, Register count,
                   Register scratch);

  typedef void (MacroAssembler::* add_sub_imm_insn)(Register Rd, Register Rn, unsigned imm);
  typedef void (MacroAssembler::* add_sub_reg_insn)(Register Rd, Register Rn, Register Rm, enum shift_kind kind, unsigned shift);

  // If a constant does not fit in an immediate field, generate some
  // number of MOV instructions and then perform the operation
  void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                             add_sub_imm_insn insn1,
                             add_sub_reg_insn insn2);
  // Separate version which sets the flags
  void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
                               add_sub_imm_insn insn1,
                               add_sub_reg_insn insn2);

#define WRAP(INSN)                                                           \
  void INSN(Register Rd, Register Rn, unsigned imm) {                        \
    wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN);  \
  }                                                                          \
                                                                             \
  void INSN(Register Rd, Register Rn, Register Rm,                           \
            enum shift_kind kind, unsigned shift = 0) {                      \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                                \
  }                                                                          \
                                                                             \
  void INSN(Register Rd, Register Rn, Register Rm) {                         \
    Assembler::INSN(Rd, Rn, Rm);                                             \
  }                                                                          \
                                                                             \
  void INSN(Register Rd, Register Rn, Register Rm,                           \
            ext::operation option, int amount = 0) {                         \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                             \
  }

  WRAP(add) WRAP(addw) WRAP(sub) WRAP(subw)

#undef WRAP
#define WRAP(INSN)                                                           \
  void INSN(Register Rd, Register Rn, unsigned imm) {                        \
    wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
  }                                                                          \
                                                                             \
  void INSN(Register Rd, Register Rn, Register Rm,                           \
            enum shift_kind kind, unsigned shift = 0) {                      \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                                \
  }                                                                          \
                                                                             \
  void INSN(Register Rd, Register Rn, Register Rm) {                         \
    Assembler::INSN(Rd, Rn, Rm);                                             \
  }                                                                          \
                                                                             \
  void INSN(Register Rd, Register Rn, Register Rm,                           \
            ext::operation option, int amount = 0) {                         \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                             \
  }

  WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)

  void add(Register Rd, Register Rn, RegisterOrConstant increment);
  void addw(Register Rd, Register Rn, RegisterOrConstant increment);
  void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
  void subw(Register Rd, Register Rn, RegisterOrConstant decrement);

  void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);

  void tableswitch(Register index, jint lowbound, jint highbound,
                   Label &jumptable, Label &jumptable_end, int stride = 1) {
    adr(rscratch1, jumptable);
    subsw(rscratch2, index, lowbound);
    subsw(zr, rscratch2, highbound - lowbound);
    br(Assembler::HS, jumptable_end);
    add(rscratch1, rscratch1, rscratch2,
        ext::sxtw, exact_log2(stride * Assembler::instruction_size));
    br(rscratch1);
  }
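  // Worked example (illustrative; 'table' and 'table_end' are placeholder
  // labels): for a switch over keys 0..4 with the default target at
  // table_end,
  //
  //   __ tableswitch(r2, 0, 5, table, table_end);
  //
  // computes rscratch2 = r2 - 0, branches to table_end if the result is >= 5
  // (unsigned, HS), and otherwise branches into the table of unconditional
  // branches that the caller emits at 'table', one instruction per key when
  // stride is the default 1.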
  // Form an address from base + offset in Rd.  Rd may or may not
  // actually be used: you must use the Address that is returned.  It
  // is up to you to ensure that the shift provided matches the size
  // of your data.
  Address form_address(Register Rd, Register base, long byte_offset, int shift);

  // Return true iff an address is within the 48-bit AArch64 address
  // space.
  bool is_valid_AArch64_address(address a) {
    return ((uint64_t)a >> 48) == 0;
  }

  // Load the base of the cardtable byte map into reg.
  void load_byte_map_base(Register reg);

  // Prolog generator routines to support switch between x86 code and
  // generated ARM code

  // routine to generate an x86 prolog for a stub function which
  // bootstraps into the generated ARM code which directly follows the
  // stub
  //

 public:

  void ldr_constant(Register dest, const Address &const_addr) {
    if (NearCpool) {
      ldr(dest, const_addr);
    } else {
      unsigned long offset;
      adrp(dest, InternalAddress(const_addr.target()), offset);
      ldr(dest, Address(dest, offset));
    }
  }

  address read_polling_page(Register r, address page, relocInfo::relocType rtype);
  address read_polling_page(Register r, relocInfo::relocType rtype);

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void update_word_crc32(Register crc, Register v, Register tmp,
                         Register table0, Register table1, Register table2, Register table3,
                         bool upper = false);

  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1);
  void string_equals(Register str1, Register str2,
                     Register cnt, Register result,
                     Register tmp1);
  void char_arrays_equals(Register ary1, Register ary2,
                          Register result, Register tmp1);
  void fill_words(Register base, Register cnt, Register value);
  void zero_words(Register base, u_int64_t cnt);
  void zero_words(Register base, Register cnt);
  void block_zero(Register base, Register cnt, bool is_large = false);

  void encode_iso_array(Register src, Register dst,
                        Register len, Register result,
                        FloatRegister Vtmp1, FloatRegister Vtmp2,
                        FloatRegister Vtmp3, FloatRegister Vtmp4);
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register tmp3, Register tmp4,
                      int int_cnt1, Register result);

 private:
  void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
    add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2);
  }
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_128_x_128_loop(Register y, Register z,
                               Register carry, Register carry2,
                               Register idx, Register jdx,
                               Register yz_idx1, Register yz_idx2,
                               Register tmp, Register tmp3, Register tmp4,
                               Register tmp7, Register product_hi);

 public:
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
                       Register zlen, Register tmp1, Register tmp2, Register tmp3,
                       Register tmp4, Register tmp5, Register tmp6, Register tmp7);

  // ISB may be needed because of a safepoint
  void maybe_isb() { isb(); }

 private:
  // Return the effective address r + (r1 << ext) + offset.
  // Uses rscratch2.
  Address offsetted_address(Register r, Register r1, Address::extend ext,
                            int offset, int size);

 private:
  // Returns an address on the stack which is reachable with a ldr/str of size
  // Uses rscratch2 if the address is not directly reachable
  Address spill_address(int size, int offset, Register tmp=rscratch2);

 public:
  void spill(Register Rx, bool is64, int offset) {
    if (is64) {
      str(Rx, spill_address(8, offset));
    } else {
      strw(Rx, spill_address(4, offset));
    }
  }
  void spill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
    str(Vx, T, spill_address(1 << (int)T, offset));
  }
  void unspill(Register Rx, bool is64, int offset) {
    if (is64) {
      ldr(Rx, spill_address(8, offset));
    } else {
      ldrw(Rx, spill_address(4, offset));
    }
  }
  void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
    ldr(Vx, T, spill_address(1 << (int)T, offset));
  }
  void spill_copy128(int src_offset, int dst_offset,
                     Register tmp1=rscratch1, Register tmp2=rscratch2) {
    if (src_offset < 512 && (src_offset & 7) == 0 &&
        dst_offset < 512 && (dst_offset & 7) == 0) {
      ldp(tmp1, tmp2, Address(sp, src_offset));
      stp(tmp1, tmp2, Address(sp, dst_offset));
    } else {
      unspill(tmp1, true, src_offset);
      spill(tmp1, true, dst_offset);
      unspill(tmp1, true, src_offset+8);
      spill(tmp1, true, dst_offset+8);
    }
  }
};

#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

struct tableswitch {
  Register _reg;
  int _insn_index;
  jint _first_key;
  jint _last_key;
  Label _after;
  Label _branches;
};

#endif // CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP