/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_MACROASSEMBLER_RISCV_HPP
#define CPU_RISCV_MACROASSEMBLER_RISCV_HPP

#include "asm/assembler.hpp"
#include "code/vmreg.hpp"
#include "metaprogramming/enableIf.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/compressedOops.hpp"
#include "utilities/powerOfTwo.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);

  // Alignment
  int align(int modulus, int extra_offset = 0);

  static inline void assert_alignment(address pc, int alignment = NativeInstruction::instruction_size) {
    assert(is_aligned(pc, alignment), "bad alignment");
  }

  // nop
  void post_call_nop();

  // Stack frame creation/removal
  // Note that SP must be updated to the right place before saving/restoring RA and FP
  // because signal based thread suspend/resume could happen asynchronously.
  void enter() {
    addi(sp, sp, - 2 * wordSize);
    sd(ra, Address(sp, wordSize));
    sd(fp, Address(sp));
    addi(fp, sp, 2 * wordSize);
  }

  void leave() {
    addi(sp, fp, - 2 * wordSize);
    ld(fp, Address(sp));
    ld(ra, Address(sp, wordSize));
    addi(sp, sp, 2 * wordSize);
  }


  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
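  //
  // An illustrative use from a stub or interpreter generator (the runtime
  // entry name here is hypothetical):
  //
  //   __ call_VM(x10 /* oop_result */,
  //              CAST_FROM_FN_PTR(address, SomeRuntime::some_entry),
  //              c_rarg1 /* arg_1 */);
  //
  // Unless check_exceptions is passed as false, pending exceptions are
  // checked for (and dispatched) right after the call returns.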

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result(Register oop_result, Register java_thread);
  void get_vm_result_2(Register metadata_result, Register java_thread);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_0);
  void call_VM_leaf(address entry_point,
                    Register arg_0, Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_0, Register arg_1, Register arg_2);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point, Register arg_0);
  void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register last_java_sp, Register last_java_fp, address last_java_pc, Register tmp);
  void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Label &last_java_pc, Register tmp);
  void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Register last_java_pc, Register tmp);

  // thread in the default location (xthread)
  void reset_last_Java_frame(bool clear_fp);

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments,       // the number of arguments to pop after the call
    Label*  retaddr = NULL
  );

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments,       // the number of arguments to pop after the call
    Label&  retaddr) {
    call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  }

  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);

  virtual void check_and_handle_earlyret(Register java_thread);
  virtual void check_and_handle_popframe(Register java_thread);

  void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
  void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
  void resolve_jobject(Register value, Register tmp1, Register tmp2);
  void resolve_global_jobject(Register value, Register tmp1, Register tmp2);

  void movoop(Register dst, jobject obj);
  void mov_metadata(Register dst, Metadata* obj);
  void bang_stack_size(Register size, Register tmp);
  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_klass(Register dst, Klass* k);

  void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
  void access_load_at(BasicType type, DecoratorSet decorators, Register dst,
                      Address src, Register tmp1, Register tmp2);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst,
                       Register val, Register tmp1, Register tmp2, Register tmp3);
  void load_klass(Register dst, Register src, Register tmp = t0);
  void load_klass_check_null(Register dst, Register src, Register tmp = t0);
  void store_klass(Register dst, Register src, Register tmp = t0);
  void cmp_klass(Register oop, Register trial_klass, Register tmp1, Register tmp2, Label &L);

  void encode_klass_not_null(Register r, Register tmp = t0);
  void decode_klass_not_null(Register r, Register tmp = t0);
  void encode_klass_not_null(Register dst, Register src, Register tmp);
  void decode_klass_not_null(Register dst, Register src, Register tmp);
  void decode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); };
  void load_heap_oop(Register dst, Address src, Register tmp1,
                     Register tmp2, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                              Register tmp2, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1,
                      Register tmp2, Register tmp3, DecoratorSet decorators = 0);

  void store_klass_gap(Register dst, Register src);

  // currently unimplemented
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (linked NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).
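  //
  // For example, with a small known offset no explicit check is emitted:
  // if reg is NULL, the subsequent access to M[reg + offset] touches an
  // unmapped page at the bottom of the address space and the resulting
  // fault is turned into the NullPointerException by the signal handler.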

  virtual void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // idiv variant which deals with MINLONG as dividend and -1 as divisor
  int corrected_idivl(Register result, Register rs1, Register rs2,
                      bool want_remainder);
  int corrected_idivq(Register result, Register rs1, Register rs2,
                      bool want_remainder);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_tmp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Form an address from base + offset in Rd. Rd may or may not
  // actually be used: you must use the Address that is returned. It
  // is up to you to ensure that the shift provided matches the size
  // of your data.
  Address form_address(Register Rd, Register base, int64_t byte_offset);

  // Sometimes we get misaligned loads and stores, usually from Unsafe
  // accesses, and these can exceed the offset range.
  Address legitimize_address(Register Rd, const Address &adr) {
    if (adr.getMode() == Address::base_plus_offset) {
      if (!is_offset_in_range(adr.offset(), 12)) {
        return form_address(Rd, adr.base(), adr.offset());
      }
    }
    return adr;
  }

  // allocation
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register tmp1,                     // temp register
    Register tmp2,                     // temp register
    Label&   slow_case,                // continuation point if fast allocation fails
    bool     is_far = false
  );

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except tmp_reg
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register tmp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     Register super_check_offset = noreg);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The tmp1_reg and tmp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
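  //
  // A typical pairing of the two paths, essentially what
  // check_klass_subtype() below does (illustrative):
  //
  //   Label L_success, L_failure;
  //   check_klass_subtype_fast_path(sub, super, tmp, &L_success, &L_failure, NULL);
  //   check_klass_subtype_slow_path(sub, super, tmp1, tmp2, &L_success, NULL);
  //   bind(L_failure);  // falling through the slow path means failure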
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register tmp1_reg,
                                     Register tmp2_reg,
                                     Label* L_success,
                                     Label* L_failure);

  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register tmp_reg,
                           Label& L_success);

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // A more convenient access to fence for our purposes.
  // We use four bits to indicate the read and write components of the
  // predecessor and successor sets, and extend r to ir and w to ow
  // when UseConservativeFence is enabled.
  enum Membar_mask_bits {
    StoreStore = 0b0101,               // (pred = ow   + succ = ow)
    LoadStore  = 0b1001,               // (pred = ir   + succ = ow)
    StoreLoad  = 0b0110,               // (pred = ow   + succ = ir)
    LoadLoad   = 0b1010,               // (pred = ir   + succ = ir)
    AnyAny     = LoadStore | StoreLoad // (pred = iorw + succ = iorw)
  };

  void membar(uint32_t order_constraint);

  static void membar_mask_to_pred_succ(uint32_t order_constraint,
                                       uint32_t& predecessor, uint32_t& successor) {
    predecessor = (order_constraint >> 2) & 0x3;
    successor = order_constraint & 0x3;

    // extend rw -> iorw:
    // 01(w) -> 0101(ow)
    // 10(r) -> 1010(ir)
    // 11(rw)-> 1111(iorw)
    if (UseConservativeFence) {
      predecessor |= predecessor << 2;
      successor |= successor << 2;
    }
  }

  static int pred_succ_to_membar_mask(uint32_t predecessor, uint32_t successor) {
    return ((predecessor & 0x3) << 2) | (successor & 0x3);
  }

  void pause() {
    fence(w, 0);
  }

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  static void debug64(char* msg, int64_t pc, int64_t regs[]);

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  static address target_addr_for_insn(address insn_addr);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  static int pd_patch_instruction_size(address branch, address target);
  static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
    pd_patch_instruction_size(branch, target);
  }
  static address pd_call_destination(address branch) {
    return target_addr_for_insn(branch);
  }

  static int patch_oop(address insn_addr, address o);

  static address get_target_of_li32(address insn_addr);
  static int patch_imm_in_li32(address branch, int32_t target);

  // Return whether code is emitted to a scratch blob.
  virtual bool in_scratch_emit_size() {
    return false;
  }

  address emit_trampoline_stub(int insts_call_instruction_offset, address target);
  static int max_trampoline_stub_size();
  void emit_static_call_stub();
  static int static_call_stub_size();

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes);

 public:
  // Standard pseudo instructions
  inline void nop() {
    addi(x0, x0, 0);
  }

  inline void mv(Register Rd, Register Rs) {
    if (Rd != Rs) {
      addi(Rd, Rs, 0);
    }
  }

  inline void notr(Register Rd, Register Rs) {
    xori(Rd, Rs, -1);
  }

  inline void neg(Register Rd, Register Rs) {
    sub(Rd, x0, Rs);
  }

  inline void negw(Register Rd, Register Rs) {
    subw(Rd, x0, Rs);
  }

  inline void sext_w(Register Rd, Register Rs) {
    addiw(Rd, Rs, 0);
  }

  inline void zext_b(Register Rd, Register Rs) {
    andi(Rd, Rs, 0xFF);
  }

  inline void seqz(Register Rd, Register Rs) {
    sltiu(Rd, Rs, 1);
  }

  inline void snez(Register Rd, Register Rs) {
    sltu(Rd, x0, Rs);
  }

  inline void sltz(Register Rd, Register Rs) {
    slt(Rd, Rs, x0);
  }

  inline void sgtz(Register Rd, Register Rs) {
    slt(Rd, x0, Rs);
  }

  // Bit-manipulation extension pseudo instructions
  // zero extend word
  inline void zext_w(Register Rd, Register Rs) {
    add_uw(Rd, Rs, zr);
  }

  // Floating-point data-processing pseudo instructions
  inline void fmv_s(FloatRegister Rd, FloatRegister Rs) {
    if (Rd != Rs) {
      fsgnj_s(Rd, Rs, Rs);
    }
  }

  inline void fabs_s(FloatRegister Rd, FloatRegister Rs) {
    fsgnjx_s(Rd, Rs, Rs);
  }

  inline void fneg_s(FloatRegister Rd, FloatRegister Rs) {
    fsgnjn_s(Rd, Rs, Rs);
  }

  inline void fmv_d(FloatRegister Rd, FloatRegister Rs) {
    if (Rd != Rs) {
      fsgnj_d(Rd, Rs, Rs);
    }
  }

  inline void fabs_d(FloatRegister Rd, FloatRegister Rs) {
    fsgnjx_d(Rd, Rs, Rs);
  }

  inline void fneg_d(FloatRegister Rd, FloatRegister Rs) {
    fsgnjn_d(Rd, Rs, Rs);
  }

  // Control and status pseudo instructions
  void rdinstret(Register Rd);                  // read instruction-retired counter
  void rdcycle(Register Rd);                    // read cycle counter
  void rdtime(Register Rd);                     // read time
  void csrr(Register Rd, unsigned csr);         // read csr
  void csrw(unsigned csr, Register Rs);         // write csr
  void csrs(unsigned csr, Register Rs);         // set bits in csr
  void csrc(unsigned csr, Register Rs);         // clear bits in csr
  void csrwi(unsigned csr, unsigned imm);
  void csrsi(unsigned csr, unsigned imm);
  void csrci(unsigned csr, unsigned imm);
  void frcsr(Register Rd);                      // read floating-point csr
  void fscsr(Register Rd, Register Rs);         // swap floating-point csr
  void fscsr(Register Rs);                      // write floating-point csr
  void frrm(Register Rd);                       // read floating-point rounding mode
  void fsrm(Register Rd, Register Rs);          // swap floating-point rounding mode
  void fsrm(Register Rs);                       // write floating-point rounding mode
  void fsrmi(Register Rd, unsigned imm);
  void fsrmi(unsigned imm);
  void frflags(Register Rd);                    // read floating-point exception flags
  void fsflags(Register Rd, Register Rs);       // swap floating-point exception flags
  void fsflags(Register Rs);                    // write floating-point exception flags
  void fsflagsi(Register Rd, unsigned imm);
  void fsflagsi(unsigned imm);

  // Control transfer pseudo instructions
  void beqz(Register Rs, const address dest);
  void bnez(Register Rs, const address dest);
  void blez(Register Rs, const address dest);
  void bgez(Register Rs, const address dest);
  void bltz(Register Rs, const address dest);
  void bgtz(Register Rs, const address dest);

  void j(Label &l, Register temp = t0);
  void j(const address dest, Register temp = t0);
  void j(const Address &adr, Register temp = t0);
  void jal(Label &l, Register temp = t0);
  void jal(const address dest, Register temp = t0);
  void jal(const Address &adr, Register temp = t0);
  void jal(Register Rd, Label &L, Register temp = t0);
  void jal(Register Rd, const address dest, Register temp = t0);

  // label
  void beqz(Register Rs, Label &l, bool is_far = false);
  void bnez(Register Rs, Label &l, bool is_far = false);
  void blez(Register Rs, Label &l, bool is_far = false);
  void bgez(Register Rs, Label &l, bool is_far = false);
  void bltz(Register Rs, Label &l, bool is_far = false);
  void bgtz(Register Rs, Label &l, bool is_far = false);

  void beq (Register Rs1, Register Rs2, Label &L, bool is_far = false);
  void bne (Register Rs1, Register Rs2, Label &L, bool is_far = false);
  void blt (Register Rs1, Register Rs2, Label &L, bool is_far = false);
  void bge (Register Rs1, Register Rs2, Label &L, bool is_far = false);
  void bltu(Register Rs1, Register Rs2, Label &L, bool is_far = false);
  void bgeu(Register Rs1, Register Rs2, Label &L, bool is_far = false);

  void bgt (Register Rs, Register Rt, const address dest);
  void ble (Register Rs, Register Rt, const address dest);
  void bgtu(Register Rs, Register Rt, const address dest);
  void bleu(Register Rs, Register Rt, const address dest);

  void bgt (Register Rs, Register Rt, Label &l, bool is_far = false);
  void ble (Register Rs, Register Rt, Label &l, bool is_far = false);
  void bgtu(Register Rs, Register Rt, Label &l, bool is_far = false);
  void bleu(Register Rs, Register Rt, Label &l, bool is_far = false);

#define INSN_ENTRY_RELOC(result_type, header)                          \
  result_type header {                                                 \
    guarantee(rtype == relocInfo::internal_word_type,                  \
              "only internal_word_type relocs make sense here");       \
    relocate(InternalAddress(dest).rspec());                           \
    IncompressibleRegion ir(this);  /* relocations */

#define INSN(NAME)                                                                    \
  void NAME(Register Rs1, Register Rs2, const address dest) {                         \
    assert_cond(dest != NULL);                                                        \
    int64_t offset = dest - pc();                                                     \
    guarantee(is_imm_in_range(offset, 12, 1), "offset is invalid.");                  \
    Assembler::NAME(Rs1, Rs2, offset);                                                \
  }                                                                                   \
  INSN_ENTRY_RELOC(void, NAME(Register Rs1, Register Rs2, address dest, relocInfo::relocType rtype)) \
    NAME(Rs1, Rs2, dest);                                                             \
  }

  INSN(beq);
  INSN(bne);
  INSN(bge);
  INSN(bgeu);
  INSN(blt);
  INSN(bltu);

#undef INSN

#undef INSN_ENTRY_RELOC

  void float_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);

  void double_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);

 private:
  int push_reg(unsigned int bitset, Register stack);
  int pop_reg(unsigned int bitset, Register stack);
  int push_fp(unsigned int bitset, Register stack);
  int pop_fp(unsigned int bitset, Register stack);
#ifdef COMPILER2
  int push_v(unsigned int bitset, Register stack);
  int pop_v(unsigned int bitset, Register stack);
#endif // COMPILER2

 public:
  void push_reg(Register Rs);
  void pop_reg(Register Rd);
  void push_reg(RegSet regs, Register stack) { if (regs.bits()) push_reg(regs.bits(), stack); }
  void pop_reg(RegSet regs, Register stack)  { if (regs.bits()) pop_reg(regs.bits(), stack); }
  void push_fp(FloatRegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
  void pop_fp(FloatRegSet regs, Register stack)  { if (regs.bits()) pop_fp(regs.bits(), stack); }
#ifdef COMPILER2
  void push_v(VectorRegSet regs, Register stack) { if (regs.bits()) push_v(regs.bits(), stack); }
  void pop_v(VectorRegSet regs, Register stack)  { if (regs.bits()) pop_v(regs.bits(), stack); }
#endif // COMPILER2

  // Push and pop everything that might be clobbered by a native
  // runtime call except t0 and t1. (They are always
  // temporary registers, so we don't have to protect them.)
  // Additional registers can be excluded in a passed RegSet.
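  //
  // e.g. to preserve state around a runtime call whose result is
  // returned in x10 (illustrative):
  //
  //   __ push_call_clobbered_registers_except(RegSet::of(x10));
  //   __ rt_call(dest);
  //   __ pop_call_clobbered_registers_except(RegSet::of(x10));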
  void push_call_clobbered_registers_except(RegSet exclude);
  void pop_call_clobbered_registers_except(RegSet exclude);

  void push_call_clobbered_registers() {
    push_call_clobbered_registers_except(RegSet());
  }
  void pop_call_clobbered_registers() {
    pop_call_clobbered_registers_except(RegSet());
  }

  void push_CPU_state(bool save_vectors = false, int vector_size_in_bytes = 0);
  void pop_CPU_state(bool restore_vectors = false, int vector_size_in_bytes = 0);

  void push_cont_fastpath(Register java_thread);
  void pop_cont_fastpath(Register java_thread);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  void bind(Label& L) {
    Assembler::bind(L);
    // fences across basic blocks should not be merged
    code()->clear_last_insn();
  }

  typedef void (MacroAssembler::* compare_and_branch_insn)(Register Rs1, Register Rs2, const address dest);
  typedef void (MacroAssembler::* compare_and_branch_label_insn)(Register Rs1, Register Rs2, Label &L, bool is_far);
  typedef void (MacroAssembler::* jal_jalr_insn)(Register Rt, address dest);
  typedef void (MacroAssembler::* load_insn_by_temp)(Register Rt, address dest, Register temp);

  void wrap_label(Register r, Label &L, Register t, load_insn_by_temp insn);
  void wrap_label(Register r, Label &L, jal_jalr_insn insn);
  void wrap_label(Register r1, Register r2, Label &L,
                  compare_and_branch_insn insn,
                  compare_and_branch_label_insn neg_insn, bool is_far = false);

  void la(Register Rd, Label &label);
  void la(Register Rd, const address dest);
  void la(Register Rd, const Address &adr);

  void li32(Register Rd, int32_t imm);
  void li64(Register Rd, int64_t imm);
  void li  (Register Rd, int64_t imm);  // optimized load immediate

  // mv
  void mv(Register Rd, address addr) { li(Rd, (int64_t)addr); }
  void mv(Register Rd, address addr, int32_t &offset) {
    // Split address into a lower 12-bit sign-extended offset and the remainder,
    // so that the offset can be encoded in a jalr or load/store instruction.
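    // e.g. if the low 12 bits of addr are 0xfff, offset becomes -1 and
    // li materializes addr + 1; a consumer computing base + offset then
    // yields (addr + 1) + (-1) == addr.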
    offset = ((int32_t)(int64_t)addr << 20) >> 20;
    li(Rd, (int64_t)addr - offset);
  }

  template<typename T, ENABLE_IF(std::is_integral<T>::value)>
  inline void mv(Register Rd, T o) { li(Rd, (int64_t)o); }

  void mv(Register Rd, Address dest) {
    assert(dest.getMode() == Address::literal, "Address mode should be Address::literal");
    relocate(dest.rspec(), [&] {
      movptr(Rd, dest.target());
    });
  }

  void mv(Register Rd, RegisterOrConstant src) {
    if (src.is_register()) {
      mv(Rd, src.as_register());
    } else {
      mv(Rd, src.as_constant());
    }
  }

  void movptr(Register Rd, address addr, int32_t &offset);

  void movptr(Register Rd, address addr) {
    int offset = 0;
    movptr(Rd, addr, offset);
    addi(Rd, Rd, offset);
  }

  inline void movptr(Register Rd, uintptr_t imm64) {
    movptr(Rd, (address)imm64);
  }

  // arith
  void add (Register Rd, Register Rn, int64_t increment, Register temp = t0);
  void addw(Register Rd, Register Rn, int32_t increment, Register temp = t0);
  void sub (Register Rd, Register Rn, int64_t decrement, Register temp = t0);
  void subw(Register Rd, Register Rn, int32_t decrement, Register temp = t0);

#define INSN(NAME)                                                     \
  inline void NAME(Register Rd, Register Rs1, Register Rs2) {          \
    Assembler::NAME(Rd, Rs1, Rs2);                                     \
  }

  INSN(add);
  INSN(addw);
  INSN(sub);
  INSN(subw);

#undef INSN

  // logic
  void andrw(Register Rd, Register Rs1, Register Rs2);
  void orrw(Register Rd, Register Rs1, Register Rs2);
  void xorrw(Register Rd, Register Rs1, Register Rs2);

  // revb
  void revb_h_h(Register Rd, Register Rs, Register tmp = t0);                           // reverse bytes in halfword in lower 16 bits, sign-extend
  void revb_w_w(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);      // reverse bytes in lower word, sign-extend
  void revb_h_h_u(Register Rd, Register Rs, Register tmp = t0);                         // reverse bytes in halfword in lower 16 bits, zero-extend
  void revb_h_w_u(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);    // reverse bytes in halfwords in lower 32 bits, zero-extend
  void revb_h_helper(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in upper 16 bits (48:63) and move to lower
  void revb_h(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);        // reverse bytes in each halfword
  void revb_w(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);        // reverse bytes in each word
  void revb(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);          // reverse bytes in doubleword

  void ror_imm(Register dst, Register src, uint32_t shift, Register tmp = t0);
  void andi(Register Rd, Register Rn, int64_t imm, Register tmp = t0);
  void orptr(Address adr, RegisterOrConstant src, Register tmp1 = t0, Register tmp2 = t1);

  // Load and Store Instructions
#define INSN_ENTRY_RELOC(result_type, header)                          \
  result_type header {                                                 \
    guarantee(rtype == relocInfo::internal_word_type,                  \
              "only internal_word_type relocs make sense here");       \
    relocate(InternalAddress(dest).rspec());                           \
    IncompressibleRegion ir(this);  /* relocations */

#define INSN(NAME)                                                     \
  void NAME(Register Rd, address dest) {                               \
    assert_cond(dest != NULL);                                         \
    int64_t distance = dest - pc();                                    \
    if (is_offset_in_range(distance, 32)) {                            \
      auipc(Rd, (int32_t)distance + 0x800);                            \
      Assembler::NAME(Rd, Rd, ((int32_t)distance << 20) >> 20);        \
    } else {                                                           \
      int32_t offset = 0;                                              \
      movptr(Rd, dest, offset);                                        \
      Assembler::NAME(Rd, Rd, offset);                                 \
    }                                                                  \
  }                                                                    \
  INSN_ENTRY_RELOC(void, NAME(Register Rd, address dest, relocInfo::relocType rtype)) \
    NAME(Rd, dest);                                                    \
  }                                                                    \
  void NAME(Register Rd, const Address &adr, Register temp = t0) {     \
    switch (adr.getMode()) {                                           \
      case Address::literal: {                                         \
        relocate(adr.rspec(), [&] {                                    \
          NAME(Rd, adr.target());                                      \
        });                                                            \
        break;                                                         \
      }                                                                \
      case Address::base_plus_offset: {                                \
        if (is_offset_in_range(adr.offset(), 12)) {                    \
          Assembler::NAME(Rd, adr.base(), adr.offset());               \
        } else {                                                       \
          int32_t offset = ((int32_t)adr.offset() << 20) >> 20;        \
          if (Rd == adr.base()) {                                      \
            la(temp, Address(adr.base(), adr.offset() - offset));      \
            Assembler::NAME(Rd, temp, offset);                         \
          } else {                                                     \
            la(Rd, Address(adr.base(), adr.offset() - offset));        \
            Assembler::NAME(Rd, Rd, offset);                           \
          }                                                            \
        }                                                              \
        break;                                                         \
      }                                                                \
      default:                                                         \
        ShouldNotReachHere();                                          \
    }                                                                  \
  }                                                                    \
  void NAME(Register Rd, Label &L) {                                   \
    wrap_label(Rd, L, &MacroAssembler::NAME);                          \
  }

  INSN(lb);
  INSN(lbu);
  INSN(lh);
  INSN(lhu);
  INSN(lw);
  INSN(lwu);
  INSN(ld);

#undef INSN

#define INSN(NAME)                                                     \
  void NAME(FloatRegister Rd, address dest, Register temp = t0) {      \
    assert_cond(dest != NULL);                                         \
    int64_t distance = dest - pc();                                    \
    if (is_offset_in_range(distance, 32)) {                            \
      auipc(temp, (int32_t)distance + 0x800);                          \
      Assembler::NAME(Rd, temp, ((int32_t)distance << 20) >> 20);      \
    } else {                                                           \
      int32_t offset = 0;                                              \
      movptr(temp, dest, offset);                                      \
      Assembler::NAME(Rd, temp, offset);                               \
    }                                                                  \
  }                                                                    \
  INSN_ENTRY_RELOC(void, NAME(FloatRegister Rd, address dest,          \
                              relocInfo::relocType rtype, Register temp = t0)) \
    NAME(Rd, dest, temp);                                              \
  }                                                                    \
  void NAME(FloatRegister Rd, const Address &adr, Register temp = t0) { \
    switch (adr.getMode()) {                                           \
      case Address::literal: {                                         \
        relocate(adr.rspec(), [&] {                                    \
          NAME(Rd, adr.target(), temp);                                \
        });                                                            \
        break;                                                         \
      }                                                                \
      case Address::base_plus_offset: {                                \
        if (is_offset_in_range(adr.offset(), 12)) {                    \
          Assembler::NAME(Rd, adr.base(), adr.offset());               \
        } else {                                                       \
          int32_t offset = ((int32_t)adr.offset() << 20) >> 20;        \
          la(temp, Address(adr.base(), adr.offset() - offset));        \
          Assembler::NAME(Rd, temp, offset);                           \
        }                                                              \
        break;                                                         \
      }                                                                \
      default:                                                         \
        ShouldNotReachHere();                                          \
    }                                                                  \
  }

  INSN(flw);
  INSN(fld);

#undef INSN

#define INSN(NAME, REGISTER)                                           \
  INSN_ENTRY_RELOC(void, NAME(REGISTER Rs, address dest,               \
                              relocInfo::relocType rtype, Register temp = t0)) \
    NAME(Rs, dest, temp);                                              \
  }

  INSN(sb,  Register);
  INSN(sh,  Register);
  INSN(sw,  Register);
  INSN(sd,  Register);
  INSN(fsw, FloatRegister);
  INSN(fsd, FloatRegister);

#undef INSN

#define INSN(NAME)                                                     \
  void NAME(Register Rs, address dest, Register temp = t0) {           \
    assert_cond(dest != NULL);                                         \
    assert_different_registers(Rs, temp);                              \
    int64_t distance = dest - pc();                                    \
    if (is_offset_in_range(distance, 32)) {                            \
      auipc(temp, (int32_t)distance + 0x800);                          \
      Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20);      \
    } else {                                                           \
      int32_t offset = 0;                                              \
      movptr(temp, dest, offset);                                      \
      Assembler::NAME(Rs, temp, offset);                               \
    }                                                                  \
  }                                                                    \
  void NAME(Register Rs, const Address &adr, Register temp = t0) {     \
    switch (adr.getMode()) {                                           \
      case Address::literal: {                                         \
        assert_different_registers(Rs, temp);                          \
        relocate(adr.rspec(), [&] {                                    \
          NAME(Rs, adr.target(), temp);                                \
        });                                                            \
        break;                                                         \
      }                                                                \
      case Address::base_plus_offset: {                                \
        if (is_offset_in_range(adr.offset(), 12)) {                    \
          Assembler::NAME(Rs, adr.base(), adr.offset());               \
        } else {                                                       \
          assert_different_registers(Rs, temp);                        \
          int32_t offset = ((int32_t)adr.offset() << 20) >> 20;        \
          la(temp, Address(adr.base(), adr.offset() - offset));        \
          Assembler::NAME(Rs, temp, offset);                           \
        }                                                              \
        break;                                                         \
      }                                                                \
      default:                                                         \
        ShouldNotReachHere();                                          \
    }                                                                  \
  }

  INSN(sb);
  INSN(sh);
  INSN(sw);
  INSN(sd);

#undef INSN

#define INSN(NAME)                                                     \
  void NAME(FloatRegister Rs, address dest, Register temp = t0) {      \
    assert_cond(dest != NULL);                                         \
    int64_t distance = dest - pc();                                    \
    if (is_offset_in_range(distance, 32)) {                            \
      auipc(temp, (int32_t)distance + 0x800);                          \
      Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20);      \
    } else {                                                           \
      int32_t offset = 0;                                              \
      movptr(temp, dest, offset);                                      \
      Assembler::NAME(Rs, temp, offset);                               \
    }                                                                  \
  }                                                                    \
  void NAME(FloatRegister Rs, const Address &adr, Register temp = t0) { \
    switch (adr.getMode()) {                                           \
      case Address::literal: {                                         \
        relocate(adr.rspec(), [&] {                                    \
          NAME(Rs, adr.target(), temp);                                \
        });                                                            \
        break;                                                         \
      }                                                                \
      case Address::base_plus_offset: {                                \
        if (is_offset_in_range(adr.offset(), 12)) {                    \
          Assembler::NAME(Rs, adr.base(), adr.offset());               \
        } else {                                                       \
          int32_t offset = ((int32_t)adr.offset() << 20) >> 20;        \
          la(temp, Address(adr.base(), adr.offset() - offset));        \
          Assembler::NAME(Rs, temp, offset);                           \
        }                                                              \
        break;                                                         \
      }                                                                \
      default:                                                         \
        ShouldNotReachHere();                                          \
    }                                                                  \
  }

  INSN(fsw);
  INSN(fsd);

#undef INSN

#undef INSN_ENTRY_RELOC

  void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, Label &succeed, Label *fail);
  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail);
  void cmpxchg(Register addr, Register expected,
               Register new_val,
               enum operand_size size,
               Assembler::Aqrl acquire, Assembler::Aqrl release,
               Register result, bool result_as_bool = false);
  void cmpxchg_weak(Register addr, Register expected,
                    Register new_val,
                    enum operand_size size,
                    Assembler::Aqrl acquire, Assembler::Aqrl release,
                    Register result);
  void cmpxchg_narrow_value_helper(Register addr, Register expected,
                                   Register new_val,
                                   enum operand_size size,
                                   Register tmp1, Register tmp2, Register tmp3);
  void cmpxchg_narrow_value(Register addr, Register expected,
                            Register new_val,
                            enum operand_size size,
                            Assembler::Aqrl acquire, Assembler::Aqrl release,
                            Register result, bool result_as_bool,
                            Register tmp1, Register tmp2, Register tmp3);
  void weak_cmpxchg_narrow_value(Register addr, Register expected,
                                 Register new_val,
                                 enum operand_size size,
                                 Assembler::Aqrl acquire, Assembler::Aqrl release,
                                 Register result,
                                 Register tmp1, Register tmp2, Register tmp3);

  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_xchg(Register prev, Register newv, Register addr);
  void atomic_xchgw(Register prev, Register newv, Register addr);
  void atomic_xchgal(Register prev, Register newv, Register addr);
  void atomic_xchgalw(Register prev, Register newv, Register addr);
  void atomic_xchgwu(Register prev, Register newv, Register addr);
  void atomic_xchgalwu(Register prev, Register newv, Register addr);

  static bool far_branches() {
    return ReservedCodeCacheSize > branch_range;
  }

  // Emit a direct call/jump if the entry address will always be in range,
  // otherwise a far call/jump.
  // The address must be inside the code cache.
  // Supported entry.rspec():
  // - relocInfo::external_word_type
  // - relocInfo::runtime_call_type
  // - relocInfo::none
  // In the case of a far call/jump, the entry address is put in the tmp register.
  // The tmp register is invalidated.
  void far_call(Address entry, Register tmp = t0);
  void far_jump(Address entry, Register tmp = t0);

  static int far_branch_size() {
    if (far_branches()) {
      return 2 * 4;  // auipc + jalr, see far_call() & far_jump()
    } else {
      return 4;
    }
  }

  void load_byte_map_base(Register reg);

  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    sub(t0, sp, offset);
    sd(zr, Address(t0));
  }

  void la_patchable(Register reg1, const Address &dest, int32_t &offset);

  virtual void _call_Unimplemented(address call_site) {
    mv(t1, call_site);
  }

#define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)

  // Frame creation and destruction shared between JITs.
  void build_frame(int framesize);
  void remove_frame(int framesize);

  void reserved_stack_check();

  void get_polling_page(Register dest, relocInfo::relocType rtype);
  void read_polling_page(Register r, int32_t offset, relocInfo::relocType rtype);

  // RISCV64 OpenJDK uses four different types of calls:
  //
  // - direct call: jal pc_relative_offset
  //   This is the shortest and the fastest, but the offset has the range +/-1MB.
  //
  // - far call: auipc reg, pc_relative_offset; jalr ra, reg, offset
  //   This is longer than a direct call. The offset has
  //   the range [-(2G + 2K), 2G - 2K). Addresses outside this range in the
  //   code cache require an indirect call.
  //   If a jump is needed rather than a call, a far jump 'jalr x0, reg, offset' can
  //   be used instead.
  //   All instructions are embedded at a call site.
  //
  // - trampoline call:
  //   This is only available in C1/C2-generated code (nmethod). It is a combination
  //   of a direct call, which is used if the destination of a call is in range,
  //   and a register-indirect call. It has the advantages of reaching anywhere in
  //   the RISCV address space and being patchable at runtime when the generated
  //   code is being executed by other threads.
  //
  //   [Main code section]
  //     jal trampoline
  //   [Stub code section]
  //   trampoline:
  //     ld reg, pc + 8 (auipc + ld)
  //     jr reg
  //     <64-bit destination address>
  //
  //   If the destination is in range when the generated code is moved to the code
  //   cache, 'jal trampoline' is replaced with 'jal destination' and the trampoline
  //   is not used.
  //   The optimization does not remove the trampoline from the stub section.
  //   This is necessary because the trampoline may well be redirected later when
  //   code is patched, and the new destination may not be reachable by a simple JAL
  //   instruction.
  //
  // - indirect call: movptr + jalr
  //   This too can reach anywhere in the address space, but it cannot be
  //   patched while code is running, so it must only be modified at a safepoint.
  //   This form of call is most suitable for targets at fixed addresses, which
  //   will never be patched.
  //
  //
  // To patch a trampoline call when the JAL can't reach, we first modify
  // the 64-bit destination address in the trampoline, then modify the
  // JAL to point to the trampoline, then flush the instruction cache to
  // broadcast the change to all executing threads. See
  // NativeCall::set_destination_mt_safe for the details.
  //
  // There is a benign race in that the other thread might observe the
  // modified JAL before it observes the modified 64-bit destination
  // address. That does not matter because the destination method has been
  // invalidated, so there will be a trap at its start.
  // For this to work, the destination address in the trampoline is
  // always updated, even if we're not using the trampoline.

  // Emit a direct call if the entry address will always be in range,
  // otherwise a trampoline call.
  // Supported entry.rspec():
  // - relocInfo::runtime_call_type
  // - relocInfo::opt_virtual_call_type
  // - relocInfo::static_call_type
  // - relocInfo::virtual_call_type
  //
  // Return: the call PC or NULL if CodeCache is full.
  address trampoline_call(Address entry);
  address ic_call(address entry, jint method_index = 0);

  // Support for memory inc/dec
  // n.b. increment/decrement calls with an Address destination will
  // need to use a scratch register to load the value to be
  // incremented. increment/decrement calls which add or subtract a
  // constant value other than a sign-extended 12-bit immediate will need
  // to use a 2nd scratch register to hold the constant. So an address
  // increment/decrement may trash both t0 and t1.
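  //
  // e.g. bumping a 32-bit counter field in the current thread
  // (illustrative; the offset name is hypothetical):
  //
  //   __ incrementw(Address(xthread, counter_offset), 1);  // may use t0 and t1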

  void increment(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
  void incrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);

  void decrement(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
  void decrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);

  void cmpptr(Register src1, Address src2, Label& equal);

  void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = NULL, Label* L_slow_path = NULL);
  void load_method_holder_cld(Register result, Register method);
  void load_method_holder(Register holder, Register method);

  void compute_index(Register str1, Register trailing_zeros, Register match_mask,
                     Register result, Register char_tmp, Register tmp,
                     bool haystack_isL);
  void compute_match_mask(Register src, Register pattern, Register match_mask,
                          Register mask1, Register mask2);

#ifdef COMPILER2
  void mul_add(Register out, Register in, Register offset,
               Register len, Register k, Register tmp);
  void cad(Register dst, Register src1, Register src2, Register carry);
  void cadc(Register dst, Register src1, Register src2, Register carry);
  void adc(Register dst, Register src1, Register src2, Register carry);
  void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                       Register src1, Register src2, Register carry);
  void multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_128_x_128_loop(Register y, Register z,
                               Register carry, Register carry2,
                               Register idx, Register jdx,
                               Register yz_idx1, Register yz_idx2,
                               Register tmp, Register tmp3, Register tmp4,
                               Register tmp6, Register product_hi);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                       Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                       Register tmp5, Register tmp6, Register product_hi);
#endif

  void inflate_lo32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
  void inflate_hi32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);

  void ctzc_bit(Register Rd, Register Rs, bool isLL = false, Register tmp1 = t0, Register tmp2 = t1);

  void zero_words(Register base, uint64_t cnt);
  address zero_words(Register ptr, Register cnt);
  void fill_words(Register base, Register cnt, Register value);
  void zero_memory(Register addr, Register len, Register tmp);
  void zero_dcache_blocks(Register base, Register cnt, Register tmp1, Register tmp2);

  // shift left by shamt and add
  void shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt);

  // The '_safe' variants of the float conversion instructions below handle
  // special inputs such as NaN, +Inf and -Inf. Converting those to an
  // integer with a plain fcvt produces results that differ from Java
  // semantics, so we fix up the result to get correct values.
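  // e.g. fcvt_w_s_safe(x10, f10) leaves 0 in x10 when f10 holds NaN,
  // matching Java's (int)NaN == 0, where a bare fcvt.w.s would produce
  // 2^31 - 1.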
  void fcvt_w_s_safe(Register dst, FloatRegister src, Register tmp = t0);
  void fcvt_l_s_safe(Register dst, FloatRegister src, Register tmp = t0);
  void fcvt_w_d_safe(Register dst, FloatRegister src, Register tmp = t0);
  void fcvt_l_d_safe(Register dst, FloatRegister src, Register tmp = t0);

  // vector load/store unit-stride instructions
  void vlex_v(VectorRegister vd, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
    switch (sew) {
      case Assembler::e64:
        vle64_v(vd, base, vm);
        break;
      case Assembler::e32:
        vle32_v(vd, base, vm);
        break;
      case Assembler::e16:
        vle16_v(vd, base, vm);
        break;
      case Assembler::e8: // fall through
      default:
        vle8_v(vd, base, vm);
        break;
    }
  }

  void vsex_v(VectorRegister store_data, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
    switch (sew) {
      case Assembler::e64:
        vse64_v(store_data, base, vm);
        break;
      case Assembler::e32:
        vse32_v(store_data, base, vm);
        break;
      case Assembler::e16:
        vse16_v(store_data, base, vm);
        break;
      case Assembler::e8: // fall through
      default:
        vse8_v(store_data, base, vm);
        break;
    }
  }

  // vector pseudo instructions
  inline void vmnot_m(VectorRegister vd, VectorRegister vs) {
    vmnand_mm(vd, vs, vs);
  }

  inline void vncvt_x_x_w(VectorRegister vd, VectorRegister vs, VectorMask vm) {
    vnsrl_wx(vd, vs, x0, vm);
  }

  inline void vneg_v(VectorRegister vd, VectorRegister vs) {
    vrsub_vx(vd, vs, x0);
  }

  inline void vfneg_v(VectorRegister vd, VectorRegister vs) {
    vfsgnjn_vv(vd, vs, vs);
  }

  static const int zero_words_block_size;

  void cast_primitive_type(BasicType type, Register Rt) {
    switch (type) {
      case T_BOOLEAN:
        sltu(Rt, zr, Rt);
        break;
      case T_CHAR   :
        zero_extend(Rt, Rt, 16);
        break;
      case T_BYTE   :
        sign_extend(Rt, Rt, 8);
        break;
      case T_SHORT  :
        sign_extend(Rt, Rt, 16);
        break;
      case T_INT    :
        addw(Rt, Rt, zr);
        break;
      case T_LONG   : /* nothing to do */ break;
      case T_VOID   : /* nothing to do */ break;
      case T_FLOAT  : /* nothing to do */ break;
      case T_DOUBLE : /* nothing to do */ break;
      default: ShouldNotReachHere();
    }
  }

  // float cmp with unordered_result
  void float_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
  void double_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);

  // Zero/Sign-extend
  void zero_extend(Register dst, Register src, int bits);
  void sign_extend(Register dst, Register src, int bits);

  // compare src1 and src2 and get -1/0/1 in dst.
  // if [src1 > src2], dst = 1;
  // if [src1 == src2], dst = 0;
  // if [src1 < src2], dst = -1;
  void cmp_l2i(Register dst, Register src1, Register src2, Register tmp = t0);

  // support for argument shuffling
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = t0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
  void rt_call(address dest, Register tmp = t0);

  void call(const address dest, Register temp = t0) {
    assert_cond(dest != NULL);
    assert(temp != noreg, "expecting a register");
    int32_t offset = 0;
    mv(temp, dest, offset);
    jalr(x1, temp, offset);
  }

  inline void ret() {
    jalr(x0, x1, 0);
  }

#ifdef ASSERT
  // Template short-hand support to clean-up after a failed call to trampoline
  // call generation (see trampoline_call() above), when a set of Labels must
  // be reset (before returning).
  template<typename Label, typename... More>
  void reset_labels(Label& lbl, More&... more) {
    lbl.reset(); reset_labels(more...);
  }
  template<typename Label>
  void reset_labels(Label& lbl) {
    lbl.reset();
  }
#endif

 private:

  void repne_scan(Register addr, Register value, Register count, Register tmp);

  // Return true if an address is within the 48-bit RISCV64 address space,
  // i.e. bits 63..47 are all zero (we only accept the lower, user-space
  // half of the sv48 address space).
  bool is_valid_riscv64_address(address addr) {
    return ((uintptr_t)addr >> 47) == 0;
  }

  void ld_constant(Register dest, const Address &const_addr) {
    if (NearCpool) {
      ld(dest, const_addr);
    } else {
      InternalAddress target(const_addr.target());
      relocate(target.rspec(), [&] {
        int32_t offset;
        la_patchable(dest, target, offset);
        ld(dest, Address(dest, offset));
      });
    }
  }

  int bitset_to_regs(unsigned int bitset, unsigned char* regs);
  Address add_memory_helper(const Address dst, Register tmp);

  void load_reserved(Register addr, enum operand_size size, Assembler::Aqrl acquire);
  void store_conditional(Register addr, Register new_val, enum operand_size size, Assembler::Aqrl release);
};

#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

#endif // CPU_RISCV_MACROASSEMBLER_RISCV_HPP