1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #ifndef CPU_RISCV_MACROASSEMBLER_RISCV_HPP
28 #define CPU_RISCV_MACROASSEMBLER_RISCV_HPP
29
30 #include "asm/assembler.inline.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "utilities/powerOfTwo.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42
43 public:
44
45 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
46
47 void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp_reg = t0);
48
49 // Alignment
50 int align(int modulus, int extra_offset = 0);
51
52 static inline void assert_alignment(address pc, int alignment = MacroAssembler::instruction_size) {
53 assert(is_aligned(pc, alignment), "bad alignment");
54 }
55
56 // nop
57 void post_call_nop();
58
59 // Stack frame creation/removal
60 // Note that SP must be updated to the right place before saving/restoring RA and FP
61 // because signal based thread suspend/resume could happen asynchronously.
62 void enter() {
63 subi(sp, sp, 2 * wordSize);
64 sd(ra, Address(sp, wordSize));
65 sd(fp, Address(sp));
66 addi(fp, sp, 2 * wordSize);
67 }
68
69 void leave() {
70 subi(sp, fp, 2 * wordSize);
71 ld(fp, Address(sp));
72 ld(ra, Address(sp, wordSize));
73 addi(sp, sp, 2 * wordSize);
74 }
75
76
77 // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
78 // The pointer will be loaded into the thread register.
79 void get_thread(Register thread);
80
81 // Support for VM calls
82 //
83 // It is imperative that all calls into the VM are handled via the call_VM macros.
84 // They make sure that the stack linkage is setup correctly. call_VM's correspond
85 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
86
87 void call_VM(Register oop_result,
88 address entry_point,
89 bool check_exceptions = true);
90 void call_VM(Register oop_result,
91 address entry_point,
92 Register arg_1,
93 bool check_exceptions = true);
94 void call_VM(Register oop_result,
95 address entry_point,
96 Register arg_1, Register arg_2,
97 bool check_exceptions = true);
98 void call_VM(Register oop_result,
99 address entry_point,
100 Register arg_1, Register arg_2, Register arg_3,
101 bool check_exceptions = true);
102
103 // Overloadings with last_Java_sp
104 void call_VM(Register oop_result,
105 Register last_java_sp,
106 address entry_point,
107 int number_of_arguments = 0,
108 bool check_exceptions = true);
109 void call_VM(Register oop_result,
110 Register last_java_sp,
111 address entry_point,
112 Register arg_1,
113 bool check_exceptions = true);
114 void call_VM(Register oop_result,
115 Register last_java_sp,
116 address entry_point,
117 Register arg_1, Register arg_2,
118 bool check_exceptions = true);
119 void call_VM(Register oop_result,
120 Register last_java_sp,
121 address entry_point,
122 Register arg_1, Register arg_2, Register arg_3,
123 bool check_exceptions = true);
124
125 void get_vm_result_oop(Register oop_result, Register java_thread);
126 void get_vm_result_metadata(Register metadata_result, Register java_thread);
127
128 // These always tightly bind to MacroAssembler::call_VM_leaf_base
129 // bypassing the virtual implementation
130 void call_VM_leaf(address entry_point,
131 int number_of_arguments = 0);
132 void call_VM_leaf(address entry_point,
133 Register arg_0);
134 void call_VM_leaf(address entry_point,
135 Register arg_0, Register arg_1);
136 void call_VM_leaf(address entry_point,
137 Register arg_0, Register arg_1, Register arg_2);
138
139 // These always tightly bind to MacroAssembler::call_VM_base
140 // bypassing the virtual implementation
141 void super_call_VM_leaf(address entry_point, Register arg_0);
142 void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1);
143 void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2);
144 void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3);
145
146 // last Java Frame (fills frame anchor)
147 void set_last_Java_frame(Register last_java_sp, Register last_java_fp, address last_java_pc, Register tmp);
148 void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Label &last_java_pc, Register tmp);
149 void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Register last_java_pc);
150
151 // thread in the default location (xthread)
152 void reset_last_Java_frame(bool clear_fp);
153
154 virtual void call_VM_leaf_base(
155 address entry_point, // the entry point
156 int number_of_arguments, // the number of arguments to pop after the call
157 Label* retaddr = nullptr
158 );
159
160 virtual void call_VM_leaf_base(
161 address entry_point, // the entry point
162 int number_of_arguments, // the number of arguments to pop after the call
163 Label& retaddr) {
164 call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
165 }
166
167 virtual void call_VM_base( // returns the register containing the thread upon return
168 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
169 Register java_thread, // the thread if computed before ; use noreg otherwise
170 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
171 Label* return_pc, // to set up last_Java_frame; use nullptr otherwise
172 address entry_point, // the entry point
173 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
174 bool check_exceptions // whether to check for pending exceptions after return
175 );
176
177 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
178
179 virtual void check_and_handle_earlyret(Register java_thread);
180 virtual void check_and_handle_popframe(Register java_thread);
181
182 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
183 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
184 void resolve_jobject(Register value, Register tmp1, Register tmp2);
185 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
186
187 void movoop(Register dst, jobject obj);
188 void mov_metadata(Register dst, Metadata* obj);
189 void bang_stack_size(Register size, Register tmp);
190 void set_narrow_oop(Register dst, jobject obj);
191 void set_narrow_klass(Register dst, Klass* k);
192
193 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
194 void access_load_at(BasicType type, DecoratorSet decorators, Register dst,
195 Address src, Register tmp1, Register tmp2);
196 void access_store_at(BasicType type, DecoratorSet decorators, Address dst,
197 Register val, Register tmp1, Register tmp2, Register tmp3);
198 void load_klass(Register dst, Register src, Register tmp = t0);
199 void load_narrow_klass_compact(Register dst, Register src);
200 void store_klass(Register dst, Register src, Register tmp = t0);
201 void cmp_klass_beq(Register obj, Register klass,
202 Register tmp1, Register tmp2,
203 Label &L, bool is_far = false);
204 void cmp_klass_bne(Register obj, Register klass,
205 Register tmp1, Register tmp2,
206 Label &L, bool is_far = false);
207
208 void encode_klass_not_null(Register r, Register tmp = t0);
209 void decode_klass_not_null(Register r, Register tmp = t0);
210 void encode_klass_not_null(Register dst, Register src, Register tmp);
211 void decode_klass_not_null(Register dst, Register src, Register tmp);
212 void decode_heap_oop_not_null(Register r);
213 void decode_heap_oop_not_null(Register dst, Register src);
214 void decode_heap_oop(Register d, Register s);
215 void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
216 void encode_heap_oop_not_null(Register r);
217 void encode_heap_oop_not_null(Register dst, Register src);
218 void encode_heap_oop(Register d, Register s);
219 void encode_heap_oop(Register r) { encode_heap_oop(r, r); };
220 void load_heap_oop(Register dst, Address src, Register tmp1,
221 Register tmp2, DecoratorSet decorators = 0);
222 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
223 Register tmp2, DecoratorSet decorators = 0);
224 void store_heap_oop(Address dst, Register val, Register tmp1,
225 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
226
227 void store_klass_gap(Register dst, Register src);
228
229 // currently unimplemented
230 // Used for storing null. All other oop constants should be
231 // stored using routines that take a jobject.
232 void store_heap_oop_null(Address dst);
233
234 // This dummy is to prevent a call to store_heap_oop from
235 // converting a zero (linked null) into a Register by giving
236 // the compiler two choices it can't resolve
237
238 void store_heap_oop(Address dst, void* dummy);
239
240 // Support for null-checks
241 //
242 // Generates code that causes a null OS exception if the content of reg is null.
243 // If the accessed location is M[reg + offset] and the offset is known, provide the
244 // offset. No explicit code generateion is needed if the offset is within a certain
245 // range (0 <= offset <= page_size).
246
247 virtual void null_check(Register reg, int offset = -1);
248 static bool needs_explicit_null_check(intptr_t offset);
249 static bool uses_implicit_null_check(void* address);
250
251 // interface method calling
252 void lookup_interface_method(Register recv_klass,
253 Register intf_klass,
254 RegisterOrConstant itable_index,
255 Register method_result,
256 Register scan_tmp,
257 Label& no_such_interface,
258 bool return_method = true);
259
260 void lookup_interface_method_stub(Register recv_klass,
261 Register holder_klass,
262 Register resolved_klass,
263 Register method_result,
264 Register temp_reg,
265 Register temp_reg2,
266 int itable_index,
267 Label& L_no_such_interface);
268
269 // virtual method calling
270 // n.n. x86 allows RegisterOrConstant for vtable_index
271 void lookup_virtual_method(Register recv_klass,
272 RegisterOrConstant vtable_index,
273 Register method_result);
274
275 // Form an address from base + offset in Rd. Rd my or may not
276 // actually be used: you must use the Address that is returned. It
277 // is up to you to ensure that the shift provided matches the size
278 // of your data.
279 Address form_address(Register Rd, Register base, int64_t byte_offset);
280
281 // Sometimes we get misaligned loads and stores, usually from Unsafe
282 // accesses, and these can exceed the offset range.
283 Address legitimize_address(Register Rd, const Address &adr) {
284 if (adr.getMode() == Address::base_plus_offset) {
285 if (!is_simm12(adr.offset())) {
286 return form_address(Rd, adr.base(), adr.offset());
287 }
288 }
289 return adr;
290 }
291
292 // allocation
293 void tlab_allocate(
294 Register obj, // result: pointer to object after successful allocation
295 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
296 int con_size_in_bytes, // object size in bytes if known at compile time
297 Register tmp1, // temp register
298 Register tmp2, // temp register
299 Label& slow_case, // continuation point of fast allocation fails
300 bool is_far = false
301 );
302
303 // Test sub_klass against super_klass, with fast and slow paths.
304
305 // The fast path produces a tri-state answer: yes / no / maybe-slow.
306 // One of the three labels can be null, meaning take the fall-through.
307 // If super_check_offset is -1, the value is loaded up from super_klass.
308 // No registers are killed, except tmp_reg
309 void check_klass_subtype_fast_path(Register sub_klass,
310 Register super_klass,
311 Register tmp_reg,
312 Label* L_success,
313 Label* L_failure,
314 Label* L_slow_path,
315 Register super_check_offset = noreg);
316
317 // The reset of the type check; must be wired to a corresponding fast path.
318 // It does not repeat the fast path logic, so don't use it standalone.
319 // The tmp1_reg and tmp2_reg can be noreg, if no temps are available.
320 // Updates the sub's secondary super cache as necessary.
321 void check_klass_subtype_slow_path(Register sub_klass,
322 Register super_klass,
323 Register tmp1_reg,
324 Register tmp2_reg,
325 Label* L_success,
326 Label* L_failure,
327 bool set_cond_codes = false);
328
329 void check_klass_subtype_slow_path_linear(Register sub_klass,
330 Register super_klass,
331 Register tmp1_reg,
332 Register tmp2_reg,
333 Label* L_success,
334 Label* L_failure,
335 bool set_cond_codes = false);
336
337 void check_klass_subtype_slow_path_table(Register sub_klass,
338 Register super_klass,
339 Register tmp1_reg,
340 Register tmp2_reg,
341 Label* L_success,
342 Label* L_failure,
343 bool set_cond_codes = false);
344
345 // If r is valid, return r.
346 // If r is invalid, remove a register r2 from available_regs, add r2
347 // to regs_to_push, then return r2.
348 Register allocate_if_noreg(const Register r,
349 RegSetIterator<Register> &available_regs,
350 RegSet ®s_to_push);
351
352 // Secondary subtype checking
353 void lookup_secondary_supers_table_var(Register sub_klass,
354 Register r_super_klass,
355 Register result,
356 Register tmp1,
357 Register tmp2,
358 Register tmp3,
359 Register tmp4,
360 Label *L_success);
361
362 void population_count(Register dst, Register src, Register tmp1, Register tmp2);
363
364 // As above, but with a constant super_klass.
365 // The result is in Register result, not the condition codes.
366 bool lookup_secondary_supers_table_const(Register r_sub_klass,
367 Register r_super_klass,
368 Register result,
369 Register tmp1,
370 Register tmp2,
371 Register tmp3,
372 Register tmp4,
373 u1 super_klass_slot,
374 bool stub_is_near = false);
375
376 void verify_secondary_supers_table(Register r_sub_klass,
377 Register r_super_klass,
378 Register result,
379 Register tmp1,
380 Register tmp2,
381 Register tmp3);
382
383 void lookup_secondary_supers_table_slow_path(Register r_super_klass,
384 Register r_array_base,
385 Register r_array_index,
386 Register r_bitmap,
387 Register result,
388 Register tmp,
389 bool is_stub = true);
390
391 void check_klass_subtype(Register sub_klass,
392 Register super_klass,
393 Register tmp_reg,
394 Label& L_success);
395
396 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
397
398 void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
399
400 // only if +VerifyOops
401 void _verify_oop(Register reg, const char* s, const char* file, int line);
402 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
403
404 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
405 if (VerifyOops) {
406 _verify_oop(reg, s, file, line);
407 }
408 }
409 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
410 if (VerifyOops) {
411 _verify_oop_addr(reg, s, file, line);
412 }
413 }
414
415 void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
416 void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}
417
418 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
419 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
420 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
421 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
422 #define verify_klass_ptr(reg) _verify_method_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
423
424 // A more convenient access to fence for our purposes
425 // We used four bit to indicate the read and write bits in the predecessors and successors,
426 // and extended i for r, o for w if UseConservativeFence enabled.
427 enum Membar_mask_bits {
428 StoreStore = 0b0101, // (pred = w + succ = w)
429 LoadStore = 0b1001, // (pred = r + succ = w)
430 StoreLoad = 0b0110, // (pred = w + succ = r)
431 LoadLoad = 0b1010, // (pred = r + succ = r)
432 AnyAny = LoadStore | StoreLoad // (pred = rw + succ = rw)
433 };
434
435 void membar(uint32_t order_constraint);
436
437 private:
438
439 static void membar_mask_to_pred_succ(uint32_t order_constraint,
440 uint32_t& predecessor, uint32_t& successor) {
441 predecessor = (order_constraint >> 2) & 0x3;
442 successor = order_constraint & 0x3;
443
444 // extend rw -> iorw:
445 // 01(w) -> 0101(ow)
446 // 10(r) -> 1010(ir)
447 // 11(rw)-> 1111(iorw)
448 if (UseConservativeFence) {
449 predecessor |= predecessor << 2;
450 successor |= successor << 2;
451 }
452 }
453
454 static int pred_succ_to_membar_mask(uint32_t predecessor, uint32_t successor) {
455 return ((predecessor & 0x3) << 2) | (successor & 0x3);
456 }
457
458 public:
459
460 void cmodx_fence();
461
462 void pause() {
463 // Zihintpause
464 // PAUSE is encoded as a FENCE instruction with pred=W, succ=0, fm=0, rd=x0, and rs1=x0.
465 Assembler::fence(w, 0);
466 }
467
468 // prints msg, dumps registers and stops execution
469 void stop(const char* msg);
470
471 static void debug64(char* msg, int64_t pc, int64_t regs[]);
472
473 void unimplemented(const char* what = "");
474
475 void should_not_reach_here() { stop("should not reach here"); }
476
477 static address target_addr_for_insn(address insn_addr);
478
479 // Required platform-specific helpers for Label::patch_instructions.
480 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
481 static int pd_patch_instruction_size(address branch, address target);
482 static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
483 pd_patch_instruction_size(branch, target);
484 }
485 static address pd_call_destination(address branch) {
486 return target_addr_for_insn(branch);
487 }
488
489 static int patch_oop(address insn_addr, address o);
490
491 static address get_target_of_li32(address insn_addr);
492 static int patch_imm_in_li32(address branch, int32_t target);
493
494 // Return whether code is emitted to a scratch blob.
495 virtual bool in_scratch_emit_size() {
496 return false;
497 }
498
499 address emit_reloc_call_address_stub(int insts_call_instruction_offset, address target);
500 static int max_reloc_call_address_stub_size();
501
502 void emit_static_call_stub();
503 static int static_call_stub_size();
504
505 // The following 4 methods return the offset of the appropriate move instruction
506
507 // Support for fast byte/short loading with zero extension (depending on particular CPU)
508 int load_unsigned_byte(Register dst, Address src);
509 int load_unsigned_short(Register dst, Address src);
510
511 // Support for fast byte/short loading with sign extension (depending on particular CPU)
512 int load_signed_byte(Register dst, Address src);
513 int load_signed_short(Register dst, Address src);
514
515 // Load and store values by size and signed-ness
516 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
517 void store_sized_value(Address dst, Register src, size_t size_in_bytes);
518
519 // Misaligned loads, will use the best way, according to the AvoidUnalignedAccess flag
520 void load_short_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity = 1);
521 void load_int_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity = 1);
522 void load_long_misaligned(Register dst, Address src, Register tmp, int granularity = 1);
523
524 public:
525 // Standard pseudo instructions
526 inline void nop() {
527 addi(x0, x0, 0);
528 }
529
530 inline void mv(Register Rd, Register Rs) {
531 if (Rd != Rs) {
532 addi(Rd, Rs, 0);
533 }
534 }
535
536 inline void notr(Register Rd, Register Rs) {
537 if (do_compress_zcb(Rd, Rs) && (Rd == Rs)) {
538 c_not(Rd);
539 } else {
540 xori(Rd, Rs, -1);
541 }
542 }
543
544 inline void neg(Register Rd, Register Rs) {
545 sub(Rd, x0, Rs);
546 }
547
548 inline void negw(Register Rd, Register Rs) {
549 subw(Rd, x0, Rs);
550 }
551
552 inline void sext_w(Register Rd, Register Rs) {
553 addiw(Rd, Rs, 0);
554 }
555
556 inline void zext_b(Register Rd, Register Rs) {
557 if (do_compress_zcb(Rd, Rs) && (Rd == Rs)) {
558 c_zext_b(Rd);
559 } else {
560 andi(Rd, Rs, 0xFF);
561 }
562 }
563
564 inline void seqz(Register Rd, Register Rs) {
565 sltiu(Rd, Rs, 1);
566 }
567
568 inline void snez(Register Rd, Register Rs) {
569 sltu(Rd, x0, Rs);
570 }
571
572 inline void sltz(Register Rd, Register Rs) {
573 slt(Rd, Rs, x0);
574 }
575
576 inline void sgtz(Register Rd, Register Rs) {
577 slt(Rd, x0, Rs);
578 }
579
580 // Bit-manipulation extension pseudo instructions
581 // zero extend word
582 inline void zext_w(Register Rd, Register Rs) {
583 assert(UseZba, "must be");
584 if (do_compress_zcb(Rd, Rs) && (Rd == Rs)) {
585 c_zext_w(Rd);
586 } else {
587 add_uw(Rd, Rs, zr);
588 }
589 }
590
591 // Floating-point data-processing pseudo instructions
592 inline void fmv_s(FloatRegister Rd, FloatRegister Rs) {
593 if (Rd != Rs) {
594 fsgnj_s(Rd, Rs, Rs);
595 }
596 }
597
598 inline void fabs_s(FloatRegister Rd, FloatRegister Rs) {
599 fsgnjx_s(Rd, Rs, Rs);
600 }
601
602 inline void fneg_s(FloatRegister Rd, FloatRegister Rs) {
603 fsgnjn_s(Rd, Rs, Rs);
604 }
605
606 inline void fmv_d(FloatRegister Rd, FloatRegister Rs) {
607 if (Rd != Rs) {
608 fsgnj_d(Rd, Rs, Rs);
609 }
610 }
611
612 inline void fabs_d(FloatRegister Rd, FloatRegister Rs) {
613 fsgnjx_d(Rd, Rs, Rs);
614 }
615
616 inline void fneg_d(FloatRegister Rd, FloatRegister Rs) {
617 fsgnjn_d(Rd, Rs, Rs);
618 }
619
620 // Control and status pseudo instructions
621 void csrr(Register Rd, unsigned csr); // read csr
622 void csrw(unsigned csr, Register Rs); // write csr
623 void csrs(unsigned csr, Register Rs); // set bits in csr
624 void csrc(unsigned csr, Register Rs); // clear bits in csr
625 void csrwi(unsigned csr, unsigned imm);
626 void csrsi(unsigned csr, unsigned imm);
627 void csrci(unsigned csr, unsigned imm);
628 void frcsr(Register Rd) { csrr(Rd, CSR_FCSR); }; // read float-point csr
629 void fscsr(Register Rd, Register Rs); // swap float-point csr
630 void fscsr(Register Rs); // write float-point csr
631 void frrm(Register Rd) { csrr(Rd, CSR_FRM); }; // read float-point rounding mode
632 void fsrm(Register Rd, Register Rs); // swap float-point rounding mode
633 void fsrm(Register Rs); // write float-point rounding mode
634 void fsrmi(Register Rd, unsigned imm);
635 void fsrmi(unsigned imm);
636 void frflags(Register Rd) { csrr(Rd, CSR_FFLAGS); }; // read float-point exception flags
637 void fsflags(Register Rd, Register Rs); // swap float-point exception flags
638 void fsflags(Register Rs); // write float-point exception flags
639 void fsflagsi(Register Rd, unsigned imm);
640 void fsflagsi(unsigned imm);
641 // Requires Zicntr
642 void rdinstret(Register Rd) { csrr(Rd, CSR_INSTRET); }; // read instruction-retired counter
643 void rdcycle(Register Rd) { csrr(Rd, CSR_CYCLE); }; // read cycle counter
644 void rdtime(Register Rd) { csrr(Rd, CSR_TIME); }; // read time
645
646 // Restore cpu control state after JNI call
647 void restore_cpu_control_state_after_jni(Register tmp);
648
649 // Control transfer pseudo instructions
650 void beqz(Register Rs, const address dest);
651 void bnez(Register Rs, const address dest);
652 void blez(Register Rs, const address dest);
653 void bgez(Register Rs, const address dest);
654 void bltz(Register Rs, const address dest);
655 void bgtz(Register Rs, const address dest);
656
657 void cmov_eq(Register cmp1, Register cmp2, Register dst, Register src);
658 void cmov_ne(Register cmp1, Register cmp2, Register dst, Register src);
659 void cmov_le(Register cmp1, Register cmp2, Register dst, Register src);
660 void cmov_leu(Register cmp1, Register cmp2, Register dst, Register src);
661 void cmov_ge(Register cmp1, Register cmp2, Register dst, Register src);
662 void cmov_geu(Register cmp1, Register cmp2, Register dst, Register src);
663 void cmov_lt(Register cmp1, Register cmp2, Register dst, Register src);
664 void cmov_ltu(Register cmp1, Register cmp2, Register dst, Register src);
665 void cmov_gt(Register cmp1, Register cmp2, Register dst, Register src);
666 void cmov_gtu(Register cmp1, Register cmp2, Register dst, Register src);
667
668 void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
669 void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
670 void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
671 void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
672 void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
673 void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
674
675 void cmov_fp_eq(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
676 void cmov_fp_ne(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
677 void cmov_fp_le(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
678 void cmov_fp_leu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
679 void cmov_fp_ge(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
680 void cmov_fp_geu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
681 void cmov_fp_lt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
682 void cmov_fp_ltu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
683 void cmov_fp_gt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
684 void cmov_fp_gtu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
685
686 void cmov_fp_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
687 void cmov_fp_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
688 void cmov_fp_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
689 void cmov_fp_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
690 void cmov_fp_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
691 void cmov_fp_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
692
693 public:
694 // We try to follow risc-v asm menomics.
695 // But as we don't layout a reachable GOT,
696 // we often need to resort to movptr, li <48imm>.
697 // https://github.com/riscv-non-isa/riscv-asm-manual/blob/main/src/asm-manual.adoc
698
699 // Hotspot only use the standard calling convention using x1/ra.
700 // The alternative calling convection using x5/t0 is not used.
701 // Using x5 as a temp causes the CPU to mispredict returns.
702
703 // JALR, return address stack updates:
704 // | rd is x1/x5 | rs1 is x1/x5 | rd=rs1 | RAS action
705 // | ----------- | ------------ | ------ |-------------
706 // | No | No | - | None
707 // | No | Yes | - | Pop
708 // | Yes | No | - | Push
709 // | Yes | Yes | No | Pop, then push
710 // | Yes | Yes | Yes | Push
711 //
712 // JAL, return address stack updates:
713 // | rd is x1/x5 | RAS action
714 // | ----------- | ----------
715 // | Yes | Push
716 // | No | None
717 //
718 // JUMPs uses Rd = x0/zero and Rs = x6/t1 or imm
719 // CALLS uses Rd = x1/ra and Rs = x6/t1 or imm (or x1/ra*)
720 // RETURNS uses Rd = x0/zero and Rs = x1/ra
721 // *use of x1/ra should not normally be used, special case only.
722
723 // jump: jal x0, offset
724 // For long reach uses temp register for:
725 // la + jr
726 void j(const address dest, Register temp = t1);
727 void j(const Address &dest, Register temp = t1);
728 void j(Label &l, Register temp = noreg);
729
730 // jump register: jalr x0, offset(rs)
731 void jr(Register Rd, int32_t offset = 0);
732
733 // call: la + jalr x1
734 void call(const address dest, Register temp = t1);
735
736 // jalr: jalr x1, offset(rs)
737 void jalr(Register Rs, int32_t offset = 0);
738
739 // Emit a runtime call. Only invalidates the tmp register which
740 // is used to keep the entry address for jalr/movptr.
741 // Uses call() for intra code cache, else movptr + jalr.
742 // Clobebrs t1
743 void rt_call(address dest, Register tmp = t1);
744
745 // ret: jalr x0, 0(x1)
746 inline void ret() {
747 Assembler::jalr(x0, x1, 0);
748 }
749
750 //label
751 void beqz(Register Rs, Label &l, bool is_far = false);
752 void bnez(Register Rs, Label &l, bool is_far = false);
753 void blez(Register Rs, Label &l, bool is_far = false);
754 void bgez(Register Rs, Label &l, bool is_far = false);
755 void bltz(Register Rs, Label &l, bool is_far = false);
756 void bgtz(Register Rs, Label &l, bool is_far = false);
757
758 void beq (Register Rs1, Register Rs2, Label &L, bool is_far = false);
759 void bne (Register Rs1, Register Rs2, Label &L, bool is_far = false);
760 void blt (Register Rs1, Register Rs2, Label &L, bool is_far = false);
761 void bge (Register Rs1, Register Rs2, Label &L, bool is_far = false);
762 void bltu(Register Rs1, Register Rs2, Label &L, bool is_far = false);
763 void bgeu(Register Rs1, Register Rs2, Label &L, bool is_far = false);
764
765 void bgt (Register Rs, Register Rt, const address dest);
766 void ble (Register Rs, Register Rt, const address dest);
767 void bgtu(Register Rs, Register Rt, const address dest);
768 void bleu(Register Rs, Register Rt, const address dest);
769
770 void bgt (Register Rs, Register Rt, Label &l, bool is_far = false);
771 void ble (Register Rs, Register Rt, Label &l, bool is_far = false);
772 void bgtu(Register Rs, Register Rt, Label &l, bool is_far = false);
773 void bleu(Register Rs, Register Rt, Label &l, bool is_far = false);
774
775 #define INSN_ENTRY_RELOC(result_type, header) \
776 result_type header { \
777 guarantee(rtype == relocInfo::internal_word_type, \
778 "only internal_word_type relocs make sense here"); \
779 relocate(InternalAddress(dest).rspec()); \
780 IncompressibleScope scope(this); /* relocations */
781
782 #define INSN(NAME) \
783 void NAME(Register Rs1, Register Rs2, const address dest) { \
784 assert_cond(dest != nullptr); \
785 int64_t offset = dest - pc(); \
786 guarantee(is_simm13(offset) && is_even(offset), \
787 "offset is invalid: is_simm_13: %s offset: " INT64_FORMAT, \
788 BOOL_TO_STR(is_simm13(offset)), offset); \
789 Assembler::NAME(Rs1, Rs2, offset); \
790 } \
791 INSN_ENTRY_RELOC(void, NAME(Register Rs1, Register Rs2, address dest, relocInfo::relocType rtype)) \
792 NAME(Rs1, Rs2, dest); \
793 }
794
795 INSN(beq);
796 INSN(bne);
797 INSN(bge);
798 INSN(bgeu);
799 INSN(blt);
800 INSN(bltu);
801
802 #undef INSN
803
804 #undef INSN_ENTRY_RELOC
805
806 void float_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
807 void float_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
808 void float_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
809 void float_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
810 void float_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
811 void float_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
812
813 void double_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
814 void double_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
815 void double_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
816 void double_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
817 void double_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
818 void double_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
819
820 private:
821 // The signed 20-bit upper imm can materialize at most negative 0xF...F80000000, two G.
822 // The following signed 12-bit imm can at max subtract 0x800, two K, from that previously loaded two G.
823 bool is_valid_32bit_offset(int64_t x) {
824 constexpr int64_t twoG = (2 * G);
825 constexpr int64_t twoK = (2 * K);
826 return x < (twoG - twoK) && x >= (-twoG - twoK);
827 }
828
829 // Ensure that the auipc can reach the destination at x from anywhere within
830 // the code cache so that if it is relocated we know it will still reach.
831 bool is_32bit_offset_from_codecache(int64_t x) {
832 int64_t low = (int64_t)CodeCache::low_bound();
833 int64_t high = (int64_t)CodeCache::high_bound();
834 return is_valid_32bit_offset(x - low) && is_valid_32bit_offset(x - high);
835 }
836
837 public:
838 // Stack push and pop individual 64 bit registers
839 void push_reg(Register Rs);
840 void pop_reg(Register Rd);
841
842 int push_reg(RegSet regset, Register stack);
843 int pop_reg(RegSet regset, Register stack);
844
845 int push_fp(FloatRegSet regset, Register stack);
846 int pop_fp(FloatRegSet regset, Register stack);
847
848 #ifdef COMPILER2
849 int push_v(VectorRegSet regset, Register stack);
850 int pop_v(VectorRegSet regset, Register stack);
851 #endif // COMPILER2
852
853 // Push and pop everything that might be clobbered by a native
854 // runtime call except t0 and t1. (They are always
855 // temporary registers, so we don't have to protect them.)
856 // Additional registers can be excluded in a passed RegSet.
857 void push_call_clobbered_registers_except(RegSet exclude);
858 void pop_call_clobbered_registers_except(RegSet exclude);
859
860 void push_call_clobbered_registers() {
861 push_call_clobbered_registers_except(RegSet());
862 }
863 void pop_call_clobbered_registers() {
864 pop_call_clobbered_registers_except(RegSet());
865 }
866
867 void push_CPU_state(bool save_vectors = false, int vector_size_in_bytes = 0);
868 void pop_CPU_state(bool restore_vectors = false, int vector_size_in_bytes = 0);
869
870 void push_cont_fastpath(Register java_thread = xthread);
871 void pop_cont_fastpath(Register java_thread = xthread);
872
873 // if heap base register is used - reinit it with the correct value
874 void reinit_heapbase();
875
876 void bind(Label& L) {
877 Assembler::bind(L);
878 // fences across basic blocks should not be merged
879 code()->clear_last_insn();
880 }
881
882 typedef void (MacroAssembler::* compare_and_branch_insn)(Register Rs1, Register Rs2, const address dest);
883 typedef void (MacroAssembler::* compare_and_branch_label_insn)(Register Rs1, Register Rs2, Label &L, bool is_far);
884 typedef void (MacroAssembler::* jal_jalr_insn)(Register Rt, address dest);
885
886 void wrap_label(Register r, Label &L, jal_jalr_insn insn);
887 void wrap_label(Register r1, Register r2, Label &L,
888 compare_and_branch_insn insn,
889 compare_and_branch_label_insn neg_insn, bool is_far = false);
890
891 void la(Register Rd, Label &label);
892 void la(Register Rd, const address addr);
893 void la(Register Rd, const address addr, int32_t &offset);
894 void la(Register Rd, const Address &adr);
895
896 void li16u(Register Rd, uint16_t imm);
897 void li32(Register Rd, int32_t imm);
898 void li (Register Rd, int64_t imm); // optimized load immediate
899
900 // mv
901 void mv(Register Rd, address addr) { li(Rd, (int64_t)addr); }
902 void mv(Register Rd, address addr, int32_t &offset) {
903 // Split address into a lower 12-bit sign-extended offset and the remainder,
904 // so that the offset could be encoded in jalr or load/store instruction.
905 offset = ((int32_t)(int64_t)addr << 20) >> 20;
906 li(Rd, (int64_t)addr - offset);
907 }
908
909 template<typename T, ENABLE_IF(std::is_integral<T>::value)>
910 inline void mv(Register Rd, T o) { li(Rd, (int64_t)o); }
911
912 void mv(Register Rd, RegisterOrConstant src) {
913 if (src.is_register()) {
914 mv(Rd, src.as_register());
915 } else {
916 mv(Rd, src.as_constant());
917 }
918 }
919
920 // Generates a load of a 48-bit constant which can be
921 // patched to any 48-bit constant, i.e. address.
922 // If common case supply additional temp register
923 // to shorten the instruction sequence.
924 void movptr(Register Rd, const Address &addr, Register tmp = noreg);
925 void movptr(Register Rd, address addr, Register tmp = noreg);
926 void movptr(Register Rd, address addr, int32_t &offset, Register tmp = noreg);
927
928 private:
929 void movptr1(Register Rd, uintptr_t addr, int32_t &offset);
930 void movptr2(Register Rd, uintptr_t addr, int32_t &offset, Register tmp);
931 public:
932 // float imm move
933 static bool can_hf_imm_load(short imm);
934 static bool can_fp_imm_load(float imm);
935 static bool can_dp_imm_load(double imm);
936 void fli_h(FloatRegister Rd, short imm);
937 void fli_s(FloatRegister Rd, float imm);
938 void fli_d(FloatRegister Rd, double imm);
939
940 // arith
941 void add (Register Rd, Register Rn, int64_t increment, Register tmp = t0);
942 void sub (Register Rd, Register Rn, int64_t decrement, Register tmp = t0);
943 void addw(Register Rd, Register Rn, int64_t increment, Register tmp = t0);
944 void subw(Register Rd, Register Rn, int64_t decrement, Register tmp = t0);
945
946 void subi(Register Rd, Register Rn, int64_t decrement) {
947 assert(is_simm12(-decrement), "Must be");
948 addi(Rd, Rn, -decrement);
949 }
950
951 void subiw(Register Rd, Register Rn, int64_t decrement) {
952 assert(is_simm12(-decrement), "Must be");
953 addiw(Rd, Rn, -decrement);
954 }
955
956 #define INSN(NAME) \
957 inline void NAME(Register Rd, Register Rs1, Register Rs2) { \
958 Assembler::NAME(Rd, Rs1, Rs2); \
959 }
960
961 INSN(add);
962 INSN(addw);
963 INSN(sub);
964 INSN(subw);
965
966 #undef INSN
967
968 // logic
969 void andrw(Register Rd, Register Rs1, Register Rs2);
970 void orrw(Register Rd, Register Rs1, Register Rs2);
971 void xorrw(Register Rd, Register Rs1, Register Rs2);
972
973 // logic with negate
974 void andn(Register Rd, Register Rs1, Register Rs2);
975 void orn(Register Rd, Register Rs1, Register Rs2);
976
977 // reverse bytes
978 void revbw(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2= t1); // reverse bytes in lower word, sign-extend
979 void revb(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in doubleword
980
981 void ror(Register dst, Register src, Register shift, Register tmp = t0);
982 void ror(Register dst, Register src, uint32_t shift, Register tmp = t0);
983 void rolw(Register dst, Register src, uint32_t shift, Register tmp = t0);
984
985 void orptr(Address adr, RegisterOrConstant src, Register tmp1 = t0, Register tmp2 = t1);
986
987 // Load and Store Instructions
988 #define INSN_ENTRY_RELOC(result_type, header) \
989 result_type header { \
990 guarantee(rtype == relocInfo::internal_word_type, \
991 "only internal_word_type relocs make sense here"); \
992 relocate(InternalAddress(dest).rspec()); \
993 IncompressibleScope scope(this); /* relocations */
994
995 #define INSN(NAME) \
996 void NAME(Register Rd, address dest) { \
997 assert_cond(dest != nullptr); \
998 if (CodeCache::contains(dest)) { \
999 int64_t distance = dest - pc(); \
1000 assert(is_valid_32bit_offset(distance), "Must be"); \
1001 auipc(Rd, (int32_t)distance + 0x800); \
1002 Assembler::NAME(Rd, Rd, ((int32_t)distance << 20) >> 20); \
1003 } else { \
1004 int32_t offset = 0; \
1005 movptr(Rd, dest, offset); \
1006 Assembler::NAME(Rd, Rd, offset); \
1007 } \
1008 } \
1009 INSN_ENTRY_RELOC(void, NAME(Register Rd, address dest, relocInfo::relocType rtype)) \
1010 NAME(Rd, dest); \
1011 } \
1012 void NAME(Register Rd, const Address &adr, Register temp = t0) { \
1013 switch (adr.getMode()) { \
1014 case Address::literal: { \
1015 relocate(adr.rspec(), [&] { \
1016 NAME(Rd, adr.target()); \
1017 }); \
1018 break; \
1019 } \
1020 case Address::base_plus_offset: { \
1021 if (is_simm12(adr.offset())) { \
1022 Assembler::NAME(Rd, adr.base(), adr.offset()); \
1023 } else { \
1024 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1025 if (Rd == adr.base()) { \
1026 la(temp, Address(adr.base(), adr.offset() - offset)); \
1027 Assembler::NAME(Rd, temp, offset); \
1028 } else { \
1029 la(Rd, Address(adr.base(), adr.offset() - offset)); \
1030 Assembler::NAME(Rd, Rd, offset); \
1031 } \
1032 } \
1033 break; \
1034 } \
1035 default: \
1036 ShouldNotReachHere(); \
1037 } \
1038 } \
1039 void NAME(Register Rd, Label &L) { \
1040 wrap_label(Rd, L, &MacroAssembler::NAME); \
1041 }
1042
1043 INSN(lb);
1044 INSN(lbu);
1045 INSN(lh);
1046 INSN(lhu);
1047 INSN(lw);
1048 INSN(lwu);
1049 INSN(ld);
1050
1051 #undef INSN
1052
1053 #define INSN(NAME) \
1054 void NAME(FloatRegister Rd, address dest, Register temp = t0) { \
1055 assert_cond(dest != nullptr); \
1056 if (CodeCache::contains(dest)) { \
1057 int64_t distance = dest - pc(); \
1058 assert(is_valid_32bit_offset(distance), "Must be"); \
1059 auipc(temp, (int32_t)distance + 0x800); \
1060 Assembler::NAME(Rd, temp, ((int32_t)distance << 20) >> 20); \
1061 } else { \
1062 int32_t offset = 0; \
1063 movptr(temp, dest, offset); \
1064 Assembler::NAME(Rd, temp, offset); \
1065 } \
1066 } \
1067 INSN_ENTRY_RELOC(void, NAME(FloatRegister Rd, address dest, \
1068 relocInfo::relocType rtype, Register temp = t0)) \
1069 NAME(Rd, dest, temp); \
1070 } \
1071 void NAME(FloatRegister Rd, const Address &adr, Register temp = t0) { \
1072 switch (adr.getMode()) { \
1073 case Address::literal: { \
1074 relocate(adr.rspec(), [&] { \
1075 NAME(Rd, adr.target(), temp); \
1076 }); \
1077 break; \
1078 } \
1079 case Address::base_plus_offset: { \
1080 if (is_simm12(adr.offset())) { \
1081 Assembler::NAME(Rd, adr.base(), adr.offset()); \
1082 } else { \
1083 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1084 la(temp, Address(adr.base(), adr.offset() - offset)); \
1085 Assembler::NAME(Rd, temp, offset); \
1086 } \
1087 break; \
1088 } \
1089 default: \
1090 ShouldNotReachHere(); \
1091 } \
1092 }
1093
1094 INSN(flh);
1095 INSN(flw);
1096 INSN(fld);
1097
1098 #undef INSN
1099
1100 #define INSN(NAME, REGISTER) \
1101 INSN_ENTRY_RELOC(void, NAME(REGISTER Rs, address dest, \
1102 relocInfo::relocType rtype, Register temp = t0)) \
1103 NAME(Rs, dest, temp); \
1104 }
1105
1106 INSN(sb, Register);
1107 INSN(sh, Register);
1108 INSN(sw, Register);
1109 INSN(sd, Register);
1110 INSN(fsw, FloatRegister);
1111 INSN(fsd, FloatRegister);
1112
1113 #undef INSN
1114
1115 #define INSN(NAME) \
1116 void NAME(Register Rs, address dest, Register temp = t0) { \
1117 assert_cond(dest != nullptr); \
1118 assert_different_registers(Rs, temp); \
1119 if (CodeCache::contains(dest)) { \
1120 int64_t distance = dest - pc(); \
1121 assert(is_valid_32bit_offset(distance), "Must be"); \
1122 auipc(temp, (int32_t)distance + 0x800); \
1123 Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
1124 } else { \
1125 int32_t offset = 0; \
1126 movptr(temp, dest, offset); \
1127 Assembler::NAME(Rs, temp, offset); \
1128 } \
1129 } \
1130 void NAME(Register Rs, const Address &adr, Register temp = t0) { \
1131 switch (adr.getMode()) { \
1132 case Address::literal: { \
1133 assert_different_registers(Rs, temp); \
1134 relocate(adr.rspec(), [&] { \
1135 NAME(Rs, adr.target(), temp); \
1136 }); \
1137 break; \
1138 } \
1139 case Address::base_plus_offset: { \
1140 if (is_simm12(adr.offset())) { \
1141 Assembler::NAME(Rs, adr.base(), adr.offset()); \
1142 } else { \
1143 assert_different_registers(Rs, temp); \
1144 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1145 la(temp, Address(adr.base(), adr.offset() - offset)); \
1146 Assembler::NAME(Rs, temp, offset); \
1147 } \
1148 break; \
1149 } \
1150 default: \
1151 ShouldNotReachHere(); \
1152 } \
1153 }
1154
1155 INSN(sb);
1156 INSN(sh);
1157 INSN(sw);
1158 INSN(sd);
1159
1160 #undef INSN
1161
1162 #define INSN(NAME) \
1163 void NAME(FloatRegister Rs, address dest, Register temp = t0) { \
1164 assert_cond(dest != nullptr); \
1165 if (CodeCache::contains(dest)) { \
1166 int64_t distance = dest - pc(); \
1167 assert(is_valid_32bit_offset(distance), "Must be"); \
1168 auipc(temp, (int32_t)distance + 0x800); \
1169 Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
1170 } else { \
1171 int32_t offset = 0; \
1172 movptr(temp, dest, offset); \
1173 Assembler::NAME(Rs, temp, offset); \
1174 } \
1175 } \
1176 void NAME(FloatRegister Rs, const Address &adr, Register temp = t0) { \
1177 switch (adr.getMode()) { \
1178 case Address::literal: { \
1179 relocate(adr.rspec(), [&] { \
1180 NAME(Rs, adr.target(), temp); \
1181 }); \
1182 break; \
1183 } \
1184 case Address::base_plus_offset: { \
1185 if (is_simm12(adr.offset())) { \
1186 Assembler::NAME(Rs, adr.base(), adr.offset()); \
1187 } else { \
1188 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1189 la(temp, Address(adr.base(), adr.offset() - offset)); \
1190 Assembler::NAME(Rs, temp, offset); \
1191 } \
1192 break; \
1193 } \
1194 default: \
1195 ShouldNotReachHere(); \
1196 } \
1197 }
1198
1199 INSN(fsw);
1200 INSN(fsd);
1201
1202 #undef INSN
1203
1204 #undef INSN_ENTRY_RELOC
1205
1206 void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, Label &succeed, Label *fail);
1207 void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail);
1208 void cmpxchg(Register addr, Register expected,
1209 Register new_val,
1210 Assembler::operand_size size,
1211 Assembler::Aqrl acquire, Assembler::Aqrl release,
1212 Register result, bool result_as_bool = false);
1213 void weak_cmpxchg(Register addr, Register expected,
1214 Register new_val,
1215 Assembler::operand_size size,
1216 Assembler::Aqrl acquire, Assembler::Aqrl release,
1217 Register result);
1218 void cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val,
1219 Assembler::operand_size size,
1220 Register shift, Register mask, Register aligned_addr);
1221 void cmpxchg_narrow_value(Register addr, Register expected,
1222 Register new_val,
1223 Assembler::operand_size size,
1224 Assembler::Aqrl acquire, Assembler::Aqrl release,
1225 Register result, bool result_as_bool,
1226 Register tmp1, Register tmp2, Register tmp3);
1227 void weak_cmpxchg_narrow_value(Register addr, Register expected,
1228 Register new_val,
1229 Assembler::operand_size size,
1230 Assembler::Aqrl acquire, Assembler::Aqrl release,
1231 Register result,
1232 Register tmp1, Register tmp2, Register tmp3);
1233
1234 void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
1235 void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
1236 void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
1237 void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);
1238
1239 void atomic_xchg(Register prev, Register newv, Register addr);
1240 void atomic_xchgw(Register prev, Register newv, Register addr);
1241 void atomic_xchgal(Register prev, Register newv, Register addr);
1242 void atomic_xchgalw(Register prev, Register newv, Register addr);
1243 void atomic_xchgwu(Register prev, Register newv, Register addr);
1244 void atomic_xchgalwu(Register prev, Register newv, Register addr);
1245
1246 void atomic_cas(Register prev, Register newv, Register addr, Assembler::operand_size size,
1247 Assembler::Aqrl acquire = Assembler::relaxed, Assembler::Aqrl release = Assembler::relaxed);
1248
1249 // Emit a far call/jump. Only invalidates the tmp register which
1250 // is used to keep the entry address for jalr.
1251 // The address must be inside the code cache.
1252 // Supported entry.rspec():
1253 // - relocInfo::external_word_type
1254 // - relocInfo::runtime_call_type
1255 // - relocInfo::none
1256 // Clobbers t1 default.
1257 void far_call(const Address &entry, Register tmp = t1);
1258 void far_jump(const Address &entry, Register tmp = t1);
1259
1260 static int far_branch_size() {
1261 return 2 * MacroAssembler::instruction_size; // auipc + jalr, see far_call() & far_jump()
1262 }
1263
1264 void load_byte_map_base(Register reg);
1265
1266 void bang_stack_with_offset(int offset) {
1267 // stack grows down, caller passes positive offset
1268 assert(offset > 0, "must bang with negative offset");
1269 sub(t0, sp, offset);
1270 sd(zr, Address(t0));
1271 }
1272
1273 virtual void _call_Unimplemented(address call_site) {
1274 mv(t1, call_site);
1275 }
1276
1277 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
1278
1279 // Frame creation and destruction shared between JITs.
1280 void build_frame(int framesize);
1281 void remove_frame(int framesize);
1282
1283 void reserved_stack_check();
1284
1285 void get_polling_page(Register dest, relocInfo::relocType rtype);
1286 void read_polling_page(Register r, int32_t offset, relocInfo::relocType rtype);
1287
1288 // RISCV64 OpenJDK uses three different types of calls:
1289 //
1290 // - far call: auipc reg, pc_relative_offset; jalr ra, reg, offset
1291 // The offset has the range [-(2G + 2K), 2G - 2K). Addresses out of the
1292 // range in the code cache requires indirect call.
1293 // If a jump is needed rather than a call, a far jump 'jalr x0, reg, offset'
1294 // can be used instead.
1295 // All instructions are embedded at a call site.
1296 //
1297 // - indirect call: movptr + jalr
1298 // This can reach anywhere in the address space, but it cannot be patched
1299 // while code is running, so it must only be modified at a safepoint.
1300 // This form of call is most suitable for targets at fixed addresses,
1301 // which will never be patched.
1302 //
1303 // - reloc call:
1304 // This too can reach anywhere in the address space but is only available
1305 // in C1/C2-generated code (nmethod).
1306 //
1307 // [Main code section]
1308 // auipc
1309 // ld <address_from_stub_section>
1310 // jalr
1311 //
1312 // [Stub section]
1313 // address stub:
1314 // <64-bit destination address>
1315 //
1316 // To change the destination we simply atomically store the new
1317 // address in the stub section.
1318 // There is a benign race in that the other thread might observe the old
1319 // 64-bit destination address before it observes the new address. That does
1320 // not matter because the destination method has been invalidated, so there
1321 // will be a trap at its start.
1322
1323 // Emit a reloc call and create a stub to hold the entry point address.
1324 // Supported entry.rspec():
1325 // - relocInfo::runtime_call_type
1326 // - relocInfo::opt_virtual_call_type
1327 // - relocInfo::static_call_type
1328 // - relocInfo::virtual_call_type
1329 //
1330 // Return: the call PC or nullptr if CodeCache is full.
1331 address reloc_call(Address entry, Register tmp = t1);
1332
1333 address ic_call(address entry, jint method_index = 0);
1334 static int ic_check_size();
1335 int ic_check(int end_alignment = MacroAssembler::instruction_size);
1336
1337 // Support for memory inc/dec
1338 // n.b. increment/decrement calls with an Address destination will
1339 // need to use a scratch register to load the value to be
1340 // incremented. increment/decrement calls which add or subtract a
1341 // constant value other than sign-extended 12-bit immediate will need
1342 // to use a 2nd scratch register to hold the constant. so, an address
1343 // increment/decrement may trash both t0 and t1.
1344
1345 void increment(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1346 void incrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1347
1348 void decrement(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1349 void decrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1350
1351 void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = nullptr, Label* L_slow_path = nullptr);
1352
1353 void load_method_holder_cld(Register result, Register method);
1354 void load_method_holder(Register holder, Register method);
1355
1356 void compute_index(Register str1, Register trailing_zeros, Register match_mask,
1357 Register result, Register char_tmp, Register tmp,
1358 bool haystack_isL);
1359 void compute_match_mask(Register src, Register pattern, Register match_mask,
1360 Register mask1, Register mask2);
1361
1362 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1363 void kernel_crc32(Register crc, Register buf, Register len,
1364 Register table0, Register table1, Register table2, Register table3,
1365 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register tmp6);
1366 void update_word_crc32(Register crc, Register v, Register tmp1, Register tmp2, Register tmp3,
1367 Register table0, Register table1, Register table2, Register table3,
1368 bool upper);
1369 void update_byte_crc32(Register crc, Register val, Register table);
1370
1371 #ifdef COMPILER2
1372 void vector_update_crc32(Register crc, Register buf, Register len,
1373 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5,
1374 Register table0, Register table3);
1375 void kernel_crc32_vclmul_fold(Register crc, Register buf, Register len,
1376 Register table0, Register table1, Register table2, Register table3,
1377 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1378 void crc32_vclmul_fold_to_16_bytes_vectorsize_32(VectorRegister vx, VectorRegister vy, VectorRegister vt,
1379 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4);
1380 void kernel_crc32_vclmul_fold_vectorsize_32(Register crc, Register buf, Register len,
1381 Register vclmul_table, Register tmp1, Register tmp2);
1382 void crc32_vclmul_fold_16_bytes_vectorsize_16(VectorRegister vx, VectorRegister vt,
1383 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4,
1384 Register buf, Register tmp, const int STEP);
1385 void crc32_vclmul_fold_16_bytes_vectorsize_16_2(VectorRegister vx, VectorRegister vy, VectorRegister vt,
1386 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4,
1387 Register tmp);
1388 void crc32_vclmul_fold_16_bytes_vectorsize_16_3(VectorRegister vx, VectorRegister vy, VectorRegister vt,
1389 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4,
1390 Register tmp);
1391 void kernel_crc32_vclmul_fold_vectorsize_16(Register crc, Register buf, Register len,
1392 Register vclmul_table, Register tmp1, Register tmp2);
1393
1394 void mul_add(Register out, Register in, Register offset,
1395 Register len, Register k, Register tmp);
1396 void wide_mul(Register prod_lo, Register prod_hi, Register n, Register m);
1397 void wide_madd(Register sum_lo, Register sum_hi, Register n,
1398 Register m, Register tmp1, Register tmp2);
1399 void cad(Register dst, Register src1, Register src2, Register carry);
1400 void cadc(Register dst, Register src1, Register src2, Register carry);
1401 void adc(Register dst, Register src1, Register src2, Register carry);
1402 void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
1403 Register src1, Register src2, Register carry);
1404 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1405 Register y, Register y_idx, Register z,
1406 Register carry, Register product,
1407 Register idx, Register kdx);
1408 void multiply_128_x_128_loop(Register y, Register z,
1409 Register carry, Register carry2,
1410 Register idx, Register jdx,
1411 Register yz_idx1, Register yz_idx2,
1412 Register tmp, Register tmp3, Register tmp4,
1413 Register tmp6, Register product_hi);
1414 void multiply_to_len(Register x, Register xlen, Register y, Register ylen,
1415 Register z, Register tmp0,
1416 Register tmp1, Register tmp2, Register tmp3, Register tmp4,
1417 Register tmp5, Register tmp6, Register product_hi);
1418
1419 #endif // COMPILER2
1420
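  // Inflate the four bytes in the low (inflate_lo32) or high (inflate_hi32)
  // half of Rs into four 16-bit halfwords in Rd, as used for byte[] -> char[]
  // (Latin-1 to UTF-16) inflation.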
1421 void inflate_lo32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
1422 void inflate_hi32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
1423
1424 void ctzc_bits(Register Rd, Register Rs, bool isLL = false,
1425 Register tmp1 = t0, Register tmp2 = t1);
1426
1427 void zero_words(Register base, uint64_t cnt);
1428 address zero_words(Register ptr, Register cnt);
1429 void fill_words(Register base, Register cnt, Register value);
1430 void zero_memory(Register addr, Register len, Register tmp);
1431 void zero_dcache_blocks(Register base, Register cnt, Register tmp1, Register tmp2);
1432
1433 // shift left by shamt and add
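  // i.e. Rd = Rs2 + (Rs1 << shamt); with Zba available this can lower to a
  // single shNadd instruction, otherwise tmp serves as the shift scratch.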
1434 void shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt);
1435
  // Test a single bit in Rs; the result is written to Rd.
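  // i.e. Rd = (Rs >> bit_pos) & 1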
1437 void test_bit(Register Rd, Register Rs, uint32_t bit_pos);
1438
  // The _safe variants of the float conversion instructions below handle
  // inputs that the raw fcvt instructions do not convert the way Java
  // requires, e.g. NaN, +Inf and -Inf. A plain fcvt converts NaN to the
  // largest positive integer (setting the invalid flag rather than trapping),
  // whereas Java defines (int)NaN == 0, so these helpers detect such inputs
  // and fix up the result.
1443 void fcvt_w_s_safe(Register dst, FloatRegister src, Register tmp = t0);
1444 void fcvt_l_s_safe(Register dst, FloatRegister src, Register tmp = t0);
1445 void fcvt_w_d_safe(Register dst, FloatRegister src, Register tmp = t0);
1446 void fcvt_l_d_safe(Register dst, FloatRegister src, Register tmp = t0);
1447
1448 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
1449 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
1450
  // Helper routine for the NaN slow path when converting float to float16.
1452 void float_to_float16_NaN(Register dst, FloatRegister src, Register tmp1, Register tmp2);
1453
  // Vector unit-stride load/store, dispatching on element width (SEW).
1455 void vlex_v(VectorRegister vd, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
1456 switch (sew) {
1457 case Assembler::e64:
1458 vle64_v(vd, base, vm);
1459 break;
1460 case Assembler::e32:
1461 vle32_v(vd, base, vm);
1462 break;
1463 case Assembler::e16:
1464 vle16_v(vd, base, vm);
1465 break;
1466 case Assembler::e8: // fall through
1467 default:
1468 vle8_v(vd, base, vm);
1469 break;
1470 }
1471 }
1472
1473 void vsex_v(VectorRegister store_data, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
1474 switch (sew) {
1475 case Assembler::e64:
1476 vse64_v(store_data, base, vm);
1477 break;
1478 case Assembler::e32:
1479 vse32_v(store_data, base, vm);
1480 break;
1481 case Assembler::e16:
1482 vse16_v(store_data, base, vm);
1483 break;
1484 case Assembler::e8: // fall through
1485 default:
1486 vse8_v(store_data, base, vm);
1487 break;
1488 }
1489 }
1490
1491 // vector pseudo instructions
  // Rotate a vector register left by shift bits, 32-bit version (the caller
  // is expected to have selected e32 elements).
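  // Uses the identity rol32(x, s) == (x << s) | (x >> (32 - s)); shift must
  // satisfy 0 < shift < 32 so that both shift immediates stay in range.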
1493 inline void vrole32_vi(VectorRegister vd, uint32_t shift, VectorRegister tmp_vr) {
1494 vsrl_vi(tmp_vr, vd, 32 - shift);
1495 vsll_vi(vd, vd, shift);
1496 vor_vv(vd, vd, tmp_vr);
1497 }
1498
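  // Whole-register load of a single vector register; vl1r.v is the standard
  // alias for the byte-element form vl1re8.v.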
1499 inline void vl1r_v(VectorRegister vd, Register rs) {
1500 vl1re8_v(vd, rs);
1501 }
1502
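  // Mask-register NOT: vmnand(vs, vs) computes ~(vs & vs) == ~vs, the
  // standard RVV expansion of the vmnot.m pseudo-instruction.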
1503 inline void vmnot_m(VectorRegister vd, VectorRegister vs) {
1504 vmnand_mm(vd, vs, vs);
1505 }
1506
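  // Narrowing convert: a narrowing shift right by zero (vnsrl with x0)
  // truncates each 2*SEW source element to SEW, the standard RVV expansion of
  // the vncvt.x.x.w pseudo-instruction.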
1507 inline void vncvt_x_x_w(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1508 vnsrl_wx(vd, vs, x0, vm);
1509 }
1510
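  // Integer negate: vrsub with x0 computes 0 - vs, the standard RVV expansion
  // of the vneg.v pseudo-instruction.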
1511 inline void vneg_v(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1512 vrsub_vx(vd, vs, x0, vm);
1513 }
1514
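  // Float negate via sign injection: vfsgnjn copies vs with its own sign bit
  // negated (the vfneg.v pseudo-instruction).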
1515 inline void vfneg_v(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1516 vfsgnjn_vv(vd, vs, vs, vm);
1517 }
1518
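  // Float absolute value via sign injection: vfsgnjx xors the sign bit of vs
  // with itself, clearing it (the vfabs.v pseudo-instruction).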
1519 inline void vfabs_v(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1520 vfsgnjx_vv(vd, vs, vs, vm);
1521 }
1522
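  // The greater-than/greater-or-equal comparisons below are synthesized by
  // swapping the operands of the corresponding less-than/less-or-equal
  // instruction (a > b iff b < a); RVV encodes only the lt/le vector-vector
  // comparison forms.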
1523 inline void vmsgt_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1524 vmslt_vv(vd, vs1, vs2, vm);
1525 }
1526
1527 inline void vmsgtu_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1528 vmsltu_vv(vd, vs1, vs2, vm);
1529 }
1530
1531 inline void vmsge_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1532 vmsle_vv(vd, vs1, vs2, vm);
1533 }
1534
1535 inline void vmsgeu_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1536 vmsleu_vv(vd, vs1, vs2, vm);
1537 }
1538
1539 inline void vmfgt_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1540 vmflt_vv(vd, vs1, vs2, vm);
1541 }
1542
1543 inline void vmfge_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1544 vmfle_vv(vd, vs1, vs2, vm);
1545 }
1546
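  // For unsigned values, u < imm iff u <= imm - 1, and u >= imm iff
  // u > imm - 1; restricting imm to [1, 16] keeps imm - 1 within the [0, 15]
  // immediate range used here.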
1547 inline void vmsltu_vi(VectorRegister Vd, VectorRegister Vs2, uint32_t imm, VectorMask vm = unmasked) {
1548 guarantee(imm >= 1 && imm <= 16, "imm is invalid");
1549 vmsleu_vi(Vd, Vs2, imm-1, vm);
1550 }
1551
1552 inline void vmsgeu_vi(VectorRegister Vd, VectorRegister Vs2, uint32_t imm, VectorMask vm = unmasked) {
1553 guarantee(imm >= 1 && imm <= 16, "imm is invalid");
1554 vmsgtu_vi(Vd, Vs2, imm-1, vm);
1555 }
1556
1557 // Copy mask register
1558 inline void vmmv_m(VectorRegister vd, VectorRegister vs) {
1559 vmand_mm(vd, vs, vs);
1560 }
1561
1562 // Clear mask register
1563 inline void vmclr_m(VectorRegister vd) {
1564 vmxor_mm(vd, vd, vd);
1565 }
1566
1567 // Set mask register
1568 inline void vmset_m(VectorRegister vd) {
1569 vmxnor_mm(vd, vd, vd);
1570 }
1571
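  // Bitwise NOT: x ^ -1 == ~x (the -1 immediate sign-extends to all ones).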
1572 inline void vnot_v(VectorRegister Vd, VectorRegister Vs, VectorMask vm = unmasked) {
1573 vxor_vi(Vd, Vs, -1, vm);
1574 }
1575
1576 static const int zero_words_block_size;
1577
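  // Canonicalize the value in Rt for the given Java primitive type: booleans
  // are normalized to 0/1, sub-int integral types are zero/sign-extended to
  // their Java width, and long/float/double/void are left untouched.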
1578 void cast_primitive_type(BasicType type, Register Rt) {
1579 switch (type) {
1580 case T_BOOLEAN:
1581 sltu(Rt, zr, Rt);
1582 break;
1583 case T_CHAR :
1584 zext(Rt, Rt, 16);
1585 break;
1586 case T_BYTE :
1587 sext(Rt, Rt, 8);
1588 break;
1589 case T_SHORT :
1590 sext(Rt, Rt, 16);
1591 break;
1592 case T_INT :
1593 sext(Rt, Rt, 32);
1594 break;
1595 case T_LONG : /* nothing to do */ break;
1596 case T_VOID : /* nothing to do */ break;
1597 case T_FLOAT : /* nothing to do */ break;
1598 case T_DOUBLE : /* nothing to do */ break;
1599 default: ShouldNotReachHere();
1600 }
1601 }
1602
  // Float compare; unordered_result selects the result produced when either
  // input is NaN.
1604 void float_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
1605 void double_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
1606
1607 // Zero/Sign-extend
1608 void zext(Register dst, Register src, int bits);
1609 void sext(Register dst, Register src, int bits);
1610
1611 private:
1612 void cmp_x2i(Register dst, Register src1, Register src2, Register tmp, bool is_signed = true);
1613
1614 public:
  // Compare src1 and src2 and produce -1/0/1 in dst:
  //   if src1 >  src2, dst = 1;
  //   if src1 == src2, dst = 0;
  //   if src1 <  src2, dst = -1;
1619 void cmp_l2i(Register dst, Register src1, Register src2, Register tmp = t0);
1620 void cmp_ul2i(Register dst, Register src1, Register src2, Register tmp = t0);
1621 void cmp_uw2i(Register dst, Register src1, Register src2, Register tmp = t0);
1622
1623 // support for argument shuffling
1624 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = t0);
1625 void float_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
1626 void long_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
1627 void double_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
1628 void object_move(OopMap* map,
1629 int oop_handle_offset,
1630 int framesize_in_slots,
1631 VMRegPair src,
1632 VMRegPair dst,
1633 bool is_receiver,
1634 int* receiver_offset);
1635
1636 #ifdef ASSERT
  // Template shorthand to clean up after a failed call to trampoline call
  // generation (see trampoline_call() below), when a set of Labels must be
  // reset (before returning).
1640 template<typename Label, typename... More>
1641 void reset_labels(Label& lbl, More&... more) {
1642 lbl.reset(); reset_labels(more...);
1643 }
1644 template<typename Label>
1645 void reset_labels(Label& lbl) {
1646 lbl.reset();
1647 }
1648 #endif
1649
1650 private:
1651
1652 void repne_scan(Register addr, Register value, Register count, Register tmp);
1653
1654 int bitset_to_regs(unsigned int bitset, unsigned char* regs);
1655 Address add_memory_helper(const Address dst, Register tmp);
1656
1657 void load_reserved(Register dst, Register addr, Assembler::operand_size size, Assembler::Aqrl acquire);
1658 void store_conditional(Register dst, Register new_val, Register addr, Assembler::operand_size size, Assembler::Aqrl release);
1659
1660 public:
1661 void fast_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
1662 void fast_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
1663
1664 public:
1665 enum {
1666 // movptr
1667 movptr1_instruction_size = 6 * MacroAssembler::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
1668 movptr2_instruction_size = 5 * MacroAssembler::instruction_size, // lui, lui, slli, add, addi. See movptr2().
1669 load_pc_relative_instruction_size = 2 * MacroAssembler::instruction_size // auipc, ld
1670 };
1671
1672 static bool is_load_pc_relative_at(address branch);
1673 static bool is_li16u_at(address instr);
1674
1675 static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
1676 static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
1677 static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
1678 static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
1679 static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
1680 static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
1681 static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
1682 static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
1683 static bool is_add_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110011 && extract_funct3(instr) == 0b000; }
1684 static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
1685 static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
1686 static bool is_addiw_to_zr_at(address instr){ assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
1687 static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
1688 static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
1689
1690 static bool is_srli_at(address instr) {
1691 assert_cond(instr != nullptr);
1692 return extract_opcode(instr) == 0b0010011 &&
1693 extract_funct3(instr) == 0b101 &&
           Assembler::extract(Assembler::ld_instr(instr), 31, 26) == 0b000000;
1695 }
1696
1697 static bool is_slli_shift_at(address instr, uint32_t shift) {
1698 assert_cond(instr != nullptr);
1699 return (extract_opcode(instr) == 0b0010011 && // opcode field
1700 extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation
1701 Assembler::extract(Assembler::ld_instr(instr), 25, 20) == shift); // shamt field
1702 }
1703
1704 static bool is_movptr1_at(address instr);
1705 static bool is_movptr2_at(address instr);
1706
1707 static bool is_lwu_to_zr(address instr);
1708
1709 static Register extract_rs1(address instr);
1710 static Register extract_rs2(address instr);
1711 static Register extract_rd(address instr);
1712 static uint32_t extract_opcode(address instr);
1713 static uint32_t extract_funct3(address instr);
1714
  // The movptr1 instruction sequence is:
1716 // lui
1717 // addi
1718 // slli
1719 // addi
1720 // slli
1721 // addi/jalr/load
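  // check_movptr1_data_dependency() verifies that each instruction in the
  // window consumes the destination register of its predecessor, i.e. that
  // the window really does contain a single movptr1 sequence.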
1722 static bool check_movptr1_data_dependency(address instr) {
1723 address lui = instr;
1724 address addi1 = lui + MacroAssembler::instruction_size;
1725 address slli1 = addi1 + MacroAssembler::instruction_size;
1726 address addi2 = slli1 + MacroAssembler::instruction_size;
1727 address slli2 = addi2 + MacroAssembler::instruction_size;
1728 address last_instr = slli2 + MacroAssembler::instruction_size;
1729 return extract_rs1(addi1) == extract_rd(lui) &&
1730 extract_rs1(addi1) == extract_rd(addi1) &&
1731 extract_rs1(slli1) == extract_rd(addi1) &&
1732 extract_rs1(slli1) == extract_rd(slli1) &&
1733 extract_rs1(addi2) == extract_rd(slli1) &&
1734 extract_rs1(addi2) == extract_rd(addi2) &&
1735 extract_rs1(slli2) == extract_rd(addi2) &&
1736 extract_rs1(slli2) == extract_rd(slli2) &&
1737 extract_rs1(last_instr) == extract_rd(slli2);
1738 }
1739
  // The movptr2 instruction sequence is:
1741 // lui
1742 // lui
1743 // slli
1744 // add
1745 // addi/jalr/load
1746 static bool check_movptr2_data_dependency(address instr) {
1747 address lui1 = instr;
1748 address lui2 = lui1 + MacroAssembler::instruction_size;
1749 address slli = lui2 + MacroAssembler::instruction_size;
1750 address add = slli + MacroAssembler::instruction_size;
1751 address last_instr = add + MacroAssembler::instruction_size;
1752 return extract_rd(add) == extract_rd(lui2) &&
1753 extract_rs1(add) == extract_rd(lui2) &&
1754 extract_rs2(add) == extract_rd(slli) &&
1755 extract_rs1(slli) == extract_rd(lui1) &&
1756 extract_rd(slli) == extract_rd(lui1) &&
1757 extract_rs1(last_instr) == extract_rd(add);
1758 }
1759
  // The li16u instruction sequence is:
1761 // lui
1762 // srli
1763 static bool check_li16u_data_dependency(address instr) {
1764 address lui = instr;
1765 address srli = lui + MacroAssembler::instruction_size;
1766
1767 return extract_rs1(srli) == extract_rd(lui) &&
1768 extract_rs1(srli) == extract_rd(srli);
1769 }
1770
  // The li32 instruction sequence is:
1772 // lui
1773 // addiw
1774 static bool check_li32_data_dependency(address instr) {
1775 address lui = instr;
1776 address addiw = lui + MacroAssembler::instruction_size;
1777
1778 return extract_rs1(addiw) == extract_rd(lui) &&
1779 extract_rs1(addiw) == extract_rd(addiw);
1780 }
1781
  // The pc-relative access instruction sequence is:
1783 // auipc
1784 // jalr/addi/load/float_load
1785 static bool check_pc_relative_data_dependency(address instr) {
1786 address auipc = instr;
1787 address last_instr = auipc + MacroAssembler::instruction_size;
1788
1789 return extract_rs1(last_instr) == extract_rd(auipc);
1790 }
1791
  // The load_label instruction sequence is:
1793 // auipc
1794 // load
1795 static bool check_load_pc_relative_data_dependency(address instr) {
1796 address auipc = instr;
1797 address load = auipc + MacroAssembler::instruction_size;
1798
1799 return extract_rd(load) == extract_rd(auipc) &&
1800 extract_rs1(load) == extract_rd(load);
1801 }
1802
1803 static bool is_li32_at(address instr);
1804 static bool is_pc_relative_at(address branch);
1805
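  // A membar is encoded as a FENCE instruction: opcode MISC-MEM (0b0001111,
  // matched by the 0x7f opcode mask below) with funct3 == 0.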
1806 static bool is_membar(address addr) {
1807 return (Bytes::get_native_u4(addr) & 0x7f) == 0b1111 && extract_funct3(addr) == 0;
1808 }
1809 static uint32_t get_membar_kind(address addr);
1810 static void set_membar_kind(address addr, uint32_t order_kind);
1811 };
1812
1813 #ifdef ASSERT
1814 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
1815 #endif
1816
1817 #endif // CPU_RISCV_MACROASSEMBLER_RISCV_HPP