1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #ifndef CPU_RISCV_MACROASSEMBLER_RISCV_HPP
28 #define CPU_RISCV_MACROASSEMBLER_RISCV_HPP
29
30 #include "asm/assembler.inline.hpp"
31 #include "code/vmreg.hpp"
32 #include "metaprogramming/enableIf.hpp"
33 #include "oops/compressedOops.hpp"
34 #include "utilities/powerOfTwo.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42
43 public:
44
45 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
46
47 void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp_reg = t0);
48
49 // Alignment
50 int align(int modulus, int extra_offset = 0);
51
52 static inline void assert_alignment(address pc, int alignment = MacroAssembler::instruction_size) {
53 assert(is_aligned(pc, alignment), "bad alignment");
54 }
55
56 // nop
57 void post_call_nop();
58
59 // Stack frame creation/removal
60 // Note that SP must be updated to the right place before saving/restoring RA and FP
61 // because signal based thread suspend/resume could happen asynchronously.
62 void enter() {
63 subi(sp, sp, 2 * wordSize);
64 sd(ra, Address(sp, wordSize));
65 sd(fp, Address(sp));
66 addi(fp, sp, 2 * wordSize);
67 }
68
69 void leave() {
70 subi(sp, fp, 2 * wordSize);
71 ld(fp, Address(sp));
72 ld(ra, Address(sp, wordSize));
73 addi(sp, sp, 2 * wordSize);
74 }
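
  // After enter() the frame linkage looks like this (illustrative sketch,
  // stack grows towards lower addresses):
  //
  //   fp - 1 * wordSize : saved ra
  //   fp - 2 * wordSize : saved caller fp   <- sp after enter()
  //
  // fp itself points just past the two saved slots, i.e. at the caller's sp,
  // which is what leave() relies on to unwind.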
75
76
  // Support for getting the JavaThread pointer (i.e. a reference to thread-local information)
78 // The pointer will be loaded into the thread register.
79 void get_thread(Register thread);
80
81 // Support for VM calls
82 //
83 // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
85 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
86
87 void call_VM(Register oop_result,
88 address entry_point,
89 bool check_exceptions = true);
90 void call_VM(Register oop_result,
91 address entry_point,
92 Register arg_1,
93 bool check_exceptions = true);
94 void call_VM(Register oop_result,
95 address entry_point,
96 Register arg_1, Register arg_2,
97 bool check_exceptions = true);
98 void call_VM(Register oop_result,
99 address entry_point,
100 Register arg_1, Register arg_2, Register arg_3,
101 bool check_exceptions = true);
102
103 // Overloadings with last_Java_sp
104 void call_VM(Register oop_result,
105 Register last_java_sp,
106 address entry_point,
107 int number_of_arguments = 0,
108 bool check_exceptions = true);
109 void call_VM(Register oop_result,
110 Register last_java_sp,
111 address entry_point,
112 Register arg_1,
113 bool check_exceptions = true);
114 void call_VM(Register oop_result,
115 Register last_java_sp,
116 address entry_point,
117 Register arg_1, Register arg_2,
118 bool check_exceptions = true);
119 void call_VM(Register oop_result,
120 Register last_java_sp,
121 address entry_point,
122 Register arg_1, Register arg_2, Register arg_3,
123 bool check_exceptions = true);
124
125 void get_vm_result_oop(Register oop_result, Register java_thread);
126 void get_vm_result_metadata(Register metadata_result, Register java_thread);
127
128 // These always tightly bind to MacroAssembler::call_VM_leaf_base
129 // bypassing the virtual implementation
130 void call_VM_leaf(address entry_point,
131 int number_of_arguments = 0);
132 void call_VM_leaf(address entry_point,
133 Register arg_0);
134 void call_VM_leaf(address entry_point,
135 Register arg_0, Register arg_1);
136 void call_VM_leaf(address entry_point,
137 Register arg_0, Register arg_1, Register arg_2);
138
139 // These always tightly bind to MacroAssembler::call_VM_base
140 // bypassing the virtual implementation
141 void super_call_VM_leaf(address entry_point, Register arg_0);
142 void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1);
143 void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2);
144 void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3);
145
146 // last Java Frame (fills frame anchor)
147 void set_last_Java_frame(Register last_java_sp, Register last_java_fp, address last_java_pc, Register tmp);
148 void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Label &last_java_pc, Register tmp);
149 void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Register last_java_pc);
150
151 // thread in the default location (xthread)
152 void reset_last_Java_frame(bool clear_fp);
153
154 virtual void call_VM_leaf_base(
155 address entry_point, // the entry point
156 int number_of_arguments, // the number of arguments to pop after the call
157 Label* retaddr = nullptr
158 );
159
160 virtual void call_VM_leaf_base(
161 address entry_point, // the entry point
162 int number_of_arguments, // the number of arguments to pop after the call
163 Label& retaddr) {
164 call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
165 }
166
167 virtual void call_VM_base( // returns the register containing the thread upon return
168 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
169 Register java_thread, // the thread if computed before ; use noreg otherwise
170 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
171 address entry_point, // the entry point
172 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
173 bool check_exceptions // whether to check for pending exceptions after return
174 );
175
176 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
177
178 virtual void check_and_handle_earlyret(Register java_thread);
179 virtual void check_and_handle_popframe(Register java_thread);
180
181 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
182 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
183 void resolve_jobject(Register value, Register tmp1, Register tmp2);
184 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
185
186 void movoop(Register dst, jobject obj);
187 void mov_metadata(Register dst, Metadata* obj);
188 void bang_stack_size(Register size, Register tmp);
189 void set_narrow_oop(Register dst, jobject obj);
190 void set_narrow_klass(Register dst, Klass* k);
191
192 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
193 void access_load_at(BasicType type, DecoratorSet decorators, Register dst,
194 Address src, Register tmp1, Register tmp2);
195 void access_store_at(BasicType type, DecoratorSet decorators, Address dst,
196 Register val, Register tmp1, Register tmp2, Register tmp3);
197 void load_klass(Register dst, Register src, Register tmp = t0);
198 void load_narrow_klass_compact(Register dst, Register src);
199 void store_klass(Register dst, Register src, Register tmp = t0);
200 void cmp_klass_compressed(Register oop, Register trial_klass, Register tmp, Label &L, bool equal);
201
202 void encode_klass_not_null(Register r, Register tmp = t0);
203 void decode_klass_not_null(Register r, Register tmp = t0);
204 void encode_klass_not_null(Register dst, Register src, Register tmp);
205 void decode_klass_not_null(Register dst, Register src, Register tmp);
206 void decode_heap_oop_not_null(Register r);
207 void decode_heap_oop_not_null(Register dst, Register src);
208 void decode_heap_oop(Register d, Register s);
209 void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
210 void encode_heap_oop_not_null(Register r);
211 void encode_heap_oop_not_null(Register dst, Register src);
212 void encode_heap_oop(Register d, Register s);
213 void encode_heap_oop(Register r) { encode_heap_oop(r, r); };
214 void load_heap_oop(Register dst, Address src, Register tmp1,
215 Register tmp2, DecoratorSet decorators = 0);
216 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
217 Register tmp2, DecoratorSet decorators = 0);
218 void store_heap_oop(Address dst, Register val, Register tmp1,
219 Register tmp2, Register tmp3, DecoratorSet decorators = 0);
220
221 void store_klass_gap(Register dst, Register src);
222
223 // currently unimplemented
224 // Used for storing null. All other oop constants should be
225 // stored using routines that take a jobject.
226 void store_heap_oop_null(Address dst);
227
228 // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
230 // the compiler two choices it can't resolve
231
232 void store_heap_oop(Address dst, void* dummy);
233
234 // Support for null-checks
235 //
236 // Generates code that causes a null OS exception if the content of reg is null.
237 // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
239 // range (0 <= offset <= page_size).
240
241 virtual void null_check(Register reg, int offset = -1);
242 static bool needs_explicit_null_check(intptr_t offset);
243 static bool uses_implicit_null_check(void* address);
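
  // For example (illustrative): a load from M[reg + 8] relies on the implicit
  // check, since 8 is within [0, page_size], while an access at an offset of
  // several megabytes makes needs_explicit_null_check() return true and an
  // explicit null check has to be emitted.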
244
245 // interface method calling
246 void lookup_interface_method(Register recv_klass,
247 Register intf_klass,
248 RegisterOrConstant itable_index,
249 Register method_result,
250 Register scan_tmp,
251 Label& no_such_interface,
252 bool return_method = true);
253
254 void lookup_interface_method_stub(Register recv_klass,
255 Register holder_klass,
256 Register resolved_klass,
257 Register method_result,
258 Register temp_reg,
259 Register temp_reg2,
260 int itable_index,
261 Label& L_no_such_interface);
262
263 // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index
265 void lookup_virtual_method(Register recv_klass,
266 RegisterOrConstant vtable_index,
267 Register method_result);
268
  // Form an address from base + offset in Rd. Rd may or may not
270 // actually be used: you must use the Address that is returned. It
271 // is up to you to ensure that the shift provided matches the size
272 // of your data.
273 Address form_address(Register Rd, Register base, int64_t byte_offset);
274
275 // Sometimes we get misaligned loads and stores, usually from Unsafe
276 // accesses, and these can exceed the offset range.
277 Address legitimize_address(Register Rd, const Address &adr) {
278 if (adr.getMode() == Address::base_plus_offset) {
279 if (!is_simm12(adr.offset())) {
280 return form_address(Rd, adr.base(), adr.offset());
281 }
282 }
283 return adr;
284 }
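
  // A minimal usage sketch (illustrative only): for an Unsafe-style access
  // whose offset does not fit in a signed 12-bit immediate, the address is
  // first rebased into a temp register and the returned Address is then used
  // for the memory instruction, e.g.
  //   Address adr = legitimize_address(t0, Address(base, large_offset));
  //   ld(dst, adr);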
285
286 // allocation
287 void tlab_allocate(
288 Register obj, // result: pointer to object after successful allocation
289 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
290 int con_size_in_bytes, // object size in bytes if known at compile time
291 Register tmp1, // temp register
292 Register tmp2, // temp register
    Label&   slow_case,                 // continuation point if fast allocation fails
294 bool is_far = false
295 );
296
297 // Test sub_klass against super_klass, with fast and slow paths.
298
299 // The fast path produces a tri-state answer: yes / no / maybe-slow.
300 // One of the three labels can be null, meaning take the fall-through.
301 // If super_check_offset is -1, the value is loaded up from super_klass.
302 // No registers are killed, except tmp_reg
303 void check_klass_subtype_fast_path(Register sub_klass,
304 Register super_klass,
305 Register tmp_reg,
306 Label* L_success,
307 Label* L_failure,
308 Label* L_slow_path,
309 Register super_check_offset = noreg);
310
  // The rest of the type check; must be wired to a corresponding fast path.
312 // It does not repeat the fast path logic, so don't use it standalone.
313 // The tmp1_reg and tmp2_reg can be noreg, if no temps are available.
314 // Updates the sub's secondary super cache as necessary.
315 void check_klass_subtype_slow_path(Register sub_klass,
316 Register super_klass,
317 Register tmp1_reg,
318 Register tmp2_reg,
319 Label* L_success,
320 Label* L_failure,
321 bool set_cond_codes = false);
322
323 void check_klass_subtype_slow_path_linear(Register sub_klass,
324 Register super_klass,
325 Register tmp1_reg,
326 Register tmp2_reg,
327 Label* L_success,
328 Label* L_failure,
329 bool set_cond_codes = false);
330
331 void check_klass_subtype_slow_path_table(Register sub_klass,
332 Register super_klass,
333 Register tmp1_reg,
334 Register tmp2_reg,
335 Label* L_success,
336 Label* L_failure,
337 bool set_cond_codes = false);
338
339 // If r is valid, return r.
340 // If r is invalid, remove a register r2 from available_regs, add r2
341 // to regs_to_push, then return r2.
342 Register allocate_if_noreg(const Register r,
343 RegSetIterator<Register> &available_regs,
                             RegSet &regs_to_push);
345
346 // Secondary subtype checking
347 void lookup_secondary_supers_table_var(Register sub_klass,
348 Register r_super_klass,
349 Register result,
350 Register tmp1,
351 Register tmp2,
352 Register tmp3,
353 Register tmp4,
354 Label *L_success);
355
356 void population_count(Register dst, Register src, Register tmp1, Register tmp2);
357
358 // As above, but with a constant super_klass.
359 // The result is in Register result, not the condition codes.
360 bool lookup_secondary_supers_table_const(Register r_sub_klass,
361 Register r_super_klass,
362 Register result,
363 Register tmp1,
364 Register tmp2,
365 Register tmp3,
366 Register tmp4,
367 u1 super_klass_slot,
368 bool stub_is_near = false);
369
370 void verify_secondary_supers_table(Register r_sub_klass,
371 Register r_super_klass,
372 Register result,
373 Register tmp1,
374 Register tmp2,
375 Register tmp3);
376
377 void lookup_secondary_supers_table_slow_path(Register r_super_klass,
378 Register r_array_base,
379 Register r_array_index,
380 Register r_bitmap,
381 Register result,
382 Register tmp,
383 bool is_stub = true);
384
385 void check_klass_subtype(Register sub_klass,
386 Register super_klass,
387 Register tmp_reg,
388 Label& L_success);
389
390 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
391
392 // only if +VerifyOops
393 void _verify_oop(Register reg, const char* s, const char* file, int line);
394 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
395
396 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
397 if (VerifyOops) {
398 _verify_oop(reg, s, file, line);
399 }
400 }
401 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
402 if (VerifyOops) {
403 _verify_oop_addr(reg, s, file, line);
404 }
405 }
406
407 void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
408 void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}
409
410 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
411 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
412 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
413 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
415
  // A more convenient access to fence for our purposes.
  // We use four bits to indicate the read and write bits in the predecessors and successors,
  // and extend i for r, o for w if UseConservativeFence is enabled.
419 enum Membar_mask_bits {
420 StoreStore = 0b0101, // (pred = w + succ = w)
421 LoadStore = 0b1001, // (pred = r + succ = w)
422 StoreLoad = 0b0110, // (pred = w + succ = r)
423 LoadLoad = 0b1010, // (pred = r + succ = r)
424 AnyAny = LoadStore | StoreLoad // (pred = rw + succ = rw)
425 };
426
427 void membar(uint32_t order_constraint);
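
  // Illustrative use: the mask bits combine with '|', e.g. an acquire-style
  // barrier after a load can be emitted as
  //   membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
  // which fences earlier loads against both later loads and later stores.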
428
429 private:
430
431 static void membar_mask_to_pred_succ(uint32_t order_constraint,
432 uint32_t& predecessor, uint32_t& successor) {
433 predecessor = (order_constraint >> 2) & 0x3;
434 successor = order_constraint & 0x3;
435
436 // extend rw -> iorw:
437 // 01(w) -> 0101(ow)
438 // 10(r) -> 1010(ir)
439 // 11(rw)-> 1111(iorw)
440 if (UseConservativeFence) {
441 predecessor |= predecessor << 2;
442 successor |= successor << 2;
443 }
444 }
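
  // Worked example (illustrative): StoreLoad = 0b0110 splits into
  // predecessor = 0b01 (w) and successor = 0b10 (r); with UseConservativeFence
  // these widen to 0b0101 (ow) and 0b1010 (ir) respectively.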
445
446 static int pred_succ_to_membar_mask(uint32_t predecessor, uint32_t successor) {
447 return ((predecessor & 0x3) << 2) | (successor & 0x3);
448 }
449
450 public:
451
452 void cmodx_fence();
453
454 void pause() {
455 // Zihintpause
456 // PAUSE is encoded as a FENCE instruction with pred=W, succ=0, fm=0, rd=x0, and rs1=x0.
457 Assembler::fence(w, 0);
458 }
459
460 // prints msg, dumps registers and stops execution
461 void stop(const char* msg);
462
463 static void debug64(char* msg, int64_t pc, int64_t regs[]);
464
465 void unimplemented(const char* what = "");
466
467 void should_not_reach_here() { stop("should not reach here"); }
468
469 static address target_addr_for_insn(address insn_addr);
470
471 // Required platform-specific helpers for Label::patch_instructions.
472 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
473 static int pd_patch_instruction_size(address branch, address target);
474 static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) {
475 pd_patch_instruction_size(branch, target);
476 }
477 static address pd_call_destination(address branch) {
478 return target_addr_for_insn(branch);
479 }
480
481 static int patch_oop(address insn_addr, address o);
482
483 static address get_target_of_li32(address insn_addr);
484 static int patch_imm_in_li32(address branch, int32_t target);
485
486 // Return whether code is emitted to a scratch blob.
487 virtual bool in_scratch_emit_size() {
488 return false;
489 }
490
491 address emit_reloc_call_address_stub(int insts_call_instruction_offset, address target);
492 static int max_reloc_call_address_stub_size();
493
494 void emit_static_call_stub();
495 static int static_call_stub_size();
496
497 // The following 4 methods return the offset of the appropriate move instruction
498
499 // Support for fast byte/short loading with zero extension (depending on particular CPU)
500 int load_unsigned_byte(Register dst, Address src);
501 int load_unsigned_short(Register dst, Address src);
502
503 // Support for fast byte/short loading with sign extension (depending on particular CPU)
504 int load_signed_byte(Register dst, Address src);
505 int load_signed_short(Register dst, Address src);
506
507 // Load and store values by size and signed-ness
508 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
509 void store_sized_value(Address dst, Register src, size_t size_in_bytes);
510
  // Misaligned loads: use the best strategy according to the AvoidUnalignedAccess flag
512 void load_short_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity = 1);
513 void load_int_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity = 1);
514 void load_long_misaligned(Register dst, Address src, Register tmp, int granularity = 1);
515
516 public:
517 // Standard pseudo instructions
518 inline void nop() {
519 addi(x0, x0, 0);
520 }
521
522 inline void mv(Register Rd, Register Rs) {
523 if (Rd != Rs) {
524 addi(Rd, Rs, 0);
525 }
526 }
527
528 inline void notr(Register Rd, Register Rs) {
529 if (do_compress_zcb(Rd, Rs) && (Rd == Rs)) {
530 c_not(Rd);
531 } else {
532 xori(Rd, Rs, -1);
533 }
534 }
535
536 inline void neg(Register Rd, Register Rs) {
537 sub(Rd, x0, Rs);
538 }
539
540 inline void negw(Register Rd, Register Rs) {
541 subw(Rd, x0, Rs);
542 }
543
544 inline void sext_w(Register Rd, Register Rs) {
545 addiw(Rd, Rs, 0);
546 }
547
548 inline void zext_b(Register Rd, Register Rs) {
549 if (do_compress_zcb(Rd, Rs) && (Rd == Rs)) {
550 c_zext_b(Rd);
551 } else {
552 andi(Rd, Rs, 0xFF);
553 }
554 }
555
556 inline void seqz(Register Rd, Register Rs) {
557 sltiu(Rd, Rs, 1);
558 }
559
560 inline void snez(Register Rd, Register Rs) {
561 sltu(Rd, x0, Rs);
562 }
563
564 inline void sltz(Register Rd, Register Rs) {
565 slt(Rd, Rs, x0);
566 }
567
568 inline void sgtz(Register Rd, Register Rs) {
569 slt(Rd, x0, Rs);
570 }
571
572 // Bit-manipulation extension pseudo instructions
573 // zero extend word
574 inline void zext_w(Register Rd, Register Rs) {
575 assert(UseZba, "must be");
576 if (do_compress_zcb(Rd, Rs) && (Rd == Rs)) {
577 c_zext_w(Rd);
578 } else {
579 add_uw(Rd, Rs, zr);
580 }
581 }
582
583 // Floating-point data-processing pseudo instructions
584 inline void fmv_s(FloatRegister Rd, FloatRegister Rs) {
585 if (Rd != Rs) {
586 fsgnj_s(Rd, Rs, Rs);
587 }
588 }
589
590 inline void fabs_s(FloatRegister Rd, FloatRegister Rs) {
591 fsgnjx_s(Rd, Rs, Rs);
592 }
593
594 inline void fneg_s(FloatRegister Rd, FloatRegister Rs) {
595 fsgnjn_s(Rd, Rs, Rs);
596 }
597
598 inline void fmv_d(FloatRegister Rd, FloatRegister Rs) {
599 if (Rd != Rs) {
600 fsgnj_d(Rd, Rs, Rs);
601 }
602 }
603
604 inline void fabs_d(FloatRegister Rd, FloatRegister Rs) {
605 fsgnjx_d(Rd, Rs, Rs);
606 }
607
608 inline void fneg_d(FloatRegister Rd, FloatRegister Rs) {
609 fsgnjn_d(Rd, Rs, Rs);
610 }
611
612 // Control and status pseudo instructions
613 void csrr(Register Rd, unsigned csr); // read csr
614 void csrw(unsigned csr, Register Rs); // write csr
615 void csrs(unsigned csr, Register Rs); // set bits in csr
616 void csrc(unsigned csr, Register Rs); // clear bits in csr
617 void csrwi(unsigned csr, unsigned imm);
618 void csrsi(unsigned csr, unsigned imm);
619 void csrci(unsigned csr, unsigned imm);
  void frcsr(Register Rd) { csrr(Rd, CSR_FCSR); };     // read floating-point csr
  void fscsr(Register Rd, Register Rs);                // swap floating-point csr
  void fscsr(Register Rs);                             // write floating-point csr
  void frrm(Register Rd) { csrr(Rd, CSR_FRM); };       // read floating-point rounding mode
  void fsrm(Register Rd, Register Rs);                 // swap floating-point rounding mode
  void fsrm(Register Rs);                              // write floating-point rounding mode
  void fsrmi(Register Rd, unsigned imm);
  void fsrmi(unsigned imm);
  void frflags(Register Rd) { csrr(Rd, CSR_FFLAGS); }; // read floating-point exception flags
  void fsflags(Register Rd, Register Rs);              // swap floating-point exception flags
  void fsflags(Register Rs);                           // write floating-point exception flags
631 void fsflagsi(Register Rd, unsigned imm);
632 void fsflagsi(unsigned imm);
633 // Requires Zicntr
634 void rdinstret(Register Rd) { csrr(Rd, CSR_INSTRET); }; // read instruction-retired counter
635 void rdcycle(Register Rd) { csrr(Rd, CSR_CYCLE); }; // read cycle counter
636 void rdtime(Register Rd) { csrr(Rd, CSR_TIME); }; // read time
637
638 // Restore cpu control state after JNI call
639 void restore_cpu_control_state_after_jni(Register tmp);
640
641 // Control transfer pseudo instructions
642 void beqz(Register Rs, const address dest);
643 void bnez(Register Rs, const address dest);
644 void blez(Register Rs, const address dest);
645 void bgez(Register Rs, const address dest);
646 void bltz(Register Rs, const address dest);
647 void bgtz(Register Rs, const address dest);
648
649 void cmov_eq(Register cmp1, Register cmp2, Register dst, Register src);
650 void cmov_ne(Register cmp1, Register cmp2, Register dst, Register src);
651 void cmov_le(Register cmp1, Register cmp2, Register dst, Register src);
652 void cmov_leu(Register cmp1, Register cmp2, Register dst, Register src);
653 void cmov_ge(Register cmp1, Register cmp2, Register dst, Register src);
654 void cmov_geu(Register cmp1, Register cmp2, Register dst, Register src);
655 void cmov_lt(Register cmp1, Register cmp2, Register dst, Register src);
656 void cmov_ltu(Register cmp1, Register cmp2, Register dst, Register src);
657 void cmov_gt(Register cmp1, Register cmp2, Register dst, Register src);
658 void cmov_gtu(Register cmp1, Register cmp2, Register dst, Register src);
659
660 void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
661 void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
662 void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
663 void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
664 void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
665 void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
666
667 public:
  // We try to follow RISC-V asm mnemonics.
  // But as we don't lay out a reachable GOT,
  // we often need to resort to movptr, li <48imm>.
  // https://github.com/riscv-non-isa/riscv-asm-manual/blob/main/src/asm-manual.adoc

  // HotSpot only uses the standard calling convention using x1/ra.
  // The alternative calling convention using x5/t0 is not used.
  // Using x5 as a temp causes the CPU to mispredict returns.
676
677 // JALR, return address stack updates:
678 // | rd is x1/x5 | rs1 is x1/x5 | rd=rs1 | RAS action
679 // | ----------- | ------------ | ------ |-------------
680 // | No | No | - | None
681 // | No | Yes | - | Pop
682 // | Yes | No | - | Push
683 // | Yes | Yes | No | Pop, then push
684 // | Yes | Yes | Yes | Push
685 //
686 // JAL, return address stack updates:
687 // | rd is x1/x5 | RAS action
688 // | ----------- | ----------
689 // | Yes | Push
690 // | No | None
691 //
  // JUMPs use Rd = x0/zero and Rs = x6/t1 or imm
  // CALLs use Rd = x1/ra and Rs = x6/t1 or imm (or x1/ra*)
  // RETURNs use Rd = x0/zero and Rs = x1/ra
  // * x1/ra as Rs should not normally be used; special case only.
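  // For example, 'jalr ra, 0(t1)' (rd = x1, rs1 = x6) pushes the return
  // address onto the RAS, while 'jalr x0, 0(ra)' (i.e. ret) pops it.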
696
697 // jump: jal x0, offset
  // For long reach, uses a temp register for:
  //   la + jr
700 void j(const address dest, Register temp = t1);
701 void j(const Address &dest, Register temp = t1);
702 void j(Label &l, Register temp = noreg);
703
704 // jump register: jalr x0, offset(rs)
705 void jr(Register Rd, int32_t offset = 0);
706
707 // call: la + jalr x1
708 void call(const address dest, Register temp = t1);
709
710 // jalr: jalr x1, offset(rs)
711 void jalr(Register Rs, int32_t offset = 0);
712
  // Emit a runtime call. Only invalidates the tmp register, which
  // is used to keep the entry address for jalr/movptr.
  // Uses call() for targets inside the code cache, otherwise movptr + jalr.
  // Clobbers t1.
717 void rt_call(address dest, Register tmp = t1);
718
719 // ret: jalr x0, 0(x1)
720 inline void ret() {
721 Assembler::jalr(x0, x1, 0);
722 }
723
724 //label
725 void beqz(Register Rs, Label &l, bool is_far = false);
726 void bnez(Register Rs, Label &l, bool is_far = false);
727 void blez(Register Rs, Label &l, bool is_far = false);
728 void bgez(Register Rs, Label &l, bool is_far = false);
729 void bltz(Register Rs, Label &l, bool is_far = false);
730 void bgtz(Register Rs, Label &l, bool is_far = false);
731
732 void beq (Register Rs1, Register Rs2, Label &L, bool is_far = false);
733 void bne (Register Rs1, Register Rs2, Label &L, bool is_far = false);
734 void blt (Register Rs1, Register Rs2, Label &L, bool is_far = false);
735 void bge (Register Rs1, Register Rs2, Label &L, bool is_far = false);
736 void bltu(Register Rs1, Register Rs2, Label &L, bool is_far = false);
737 void bgeu(Register Rs1, Register Rs2, Label &L, bool is_far = false);
738
739 void bgt (Register Rs, Register Rt, const address dest);
740 void ble (Register Rs, Register Rt, const address dest);
741 void bgtu(Register Rs, Register Rt, const address dest);
742 void bleu(Register Rs, Register Rt, const address dest);
743
744 void bgt (Register Rs, Register Rt, Label &l, bool is_far = false);
745 void ble (Register Rs, Register Rt, Label &l, bool is_far = false);
746 void bgtu(Register Rs, Register Rt, Label &l, bool is_far = false);
747 void bleu(Register Rs, Register Rt, Label &l, bool is_far = false);
748
749 #define INSN_ENTRY_RELOC(result_type, header) \
750 result_type header { \
751 guarantee(rtype == relocInfo::internal_word_type, \
752 "only internal_word_type relocs make sense here"); \
753 relocate(InternalAddress(dest).rspec()); \
754 IncompressibleScope scope(this); /* relocations */
755
756 #define INSN(NAME) \
757 void NAME(Register Rs1, Register Rs2, const address dest) { \
758 assert_cond(dest != nullptr); \
759 int64_t offset = dest - pc(); \
760 guarantee(is_simm13(offset) && is_even(offset), \
761 "offset is invalid: is_simm_13: %s offset: " INT64_FORMAT, \
762 BOOL_TO_STR(is_simm13(offset)), offset); \
763 Assembler::NAME(Rs1, Rs2, offset); \
764 } \
765 INSN_ENTRY_RELOC(void, NAME(Register Rs1, Register Rs2, address dest, relocInfo::relocType rtype)) \
766 NAME(Rs1, Rs2, dest); \
767 }
768
769 INSN(beq);
770 INSN(bne);
771 INSN(bge);
772 INSN(bgeu);
773 INSN(blt);
774 INSN(bltu);
775
776 #undef INSN
777
778 #undef INSN_ENTRY_RELOC
779
780 void float_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
781 void float_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
782 void float_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
783 void float_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
784 void float_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
785 void float_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
786
787 void double_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
788 void double_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
789 void double_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
790 void double_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
791 void double_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
792 void double_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
793
794 private:
795 int push_reg(unsigned int bitset, Register stack);
796 int pop_reg(unsigned int bitset, Register stack);
797 int push_fp(unsigned int bitset, Register stack);
798 int pop_fp(unsigned int bitset, Register stack);
799 #ifdef COMPILER2
800 int push_v(unsigned int bitset, Register stack);
801 int pop_v(unsigned int bitset, Register stack);
802 #endif // COMPILER2
803
  // The signed 20-bit upper imm can materialize, at most, the negative value 0xF...F80000000, i.e. minus two G.
  // The following signed 12-bit imm can at most subtract another 0x800, two K, from that previously loaded minus two G.
806 bool is_valid_32bit_offset(int64_t x) {
807 constexpr int64_t twoG = (2 * G);
808 constexpr int64_t twoK = (2 * K);
809 return x < (twoG - twoK) && x >= (-twoG - twoK);
810 }
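
  // Worked bound (illustrative): the largest positive auipc step is
  // 0x7FFFF000 (two G minus four K) and the largest positive 12-bit imm is
  // 0x7FF, so the maximum reachable positive offset is 2G - 2K - 1, hence the
  // strict 'x < (twoG - twoK)' check above.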
811
812 // Ensure that the auipc can reach the destination at x from anywhere within
813 // the code cache so that if it is relocated we know it will still reach.
814 bool is_32bit_offset_from_codecache(int64_t x) {
815 int64_t low = (int64_t)CodeCache::low_bound();
816 int64_t high = (int64_t)CodeCache::high_bound();
817 return is_valid_32bit_offset(x - low) && is_valid_32bit_offset(x - high);
818 }
819
820 public:
821 void push_reg(Register Rs);
822 void pop_reg(Register Rd);
823 void push_reg(RegSet regs, Register stack) { if (regs.bits()) push_reg(regs.bits(), stack); }
824 void pop_reg(RegSet regs, Register stack) { if (regs.bits()) pop_reg(regs.bits(), stack); }
825 void push_fp(FloatRegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
826 void pop_fp(FloatRegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }
827 #ifdef COMPILER2
828 void push_v(VectorRegSet regs, Register stack) { if (regs.bits()) push_v(regs.bits(), stack); }
829 void pop_v(VectorRegSet regs, Register stack) { if (regs.bits()) pop_v(regs.bits(), stack); }
830 #endif // COMPILER2
831
832 // Push and pop everything that might be clobbered by a native
833 // runtime call except t0 and t1. (They are always
834 // temporary registers, so we don't have to protect them.)
835 // Additional registers can be excluded in a passed RegSet.
836 void push_call_clobbered_registers_except(RegSet exclude);
837 void pop_call_clobbered_registers_except(RegSet exclude);
838
839 void push_call_clobbered_registers() {
840 push_call_clobbered_registers_except(RegSet());
841 }
842 void pop_call_clobbered_registers() {
843 pop_call_clobbered_registers_except(RegSet());
844 }
845
846 void push_CPU_state(bool save_vectors = false, int vector_size_in_bytes = 0);
847 void pop_CPU_state(bool restore_vectors = false, int vector_size_in_bytes = 0);
848
849 void push_cont_fastpath(Register java_thread = xthread);
850 void pop_cont_fastpath(Register java_thread = xthread);
851
852 // if heap base register is used - reinit it with the correct value
853 void reinit_heapbase();
854
855 void bind(Label& L) {
856 Assembler::bind(L);
857 // fences across basic blocks should not be merged
858 code()->clear_last_insn();
859 }
860
861 typedef void (MacroAssembler::* compare_and_branch_insn)(Register Rs1, Register Rs2, const address dest);
862 typedef void (MacroAssembler::* compare_and_branch_label_insn)(Register Rs1, Register Rs2, Label &L, bool is_far);
863 typedef void (MacroAssembler::* jal_jalr_insn)(Register Rt, address dest);
864
865 void wrap_label(Register r, Label &L, jal_jalr_insn insn);
866 void wrap_label(Register r1, Register r2, Label &L,
867 compare_and_branch_insn insn,
868 compare_and_branch_label_insn neg_insn, bool is_far = false);
869
870 void la(Register Rd, Label &label);
871 void la(Register Rd, const address addr);
872 void la(Register Rd, const address addr, int32_t &offset);
873 void la(Register Rd, const Address &adr);
874
875 void li16u(Register Rd, uint16_t imm);
876 void li32(Register Rd, int32_t imm);
877 void li (Register Rd, int64_t imm); // optimized load immediate
878
879 // mv
880 void mv(Register Rd, address addr) { li(Rd, (int64_t)addr); }
881 void mv(Register Rd, address addr, int32_t &offset) {
882 // Split address into a lower 12-bit sign-extended offset and the remainder,
883 // so that the offset could be encoded in jalr or load/store instruction.
884 offset = ((int32_t)(int64_t)addr << 20) >> 20;
885 li(Rd, (int64_t)addr - offset);
886 }
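
  // Worked example (illustrative): for an addr whose low 12 bits are 0xFFF,
  // offset becomes -1 and li materializes addr + 1 (low 12 bits zero), so a
  // subsequent jalr/load/store with that offset reaches exactly addr.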
887
888 template<typename T, ENABLE_IF(std::is_integral<T>::value)>
889 inline void mv(Register Rd, T o) { li(Rd, (int64_t)o); }
890
891 void mv(Register Rd, RegisterOrConstant src) {
892 if (src.is_register()) {
893 mv(Rd, src.as_register());
894 } else {
895 mv(Rd, src.as_constant());
896 }
897 }
898
  // Generates a load of a 48-bit constant which can be
  // patched to any other 48-bit constant, i.e. an address.
  // In the common case, supply an additional temp register
  // to shorten the instruction sequence.
903 void movptr(Register Rd, const Address &addr, Register tmp = noreg);
904 void movptr(Register Rd, address addr, Register tmp = noreg);
905 void movptr(Register Rd, address addr, int32_t &offset, Register tmp = noreg);
906
907 private:
908 void movptr1(Register Rd, uintptr_t addr, int32_t &offset);
909 void movptr2(Register Rd, uintptr_t addr, int32_t &offset, Register tmp);
910 public:
911 // float imm move
912 static bool can_hf_imm_load(short imm);
913 static bool can_fp_imm_load(float imm);
914 static bool can_dp_imm_load(double imm);
915 void fli_h(FloatRegister Rd, short imm);
916 void fli_s(FloatRegister Rd, float imm);
917 void fli_d(FloatRegister Rd, double imm);
918
919 // arith
920 void add (Register Rd, Register Rn, int64_t increment, Register tmp = t0);
921 void sub (Register Rd, Register Rn, int64_t decrement, Register tmp = t0);
922 void addw(Register Rd, Register Rn, int64_t increment, Register tmp = t0);
923 void subw(Register Rd, Register Rn, int64_t decrement, Register tmp = t0);
924
925 void subi(Register Rd, Register Rn, int64_t decrement) {
926 assert(is_simm12(-decrement), "Must be");
927 addi(Rd, Rn, -decrement);
928 }
929
930 void subiw(Register Rd, Register Rn, int64_t decrement) {
931 assert(is_simm12(-decrement), "Must be");
932 addiw(Rd, Rn, -decrement);
933 }
934
935 #define INSN(NAME) \
936 inline void NAME(Register Rd, Register Rs1, Register Rs2) { \
937 Assembler::NAME(Rd, Rs1, Rs2); \
938 }
939
940 INSN(add);
941 INSN(addw);
942 INSN(sub);
943 INSN(subw);
944
945 #undef INSN
946
947 // logic
948 void andrw(Register Rd, Register Rs1, Register Rs2);
949 void orrw(Register Rd, Register Rs1, Register Rs2);
950 void xorrw(Register Rd, Register Rs1, Register Rs2);
951
952 // logic with negate
953 void andn(Register Rd, Register Rs1, Register Rs2);
954 void orn(Register Rd, Register Rs1, Register Rs2);
955
956 // reverse bytes
  void revbw(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in lower word, sign-extend
958 void revb(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in doubleword
959
960 void ror(Register dst, Register src, Register shift, Register tmp = t0);
961 void ror(Register dst, Register src, uint32_t shift, Register tmp = t0);
962 void rolw(Register dst, Register src, uint32_t shift, Register tmp = t0);
963
964 void orptr(Address adr, RegisterOrConstant src, Register tmp1 = t0, Register tmp2 = t1);
965
966 // Load and Store Instructions
967 #define INSN_ENTRY_RELOC(result_type, header) \
968 result_type header { \
969 guarantee(rtype == relocInfo::internal_word_type, \
970 "only internal_word_type relocs make sense here"); \
971 relocate(InternalAddress(dest).rspec()); \
972 IncompressibleScope scope(this); /* relocations */
973
974 #define INSN(NAME) \
975 void NAME(Register Rd, address dest) { \
976 assert_cond(dest != nullptr); \
977 if (CodeCache::contains(dest)) { \
978 int64_t distance = dest - pc(); \
979 assert(is_valid_32bit_offset(distance), "Must be"); \
980 auipc(Rd, (int32_t)distance + 0x800); \
981 Assembler::NAME(Rd, Rd, ((int32_t)distance << 20) >> 20); \
982 } else { \
983 int32_t offset = 0; \
984 movptr(Rd, dest, offset); \
985 Assembler::NAME(Rd, Rd, offset); \
986 } \
987 } \
988 INSN_ENTRY_RELOC(void, NAME(Register Rd, address dest, relocInfo::relocType rtype)) \
989 NAME(Rd, dest); \
990 } \
991 void NAME(Register Rd, const Address &adr, Register temp = t0) { \
992 switch (adr.getMode()) { \
993 case Address::literal: { \
994 relocate(adr.rspec(), [&] { \
995 NAME(Rd, adr.target()); \
996 }); \
997 break; \
998 } \
999 case Address::base_plus_offset: { \
1000 if (is_simm12(adr.offset())) { \
1001 Assembler::NAME(Rd, adr.base(), adr.offset()); \
1002 } else { \
1003 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1004 if (Rd == adr.base()) { \
1005 la(temp, Address(adr.base(), adr.offset() - offset)); \
1006 Assembler::NAME(Rd, temp, offset); \
1007 } else { \
1008 la(Rd, Address(adr.base(), adr.offset() - offset)); \
1009 Assembler::NAME(Rd, Rd, offset); \
1010 } \
1011 } \
1012 break; \
1013 } \
1014 default: \
1015 ShouldNotReachHere(); \
1016 } \
1017 } \
1018 void NAME(Register Rd, Label &L) { \
1019 wrap_label(Rd, L, &MacroAssembler::NAME); \
1020 }
1021
1022 INSN(lb);
1023 INSN(lbu);
1024 INSN(lh);
1025 INSN(lhu);
1026 INSN(lw);
1027 INSN(lwu);
1028 INSN(ld);
1029
1030 #undef INSN
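
  // Descriptive note on the sequences above: the 32-bit pc-relative distance
  // is split into an auipc part and a sign-extended low 12-bit part
  // (((int32_t)distance << 20) >> 20). Adding 0x800 before auipc rounds the
  // upper part so it compensates for that sign extension, e.g. (illustrative)
  // distance = 0x12FFF yields lo = -1 and an auipc step of 0x13000, which sum
  // back to 0x12FFF.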
1031
1032 #define INSN(NAME) \
1033 void NAME(FloatRegister Rd, address dest, Register temp = t0) { \
1034 assert_cond(dest != nullptr); \
1035 if (CodeCache::contains(dest)) { \
1036 int64_t distance = dest - pc(); \
1037 assert(is_valid_32bit_offset(distance), "Must be"); \
1038 auipc(temp, (int32_t)distance + 0x800); \
1039 Assembler::NAME(Rd, temp, ((int32_t)distance << 20) >> 20); \
1040 } else { \
1041 int32_t offset = 0; \
1042 movptr(temp, dest, offset); \
1043 Assembler::NAME(Rd, temp, offset); \
1044 } \
1045 } \
1046 INSN_ENTRY_RELOC(void, NAME(FloatRegister Rd, address dest, \
1047 relocInfo::relocType rtype, Register temp = t0)) \
1048 NAME(Rd, dest, temp); \
1049 } \
1050 void NAME(FloatRegister Rd, const Address &adr, Register temp = t0) { \
1051 switch (adr.getMode()) { \
1052 case Address::literal: { \
1053 relocate(adr.rspec(), [&] { \
1054 NAME(Rd, adr.target(), temp); \
1055 }); \
1056 break; \
1057 } \
1058 case Address::base_plus_offset: { \
1059 if (is_simm12(adr.offset())) { \
1060 Assembler::NAME(Rd, adr.base(), adr.offset()); \
1061 } else { \
1062 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1063 la(temp, Address(adr.base(), adr.offset() - offset)); \
1064 Assembler::NAME(Rd, temp, offset); \
1065 } \
1066 break; \
1067 } \
1068 default: \
1069 ShouldNotReachHere(); \
1070 } \
1071 }
1072
1073 INSN(flh);
1074 INSN(flw);
1075 INSN(fld);
1076
1077 #undef INSN
1078
1079 #define INSN(NAME, REGISTER) \
1080 INSN_ENTRY_RELOC(void, NAME(REGISTER Rs, address dest, \
1081 relocInfo::relocType rtype, Register temp = t0)) \
1082 NAME(Rs, dest, temp); \
1083 }
1084
1085 INSN(sb, Register);
1086 INSN(sh, Register);
1087 INSN(sw, Register);
1088 INSN(sd, Register);
1089 INSN(fsw, FloatRegister);
1090 INSN(fsd, FloatRegister);
1091
1092 #undef INSN
1093
1094 #define INSN(NAME) \
1095 void NAME(Register Rs, address dest, Register temp = t0) { \
1096 assert_cond(dest != nullptr); \
1097 assert_different_registers(Rs, temp); \
1098 if (CodeCache::contains(dest)) { \
1099 int64_t distance = dest - pc(); \
1100 assert(is_valid_32bit_offset(distance), "Must be"); \
1101 auipc(temp, (int32_t)distance + 0x800); \
1102 Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
1103 } else { \
1104 int32_t offset = 0; \
1105 movptr(temp, dest, offset); \
1106 Assembler::NAME(Rs, temp, offset); \
1107 } \
1108 } \
1109 void NAME(Register Rs, const Address &adr, Register temp = t0) { \
1110 switch (adr.getMode()) { \
1111 case Address::literal: { \
1112 assert_different_registers(Rs, temp); \
1113 relocate(adr.rspec(), [&] { \
1114 NAME(Rs, adr.target(), temp); \
1115 }); \
1116 break; \
1117 } \
1118 case Address::base_plus_offset: { \
1119 if (is_simm12(adr.offset())) { \
1120 Assembler::NAME(Rs, adr.base(), adr.offset()); \
1121 } else { \
1122 assert_different_registers(Rs, temp); \
1123 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1124 la(temp, Address(adr.base(), adr.offset() - offset)); \
1125 Assembler::NAME(Rs, temp, offset); \
1126 } \
1127 break; \
1128 } \
1129 default: \
1130 ShouldNotReachHere(); \
1131 } \
1132 }
1133
1134 INSN(sb);
1135 INSN(sh);
1136 INSN(sw);
1137 INSN(sd);
1138
1139 #undef INSN
1140
1141 #define INSN(NAME) \
1142 void NAME(FloatRegister Rs, address dest, Register temp = t0) { \
1143 assert_cond(dest != nullptr); \
1144 if (CodeCache::contains(dest)) { \
1145 int64_t distance = dest - pc(); \
1146 assert(is_valid_32bit_offset(distance), "Must be"); \
1147 auipc(temp, (int32_t)distance + 0x800); \
1148 Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
1149 } else { \
1150 int32_t offset = 0; \
1151 movptr(temp, dest, offset); \
1152 Assembler::NAME(Rs, temp, offset); \
1153 } \
1154 } \
1155 void NAME(FloatRegister Rs, const Address &adr, Register temp = t0) { \
1156 switch (adr.getMode()) { \
1157 case Address::literal: { \
1158 relocate(adr.rspec(), [&] { \
1159 NAME(Rs, adr.target(), temp); \
1160 }); \
1161 break; \
1162 } \
1163 case Address::base_plus_offset: { \
1164 if (is_simm12(adr.offset())) { \
1165 Assembler::NAME(Rs, adr.base(), adr.offset()); \
1166 } else { \
1167 int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
1168 la(temp, Address(adr.base(), adr.offset() - offset)); \
1169 Assembler::NAME(Rs, temp, offset); \
1170 } \
1171 break; \
1172 } \
1173 default: \
1174 ShouldNotReachHere(); \
1175 } \
1176 }
1177
1178 INSN(fsw);
1179 INSN(fsd);
1180
1181 #undef INSN
1182
1183 #undef INSN_ENTRY_RELOC
1184
1185 void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, Label &succeed, Label *fail);
1186 void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail);
1187 void cmpxchg(Register addr, Register expected,
1188 Register new_val,
1189 Assembler::operand_size size,
1190 Assembler::Aqrl acquire, Assembler::Aqrl release,
1191 Register result, bool result_as_bool = false);
1192 void weak_cmpxchg(Register addr, Register expected,
1193 Register new_val,
1194 Assembler::operand_size size,
1195 Assembler::Aqrl acquire, Assembler::Aqrl release,
1196 Register result);
1197 void cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val,
1198 Assembler::operand_size size,
1199 Register shift, Register mask, Register aligned_addr);
1200 void cmpxchg_narrow_value(Register addr, Register expected,
1201 Register new_val,
1202 Assembler::operand_size size,
1203 Assembler::Aqrl acquire, Assembler::Aqrl release,
1204 Register result, bool result_as_bool,
1205 Register tmp1, Register tmp2, Register tmp3);
1206 void weak_cmpxchg_narrow_value(Register addr, Register expected,
1207 Register new_val,
1208 Assembler::operand_size size,
1209 Assembler::Aqrl acquire, Assembler::Aqrl release,
1210 Register result,
1211 Register tmp1, Register tmp2, Register tmp3);
1212
1213 void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
1214 void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
1215 void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
1216 void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);
1217
1218 void atomic_xchg(Register prev, Register newv, Register addr);
1219 void atomic_xchgw(Register prev, Register newv, Register addr);
1220 void atomic_xchgal(Register prev, Register newv, Register addr);
1221 void atomic_xchgalw(Register prev, Register newv, Register addr);
1222 void atomic_xchgwu(Register prev, Register newv, Register addr);
1223 void atomic_xchgalwu(Register prev, Register newv, Register addr);
1224
1225 void atomic_cas(Register prev, Register newv, Register addr, Assembler::operand_size size,
1226 Assembler::Aqrl acquire = Assembler::relaxed, Assembler::Aqrl release = Assembler::relaxed);
1227
1228 // Emit a far call/jump. Only invalidates the tmp register which
1229 // is used to keep the entry address for jalr.
1230 // The address must be inside the code cache.
1231 // Supported entry.rspec():
1232 // - relocInfo::external_word_type
1233 // - relocInfo::runtime_call_type
1234 // - relocInfo::none
  // Clobbers t1 by default.
1236 void far_call(const Address &entry, Register tmp = t1);
1237 void far_jump(const Address &entry, Register tmp = t1);
1238
1239 static int far_branch_size() {
1240 return 2 * MacroAssembler::instruction_size; // auipc + jalr, see far_call() & far_jump()
1241 }
1242
1243 void load_byte_map_base(Register reg);
1244
1245 void bang_stack_with_offset(int offset) {
1246 // stack grows down, caller passes positive offset
1247 assert(offset > 0, "must bang with negative offset");
1248 sub(t0, sp, offset);
1249 sd(zr, Address(t0));
1250 }
1251
1252 virtual void _call_Unimplemented(address call_site) {
1253 mv(t1, call_site);
1254 }
1255
1256 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
1257
1258 // Frame creation and destruction shared between JITs.
1259 void build_frame(int framesize);
1260 void remove_frame(int framesize);
1261
1262 void reserved_stack_check();
1263
1264 void get_polling_page(Register dest, relocInfo::relocType rtype);
1265 void read_polling_page(Register r, int32_t offset, relocInfo::relocType rtype);
1266
1267 // RISCV64 OpenJDK uses three different types of calls:
1268 //
1269 // - far call: auipc reg, pc_relative_offset; jalr ra, reg, offset
  //     The offset has the range [-(2G + 2K), 2G - 2K). Addresses outside this
  //     range within the code cache require an indirect call.
1272 // If a jump is needed rather than a call, a far jump 'jalr x0, reg, offset'
1273 // can be used instead.
1274 // All instructions are embedded at a call site.
1275 //
1276 // - indirect call: movptr + jalr
1277 // This can reach anywhere in the address space, but it cannot be patched
1278 // while code is running, so it must only be modified at a safepoint.
1279 // This form of call is most suitable for targets at fixed addresses,
1280 // which will never be patched.
1281 //
1282 // - reloc call:
1283 // This too can reach anywhere in the address space but is only available
1284 // in C1/C2-generated code (nmethod).
1285 //
1286 // [Main code section]
1287 // auipc
1288 // ld <address_from_stub_section>
1289 // jalr
1290 //
1291 // [Stub section]
1292 // address stub:
1293 // <64-bit destination address>
1294 //
1295 // To change the destination we simply atomically store the new
1296 // address in the stub section.
1297 // There is a benign race in that the other thread might observe the old
1298 // 64-bit destination address before it observes the new address. That does
1299 // not matter because the destination method has been invalidated, so there
1300 // will be a trap at its start.
1301
1302 // Emit a reloc call and create a stub to hold the entry point address.
1303 // Supported entry.rspec():
1304 // - relocInfo::runtime_call_type
1305 // - relocInfo::opt_virtual_call_type
1306 // - relocInfo::static_call_type
1307 // - relocInfo::virtual_call_type
1308 //
1309 // Return: the call PC or nullptr if CodeCache is full.
1310 address reloc_call(Address entry, Register tmp = t1);
1311
1312 address ic_call(address entry, jint method_index = 0);
1313 static int ic_check_size();
1314 int ic_check(int end_alignment = MacroAssembler::instruction_size);
1315
1316 // Support for memory inc/dec
1317 // n.b. increment/decrement calls with an Address destination will
1318 // need to use a scratch register to load the value to be
1319 // incremented. increment/decrement calls which add or subtract a
1320 // constant value other than sign-extended 12-bit immediate will need
1321 // to use a 2nd scratch register to hold the constant. so, an address
1322 // increment/decrement may trash both t0 and t1.
1323
1324 void increment(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1325 void incrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1326
1327 void decrement(const Address dst, int64_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1328 void decrementw(const Address dst, int32_t value = 1, Register tmp1 = t0, Register tmp2 = t1);
1329
1330 void cmpptr(Register src1, const Address &src2, Label& equal, Register tmp = t0);
1331
1332 void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = nullptr, Label* L_slow_path = nullptr);
1333 void load_method_holder_cld(Register result, Register method);
1334 void load_method_holder(Register holder, Register method);
1335
1336 void compute_index(Register str1, Register trailing_zeros, Register match_mask,
1337 Register result, Register char_tmp, Register tmp,
1338 bool haystack_isL);
1339 void compute_match_mask(Register src, Register pattern, Register match_mask,
1340 Register mask1, Register mask2);
1341
1342 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1343 void kernel_crc32(Register crc, Register buf, Register len,
1344 Register table0, Register table1, Register table2, Register table3,
1345 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register tmp6);
1346 void update_word_crc32(Register crc, Register v, Register tmp1, Register tmp2, Register tmp3,
1347 Register table0, Register table1, Register table2, Register table3,
1348 bool upper);
1349 void update_byte_crc32(Register crc, Register val, Register table);
1350
1351 #ifdef COMPILER2
1352 void vector_update_crc32(Register crc, Register buf, Register len,
1353 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5,
1354 Register table0, Register table3);
1355 void kernel_crc32_vclmul_fold(Register crc, Register buf, Register len,
1356 Register table0, Register table1, Register table2, Register table3,
1357 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1358 void crc32_vclmul_fold_to_16_bytes_vectorsize_32(VectorRegister vx, VectorRegister vy, VectorRegister vt,
1359 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4);
1360 void kernel_crc32_vclmul_fold_vectorsize_32(Register crc, Register buf, Register len,
1361 Register vclmul_table, Register tmp1, Register tmp2);
1362 void crc32_vclmul_fold_16_bytes_vectorsize_16(VectorRegister vx, VectorRegister vt,
1363 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4,
1364 Register buf, Register tmp, const int STEP);
1365 void crc32_vclmul_fold_16_bytes_vectorsize_16_2(VectorRegister vx, VectorRegister vy, VectorRegister vt,
1366 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4,
1367 Register tmp);
1368 void crc32_vclmul_fold_16_bytes_vectorsize_16_3(VectorRegister vx, VectorRegister vy, VectorRegister vt,
1369 VectorRegister vtmp1, VectorRegister vtmp2, VectorRegister vtmp3, VectorRegister vtmp4,
1370 Register tmp);
1371 void kernel_crc32_vclmul_fold_vectorsize_16(Register crc, Register buf, Register len,
1372 Register vclmul_table, Register tmp1, Register tmp2);
1373
1374 void mul_add(Register out, Register in, Register offset,
1375 Register len, Register k, Register tmp);
1376 void wide_mul(Register prod_lo, Register prod_hi, Register n, Register m);
1377 void wide_madd(Register sum_lo, Register sum_hi, Register n,
1378 Register m, Register tmp1, Register tmp2);
1379 void cad(Register dst, Register src1, Register src2, Register carry);
1380 void cadc(Register dst, Register src1, Register src2, Register carry);
1381 void adc(Register dst, Register src1, Register src2, Register carry);
1382 void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
1383 Register src1, Register src2, Register carry);
1384 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1385 Register y, Register y_idx, Register z,
1386 Register carry, Register product,
1387 Register idx, Register kdx);
1388 void multiply_128_x_128_loop(Register y, Register z,
1389 Register carry, Register carry2,
1390 Register idx, Register jdx,
1391 Register yz_idx1, Register yz_idx2,
1392 Register tmp, Register tmp3, Register tmp4,
1393 Register tmp6, Register product_hi);
1394 void multiply_to_len(Register x, Register xlen, Register y, Register ylen,
1395 Register z, Register tmp0,
1396 Register tmp1, Register tmp2, Register tmp3, Register tmp4,
1397 Register tmp5, Register tmp6, Register product_hi);
1398
1399 #endif // COMPILER2
1400
1401 void inflate_lo32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
1402 void inflate_hi32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
1403
1404 void ctzc_bits(Register Rd, Register Rs, bool isLL = false,
1405 Register tmp1 = t0, Register tmp2 = t1);
1406
1407 void zero_words(Register base, uint64_t cnt);
1408 address zero_words(Register ptr, Register cnt);
1409 void fill_words(Register base, Register cnt, Register value);
1410 void zero_memory(Register addr, Register len, Register tmp);
1411 void zero_dcache_blocks(Register base, Register cnt, Register tmp1, Register tmp2);
1412
1413 // shift left by shamt and add
1414 void shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt);
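// Usage sketch (the expansion described here is an assumption; see the definition for details):
//   shadd(x10, x11, x12, t0, 3);  // expected to compute x10 = x12 + (x11 << 3),
//                                 // using Zba's sh3add when available, else slli + add via t0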
1415
1416 // Test a single bit in Rs; the result is placed in Rd.
1417 void test_bit(Register Rd, Register Rs, uint32_t bit_pos);
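// Typical use (Rd is expected to end up nonzero iff the tested bit is set):
//   test_bit(t0, x10, 5);
//   bnez(t0, bit_is_set);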
1418
1419 // The *_safe float conversion helpers deal with special inputs.
1420 // e.g. converting NaN, +Inf or -Inf from float/double to int/long raises the
1421 // invalid-operation exception flag and may yield a result that differs from
1422 // Java semantics, so these helpers handle such cases to get correct results.
1423 void fcvt_w_s_safe(Register dst, FloatRegister src, Register tmp = t0);
1424 void fcvt_l_s_safe(Register dst, FloatRegister src, Register tmp = t0);
1425 void fcvt_w_d_safe(Register dst, FloatRegister src, Register tmp = t0);
1426 void fcvt_l_d_safe(Register dst, FloatRegister src, Register tmp = t0);
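// For example, Java requires (int)Float.NaN == 0, while a plain fcvt.w.s on a NaN
// input produces the saturated maximum value; the *_safe variants are expected to
// detect such inputs and substitute the Java-mandated result.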
1427
1428 void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
1429 void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
1430
1431 // Helper routine handling the NaN slow path when converting float to float16
1432 void float_to_float16_NaN(Register dst, FloatRegister src, Register tmp1, Register tmp2);
1433
1434 // vector load/store unit-stride instructions
1435 void vlex_v(VectorRegister vd, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
1436 switch (sew) {
1437 case Assembler::e64:
1438 vle64_v(vd, base, vm);
1439 break;
1440 case Assembler::e32:
1441 vle32_v(vd, base, vm);
1442 break;
1443 case Assembler::e16:
1444 vle16_v(vd, base, vm);
1445 break;
1446 case Assembler::e8: // fall through
1447 default:
1448 vle8_v(vd, base, vm);
1449 break;
1450 }
1451 }
1452
1453 void vsex_v(VectorRegister store_data, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
1454 switch (sew) {
1455 case Assembler::e64:
1456 vse64_v(store_data, base, vm);
1457 break;
1458 case Assembler::e32:
1459 vse32_v(store_data, base, vm);
1460 break;
1461 case Assembler::e16:
1462 vse16_v(store_data, base, vm);
1463 break;
1464 case Assembler::e8: // fall through
1465 default:
1466 vse8_v(store_data, base, vm);
1467 break;
1468 }
1469 }
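// Usage sketch:
//   vlex_v(v1, x10, Assembler::e32);   // emits vle32.v v1, (x10)
//   vsex_v(v1, x11, Assembler::e32);   // emits vse32.v v1, (x11)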
1470
1471 // vector pseudo instructions
1472 // rotate the elements of a vector register left by 'shift' bits, 32-bit element version
1473 inline void vrole32_vi(VectorRegister vd, uint32_t shift, VectorRegister tmp_vr) {
1474 vsrl_vi(tmp_vr, vd, 32 - shift);
1475 vsll_vi(vd, vd, shift);
1476 vor_vv(vd, vd, tmp_vr);
1477 }
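// e.g. with SEW == 32 and shift == 8, each element x becomes (x << 8) | (x >> 24);
// note that tmp_vr must be different from vd.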
1478
1479 inline void vl1r_v(VectorRegister vd, Register rs) {
1480 vl1re8_v(vd, rs);
1481 }
1482
1483 inline void vmnot_m(VectorRegister vd, VectorRegister vs) {
1484 vmnand_mm(vd, vs, vs);
1485 }
1486
1487 inline void vncvt_x_x_w(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1488 vnsrl_wx(vd, vs, x0, vm);
1489 }
1490
1491 inline void vneg_v(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1492 vrsub_vx(vd, vs, x0, vm);
1493 }
1494
1495 inline void vfneg_v(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1496 vfsgnjn_vv(vd, vs, vs, vm);
1497 }
1498
1499 inline void vfabs_v(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) {
1500 vfsgnjx_vv(vd, vs, vs, vm);
1501 }
1502
1503 inline void vmsgt_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1504 vmslt_vv(vd, vs1, vs2, vm);
1505 }
1506
1507 inline void vmsgtu_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1508 vmsltu_vv(vd, vs1, vs2, vm);
1509 }
1510
1511 inline void vmsge_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1512 vmsle_vv(vd, vs1, vs2, vm);
1513 }
1514
1515 inline void vmsgeu_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1516 vmsleu_vv(vd, vs1, vs2, vm);
1517 }
1518
1519 inline void vmfgt_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1520 vmflt_vv(vd, vs1, vs2, vm);
1521 }
1522
1523 inline void vmfge_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) {
1524 vmfle_vv(vd, vs1, vs2, vm);
1525 }
1526
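// vmsltu.vi and vmsgeu.vi have no direct machine encoding; the two pseudo
// instructions below synthesize them from vmsleu.vi / vmsgtu.vi using imm - 1,
// and the guarantee keeps imm - 1 within the encodable immediate range.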
1527 inline void vmsltu_vi(VectorRegister Vd, VectorRegister Vs2, uint32_t imm, VectorMask vm = unmasked) {
1528 guarantee(imm >= 1 && imm <= 16, "imm is invalid");
1529 vmsleu_vi(Vd, Vs2, imm-1, vm);
1530 }
1531
1532 inline void vmsgeu_vi(VectorRegister Vd, VectorRegister Vs2, uint32_t imm, VectorMask vm = unmasked) {
1533 guarantee(imm >= 1 && imm <= 16, "imm is invalid");
1534 vmsgtu_vi(Vd, Vs2, imm-1, vm);
1535 }
1536
1537 // Copy mask register
1538 inline void vmmv_m(VectorRegister vd, VectorRegister vs) {
1539 vmand_mm(vd, vs, vs);
1540 }
1541
1542 // Clear mask register
1543 inline void vmclr_m(VectorRegister vd) {
1544 vmxor_mm(vd, vd, vd);
1545 }
1546
1547 // Set mask register
1548 inline void vmset_m(VectorRegister vd) {
1549 vmxnor_mm(vd, vd, vd);
1550 }
1551
1552 inline void vnot_v(VectorRegister Vd, VectorRegister Vs, VectorMask vm = unmasked) {
1553 vxor_vi(Vd, Vs, -1, vm);
1554 }
1555
1556 static const int zero_words_block_size;
1557
1558 void cast_primitive_type(BasicType type, Register Rt) {
1559 switch (type) {
1560 case T_BOOLEAN:
1561 sltu(Rt, zr, Rt);  // normalize to 0 or 1: Rt = (Rt != 0)
1562 break;
1563 case T_CHAR :
1564 zext(Rt, Rt, 16);
1565 break;
1566 case T_BYTE :
1567 sext(Rt, Rt, 8);
1568 break;
1569 case T_SHORT :
1570 sext(Rt, Rt, 16);
1571 break;
1572 case T_INT :
1573 sext(Rt, Rt, 32);
1574 break;
1575 case T_LONG : /* nothing to do */ break;
1576 case T_VOID : /* nothing to do */ break;
1577 case T_FLOAT : /* nothing to do */ break;
1578 case T_DOUBLE : /* nothing to do */ break;
1579 default: ShouldNotReachHere();
1580 }
1581 }
1582
1583 // float/double compare; 'unordered_result' selects the result produced when the operands are unordered (either is NaN)
1584 void float_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
1585 void double_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
1586
1587 // Zero/Sign-extend
1588 void zext(Register dst, Register src, int bits);
1589 void sext(Register dst, Register src, int bits);
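// e.g. sext(dst, src, 32) sign-extends the low 32 bits of src into dst and
// zext(dst, src, 16) zero-extends the low 16 bits; the emitted sequence is
// expected to depend on the available extensions (e.g. Zbb).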
1590
1591 private:
1592 void cmp_x2i(Register dst, Register src1, Register src2, Register tmp, bool is_signed = true);
1593
1594 public:
1595 // compare src1 and src2 and get -1/0/1 in dst.
1596 // if [src1 > src2], dst = 1;
1597 // if [src1 == src2], dst = 0;
1598 // if [src1 < src2], dst = -1;
1599 void cmp_l2i(Register dst, Register src1, Register src2, Register tmp = t0);
1600 void cmp_ul2i(Register dst, Register src1, Register src2, Register tmp = t0);
1601 void cmp_uw2i(Register dst, Register src1, Register src2, Register tmp = t0);
1602
1603 // support for argument shuffling
1604 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = t0);
1605 void float_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
1606 void long_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
1607 void double_move(VMRegPair src, VMRegPair dst, Register tmp = t0);
1608 void object_move(OopMap* map,
1609 int oop_handle_offset,
1610 int framesize_in_slots,
1611 VMRegPair src,
1612 VMRegPair dst,
1613 bool is_receiver,
1614 int* receiver_offset);
1615
1616 #ifdef ASSERT
1617 // Template shorthand support to clean up after a failed attempt at trampoline
1618 // call generation (see trampoline_call() below), when a set of Labels must
1619 // be reset (before returning).
1620 template<typename Label, typename... More>
1621 void reset_labels(Label& lbl, More&... more) {
1622 lbl.reset(); reset_labels(more...);
1623 }
1624 template<typename Label>
1625 void reset_labels(Label& lbl) {
1626 lbl.reset();
1627 }
1628 #endif
1629
1630 private:
1631
1632 void repne_scan(Register addr, Register value, Register count, Register tmp);
1633
1634 int bitset_to_regs(unsigned int bitset, unsigned char* regs);
1635 Address add_memory_helper(const Address dst, Register tmp);
1636
1637 void load_reserved(Register dst, Register addr, Assembler::operand_size size, Assembler::Aqrl acquire);
1638 void store_conditional(Register dst, Register new_val, Register addr, Assembler::operand_size size, Assembler::Aqrl release);
1639
1640 public:
1641 void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
1642 void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
1643
1644 public:
1645 enum {
1646 // movptr
1647 movptr1_instruction_size = 6 * MacroAssembler::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
1648 movptr2_instruction_size = 5 * MacroAssembler::instruction_size, // lui, lui, slli, add, addi. See movptr2().
1649 load_pc_relative_instruction_size = 2 * MacroAssembler::instruction_size // auipc, ld
1650 };
1651
1652 static bool is_load_pc_relative_at(address branch);
1653 static bool is_li16u_at(address instr);
1654
1655 static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
1656 static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
1657 static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
1658 static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
1659 static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
1660 static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
1661 static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
1662 static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
1663 static bool is_add_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110011 && extract_funct3(instr) == 0b000; }
1664 static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
1665 static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
1666 static bool is_addiw_to_zr_at(address instr){ assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
1667 static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
1668 static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
1669
1670 static bool is_srli_at(address instr) {
1671 assert_cond(instr != nullptr);
1672 return extract_opcode(instr) == 0b0010011 &&
1673 extract_funct3(instr) == 0b101 &&
1674 Assembler::extract(((unsigned*)instr)[0], 31, 26) == 0b000000;
1675 }
1676
1677 static bool is_slli_shift_at(address instr, uint32_t shift) {
1678 assert_cond(instr != nullptr);
1679 return (extract_opcode(instr) == 0b0010011 && // opcode field
1680 extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation
1681 Assembler::extract(Assembler::ld_instr(instr), 25, 20) == shift); // shamt field
1682 }
1683
1684 static bool is_movptr1_at(address instr);
1685 static bool is_movptr2_at(address instr);
1686
1687 static bool is_lwu_to_zr(address instr);
1688
1689 static Register extract_rs1(address instr);
1690 static Register extract_rs2(address instr);
1691 static Register extract_rd(address instr);
1692 static uint32_t extract_opcode(address instr);
1693 static uint32_t extract_funct3(address instr);
1694
1695 // the instruction sequence of movptr1 is as below:
1696 // lui
1697 // addi
1698 // slli
1699 // addi
1700 // slli
1701 // addi/jalr/load
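// The check below verifies that the sequence forms a single dependency chain:
// each instruction reads (rs1) the register written (rd) by its predecessor and,
// for the intermediate instructions, writes it back, with the final
// addi/jalr/load consuming the result of the last slli.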
1702 static bool check_movptr1_data_dependency(address instr) {
1703 address lui = instr;
1704 address addi1 = lui + MacroAssembler::instruction_size;
1705 address slli1 = addi1 + MacroAssembler::instruction_size;
1706 address addi2 = slli1 + MacroAssembler::instruction_size;
1707 address slli2 = addi2 + MacroAssembler::instruction_size;
1708 address last_instr = slli2 + MacroAssembler::instruction_size;
1709 return extract_rs1(addi1) == extract_rd(lui) &&
1710 extract_rs1(addi1) == extract_rd(addi1) &&
1711 extract_rs1(slli1) == extract_rd(addi1) &&
1712 extract_rs1(slli1) == extract_rd(slli1) &&
1713 extract_rs1(addi2) == extract_rd(slli1) &&
1714 extract_rs1(addi2) == extract_rd(addi2) &&
1715 extract_rs1(slli2) == extract_rd(addi2) &&
1716 extract_rs1(slli2) == extract_rd(slli2) &&
1717 extract_rs1(last_instr) == extract_rd(slli2);
1718 }
1719
1720 // the instruction sequence of movptr2 is as below:
1721 // lui
1722 // lui
1723 // slli
1724 // add
1725 // addi/jalr/load
1726 static bool check_movptr2_data_dependency(address instr) {
1727 address lui1 = instr;
1728 address lui2 = lui1 + MacroAssembler::instruction_size;
1729 address slli = lui2 + MacroAssembler::instruction_size;
1730 address add = slli + MacroAssembler::instruction_size;
1731 address last_instr = add + MacroAssembler::instruction_size;
1732 return extract_rd(add) == extract_rd(lui2) &&
1733 extract_rs1(add) == extract_rd(lui2) &&
1734 extract_rs2(add) == extract_rd(slli) &&
1735 extract_rs1(slli) == extract_rd(lui1) &&
1736 extract_rd(slli) == extract_rd(lui1) &&
1737 extract_rs1(last_instr) == extract_rd(add);
1738 }
1739
1740 // the instruction sequence of li16u is as below:
1741 // lui
1742 // srli
1743 static bool check_li16u_data_dependency(address instr) {
1744 address lui = instr;
1745 address srli = lui + MacroAssembler::instruction_size;
1746
1747 return extract_rs1(srli) == extract_rd(lui) &&
1748 extract_rs1(srli) == extract_rd(srli);
1749 }
1750
1751 // the instruction sequence of li32 is as below:
1752 // lui
1753 // addiw
1754 static bool check_li32_data_dependency(address instr) {
1755 address lui = instr;
1756 address addiw = lui + MacroAssembler::instruction_size;
1757
1758 return extract_rs1(addiw) == extract_rd(lui) &&
1759 extract_rs1(addiw) == extract_rd(addiw);
1760 }
1761
1762 // the instruction sequence of pc-relative is as below:
1763 // auipc
1764 // jalr/addi/load/float_load
1765 static bool check_pc_relative_data_dependency(address instr) {
1766 address auipc = instr;
1767 address last_instr = auipc + MacroAssembler::instruction_size;
1768
1769 return extract_rs1(last_instr) == extract_rd(auipc);
1770 }
1771
1772 // the instruction sequence of load_label is as below:
1773 // auipc
1774 // load
1775 static bool check_load_pc_relative_data_dependency(address instr) {
1776 address auipc = instr;
1777 address load = auipc + MacroAssembler::instruction_size;
1778
1779 return extract_rd(load) == extract_rd(auipc) &&
1780 extract_rs1(load) == extract_rd(load);
1781 }
1782
1783 static bool is_li32_at(address instr);
1784 static bool is_pc_relative_at(address branch);
1785
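// A membar is emitted as a FENCE instruction: opcode 0b0001111 (MISC-MEM) with
// funct3 == 0b000. get_membar_kind()/set_membar_kind() are expected to read and
// update its predecessor/successor ordering bits.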
1786 static bool is_membar(address addr) {
1787 return (Bytes::get_native_u4(addr) & 0x7f) == 0b1111 && extract_funct3(addr) == 0;
1788 }
1789 static uint32_t get_membar_kind(address addr);
1790 static void set_membar_kind(address addr, uint32_t order_kind);
1791 };
1792
1793 #ifdef ASSERT
1794 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
1795 #endif
1796
1797 #endif // CPU_RISCV_MACROASSEMBLER_RISCV_HPP