69
70 // This is the base routine called by the different versions of call_VM. The interpreter
71 // may customize this version by overriding it for its purposes (e.g., to save/restore
72 // additional registers when doing a VM call).
73 //
74 // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
75 // returns the register which contains the thread upon return. If a thread register has been
76 // specified, the return value will correspond to that register. If no last_java_sp is specified
77 // (noreg) then rsp will be used instead.
78 virtual void call_VM_base( // returns the register containing the thread upon return
79 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
80 Register java_thread, // the thread if computed before; use noreg otherwise
81 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
82 address entry_point, // the entry point
83 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
84 bool check_exceptions // whether to check for pending exceptions after return
85 );
86
87 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
88
89 enum KlassDecodeMode { // strategies for decoding narrow Klass pointers
90 KlassDecodeNone, // no decode mode determined yet / decoding not possible
91 KlassDecodeZero, // NOTE(review): presumably encoding base is zero (shift-only decode) -- confirm in klass_decode_mode()
92 KlassDecodeXor, // NOTE(review): presumably decode via XOR with the encoding base -- confirm
93 KlassDecodeMovk // NOTE(review): presumably decode by inserting base bits with MOVK -- confirm
94 };
95
96 KlassDecodeMode klass_decode_mode();
97
98 private:
99 static KlassDecodeMode _klass_decode_mode;
100
101 public:
102 MacroAssembler(CodeBuffer* code) : Assembler(code) {} // construct a MacroAssembler emitting into the given CodeBuffer
103
104 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
105 // The implementation is only non-empty for the InterpreterMacroAssembler,
106 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
107 virtual void check_and_handle_popframe(Register java_thread);
108 virtual void check_and_handle_earlyret(Register java_thread);
109
110 void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1);
111 void rt_call(address dest, Register tmp = rscratch1);
112
113 // Load Effective Address
114 void lea(Register r, const Address &a) {
115 InstructionMark im(this);
116 a.lea(this, r);
825
826 void reset_last_Java_frame(Register thread);
827
828 // thread in the default location (rthread)
829 void reset_last_Java_frame(bool clear_fp);
830
831 // Stores
832 void store_check(Register obj); // store check for obj - register is destroyed afterwards
833 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
834
835 void resolve_jobject(Register value, Register tmp1, Register tmp2);
836 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
837
838 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
839 void c2bool(Register x);
840
841 void load_method_holder_cld(Register rresult, Register rmethod);
842 void load_method_holder(Register holder, Register method);
843
844 // oop manipulations
845 void load_klass(Register dst, Register src);
846 void load_klass_check_null(Register dst, Register src);
847 void store_klass(Register dst, Register src);
848 void cmp_klass(Register oop, Register trial_klass, Register tmp);
849
850 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
851 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
852 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
853
854 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
855 Register tmp1, Register tmp2);
856
857 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
858 Register tmp1, Register tmp2, Register tmp3);
859
860 void load_heap_oop(Register dst, Address src, Register tmp1,
861 Register tmp2, DecoratorSet decorators = 0);
862
863 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
864 Register tmp2, DecoratorSet decorators = 0);
1554 int sve_vec_reg_size_in_bytes) {
1555 assert(sve_vec_reg_size_in_bytes % 16 == 0, "unexpected sve vector reg size");
1556 for (int i = 0; i < sve_vec_reg_size_in_bytes / 16; i++) {
1557 spill_copy128(src_offset, dst_offset);
1558 src_offset += 16;
1559 dst_offset += 16;
1560 }
1561 }
1562 void spill_copy_sve_predicate_stack_to_stack(int src_offset, int dst_offset, // copy a spilled SVE predicate register between two stack slots
1563 int sve_predicate_reg_size_in_bytes) {
1564 sve_ldr(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, src_offset)); // ptrue is used as a scratch predicate register here
1565 sve_str(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, dst_offset));
1566 reinitialize_ptrue(); // ptrue was clobbered by the load above; restore it
1567 }
1568 void cache_wb(Address line);
1569 void cache_wbsync(bool is_pre);
1570
1571 // Code for java.lang.Thread::onSpinWait() intrinsic.
1572 void spin_wait();
1573
1574 private:
1575 // Check the current thread doesn't need a cross modify fence.
1576 void verify_cross_modify_fence_not_required() PRODUCT_RETURN;
1577
1578 };
1579
1580 #ifdef ASSERT
1581 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; } // this platform opts out of instruction-mark assertions
1582 #endif
1583
1584 /**
1585 * class SkipIfEqual:
1586 *
1587 * Instantiating this class will result in assembly code being output that will
1588 * jump around any code emitted between the creation of the instance and its
1589 * automatic destruction at the end of a scope block, depending on the value of
1590 * the flag passed to the constructor, which will be checked at run-time.
1591 */
1592 class SkipIfEqual {
1593 private:
|
69
70 // This is the base routine called by the different versions of call_VM. The interpreter
71 // may customize this version by overriding it for its purposes (e.g., to save/restore
72 // additional registers when doing a VM call).
73 //
74 // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
75 // returns the register which contains the thread upon return. If a thread register has been
76 // specified, the return value will correspond to that register. If no last_java_sp is specified
77 // (noreg) then rsp will be used instead.
78 virtual void call_VM_base( // returns the register containing the thread upon return
79 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
80 Register java_thread, // the thread if computed before; use noreg otherwise
81 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
82 address entry_point, // the entry point
83 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
84 bool check_exceptions // whether to check for pending exceptions after return
85 );
86
87 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
88
89 public:
90
91 enum KlassDecodeMode { // strategies for decoding narrow Klass pointers (see klass_decode_mode() below)
92 KlassDecodeNone, // no decode mode determined yet; also returned when a base address is not valid for encoding
93 KlassDecodeZero, // NOTE(review): presumably encoding base is zero (shift-only decode) -- confirm in klass_decode_mode()
94 KlassDecodeXor, // NOTE(review): presumably decode via XOR with the encoding base -- confirm
95 KlassDecodeMovk // NOTE(review): presumably decode by inserting base bits with MOVK -- confirm
96 };
97
98 // Return the current narrow Klass pointer decode mode. Initialized on first call.
99 static KlassDecodeMode klass_decode_mode();
100
101 // Given an arbitrary base address, return the KlassDecodeMode that would be used. Return KlassDecodeNone
102 // if base address is not valid for encoding.
103 static KlassDecodeMode klass_decode_mode_for_base(address base);
104
105 // Returns a static string
106 static const char* describe_klass_decode_mode(KlassDecodeMode mode);
107
108 private:
109 static KlassDecodeMode _klass_decode_mode;
110
111 public:
112 MacroAssembler(CodeBuffer* code) : Assembler(code) {} // construct a MacroAssembler emitting into the given CodeBuffer
113
114 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
115 // The implementation is only non-empty for the InterpreterMacroAssembler,
116 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
117 virtual void check_and_handle_popframe(Register java_thread);
118 virtual void check_and_handle_earlyret(Register java_thread);
119
120 void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1);
121 void rt_call(address dest, Register tmp = rscratch1);
122
123 // Load Effective Address
124 void lea(Register r, const Address &a) {
125 InstructionMark im(this);
126 a.lea(this, r);
835
836 void reset_last_Java_frame(Register thread);
837
838 // thread in the default location (rthread)
839 void reset_last_Java_frame(bool clear_fp);
840
841 // Stores
842 void store_check(Register obj); // store check for obj - register is destroyed afterwards
843 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
844
845 void resolve_jobject(Register value, Register tmp1, Register tmp2);
846 void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
847
848 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
849 void c2bool(Register x);
850
851 void load_method_holder_cld(Register rresult, Register rmethod);
852 void load_method_holder(Register holder, Register method);
853
854 // oop manipulations
855 void load_nklass(Register dst, Register src);
856 void load_klass(Register dst, Register src);
857 void load_klass_check_null(Register dst, Register src);
858 void store_klass(Register dst, Register src);
859 void cmp_klass(Register oop, Register trial_klass, Register tmp);
860
861 void resolve_weak_handle(Register result, Register tmp1, Register tmp2);
862 void resolve_oop_handle(Register result, Register tmp1, Register tmp2);
863 void load_mirror(Register dst, Register method, Register tmp1, Register tmp2);
864
865 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
866 Register tmp1, Register tmp2);
867
868 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
869 Register tmp1, Register tmp2, Register tmp3);
870
871 void load_heap_oop(Register dst, Address src, Register tmp1,
872 Register tmp2, DecoratorSet decorators = 0);
873
874 void load_heap_oop_not_null(Register dst, Address src, Register tmp1,
875 Register tmp2, DecoratorSet decorators = 0);
1565 int sve_vec_reg_size_in_bytes) {
1566 assert(sve_vec_reg_size_in_bytes % 16 == 0, "unexpected sve vector reg size");
1567 for (int i = 0; i < sve_vec_reg_size_in_bytes / 16; i++) {
1568 spill_copy128(src_offset, dst_offset);
1569 src_offset += 16;
1570 dst_offset += 16;
1571 }
1572 }
1573 void spill_copy_sve_predicate_stack_to_stack(int src_offset, int dst_offset, // copy a spilled SVE predicate register between two stack slots
1574 int sve_predicate_reg_size_in_bytes) {
1575 sve_ldr(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, src_offset)); // ptrue is used as a scratch predicate register here
1576 sve_str(ptrue, sve_spill_address(sve_predicate_reg_size_in_bytes, dst_offset));
1577 reinitialize_ptrue(); // ptrue was clobbered by the load above; restore it
1578 }
1579 void cache_wb(Address line);
1580 void cache_wbsync(bool is_pre);
1581
1582 // Code for java.lang.Thread::onSpinWait() intrinsic.
1583 void spin_wait();
1584
1585 void fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow, bool rt_check_stack = true);
1586 void fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
1587
1588 private:
1589 // Check the current thread doesn't need a cross modify fence.
1590 void verify_cross_modify_fence_not_required() PRODUCT_RETURN;
1591
1592 };
1593
1594 #ifdef ASSERT
1595 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; } // this platform opts out of instruction-mark assertions
1596 #endif
1597
1598 /**
1599 * class SkipIfEqual:
1600 *
1601 * Instantiating this class will result in assembly code being output that will
1602 * jump around any code emitted between the creation of the instance and its
1603 * automatic destruction at the end of a scope block, depending on the value of
1604 * the flag passed to the constructor, which will be checked at run-time.
1605 */
1606 class SkipIfEqual {
1607 private:
|