< prev index next >

src/hotspot/cpu/aarch64/assembler_aarch64.hpp

Print this page

3117     int encodedShift = isSHR ? cVal - shift : cVal + shift;
3118     tszh = encodedShift >> 5;
3119     tszl_imm = encodedShift & 0x1f;
3120   }
3121 
3122 public:
3123 
3124 // SVE integer arithmetic - predicate
// Each INSN below expands to an emitter of the form
//   NAME(Zdn_or_Zd_or_Vd, T, Pg, Znm_or_Vn)
// where op1/op2 are the instruction's fixed opcode fields; the actual bit
// layout is produced by sve_predicate_reg_insn (defined elsewhere in this
// class).  The per-line comments name the operation; entries marked "unary"
// use only one source operand, and "reduction to scalar" entries write a
// SIMD&FP scalar result.  Q-sized elements are rejected by the assert.
3125 #define INSN(NAME, op1, op2)                                                                            \
3126   void NAME(FloatRegister Zdn_or_Zd_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Znm_or_Vn) {  \
3127     assert(T != Q, "invalid register variant");                                                         \
3128     sve_predicate_reg_insn(op1, op2, Zdn_or_Zd_or_Vd, T, Pg, Znm_or_Vn);                                \
3129   }
3130 
3131   INSN(sve_abs,  0b00000100, 0b010110101); // vector abs, unary
3132   INSN(sve_add,  0b00000100, 0b000000000); // vector add
3133   INSN(sve_and,  0b00000100, 0b011010000); // vector and
3134   INSN(sve_andv, 0b00000100, 0b011010001); // bitwise and reduction to scalar
3135   INSN(sve_asr,  0b00000100, 0b010000100); // vector arithmetic shift right
3136   INSN(sve_bic,  0b00000100, 0b011011000); // vector bitwise clear

3137   INSN(sve_cnt,  0b00000100, 0b011010101); // count non-zero bits
3138   INSN(sve_cpy,  0b00000101, 0b100000100); // copy scalar to each active vector element
3139   INSN(sve_eor,  0b00000100, 0b011001000); // vector eor
3140   INSN(sve_eorv, 0b00000100, 0b011001001); // bitwise xor reduction to scalar
3141   INSN(sve_lsl,  0b00000100, 0b010011100); // vector logical shift left
3142   INSN(sve_lsr,  0b00000100, 0b010001100); // vector logical shift right
3143   INSN(sve_mul,  0b00000100, 0b010000000); // vector mul
3144   INSN(sve_neg,  0b00000100, 0b010111101); // vector neg, unary
3145   INSN(sve_not,  0b00000100, 0b011110101); // bitwise invert vector, unary
3146   INSN(sve_orr,  0b00000100, 0b011000000); // vector or
3147   INSN(sve_orv,  0b00000100, 0b011000001); // bitwise or reduction to scalar
3148   INSN(sve_smax, 0b00000100, 0b001000000); // signed maximum vectors
3149   INSN(sve_smaxv, 0b00000100, 0b001000001); // signed maximum reduction to scalar
3150   INSN(sve_smin,  0b00000100, 0b001010000); // signed minimum vectors
3151   INSN(sve_sminv, 0b00000100, 0b001010001); // signed minimum reduction to scalar
3152   INSN(sve_sub,   0b00000100, 0b000001000); // vector sub
3153   INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar
3154 #undef INSN
3155 
3156 // SVE floating-point arithmetic - predicate

3776     pgrf(Pg, 10), rf(Zn, 5), rf(Rd, 0);                                         \
3777   }
3778 
3779   INSN(sve_lasta, 0b0);
3780   INSN(sve_lastb, 0b1);
3781 #undef INSN
3782 
3783 // SVE extract element to SIMD&FP scalar register
// Same shape as the GPR variant above, but the destination is a SIMD&FP
// scalar register (Vd).  Bit 16 ("before") distinguishes the two variants:
// 0 selects sve_lasta, 1 selects sve_lastb.
3784 #define INSN(NAME, before)                                                           \
3785   void NAME(FloatRegister Vd, SIMD_RegVariant T, PRegister Pg,  FloatRegister Zn) {  \
3786     starti;                                                                          \
3787     f(0b00000101, 31, 24), f(T, 23, 22), f(0b10001, 21, 17);                         \
3788     f(before, 16), f(0b100, 15, 13);                                                 \
3789     pgrf(Pg, 10), rf(Zn, 5), rf(Vd, 0);                                              \
3790   }
3791 
3792   INSN(sve_lasta, 0b0);
3793   INSN(sve_lastb, 0b1);
3794 #undef INSN
3795 
3796   // SVE create index starting from and incremented by immediate














  // Emit SVE INDEX (immediate start, immediate increment): Zd.T gets the
  // sequence imm1, imm1+imm2, imm1+2*imm2, ...  Both immediates occupy
  // signed 5-bit fields (sf() presumably sign-encodes them, limiting the
  // range to -16..15 -- TODO confirm sf semantics).
  // NOTE(review): no element-size assert here, although sve_tbl below
  // rejects T == Q -- consider adding the same guard.
3797   void sve_index(FloatRegister Zd, SIMD_RegVariant T, int imm1, int imm2) {
3798     starti;

3799     f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
3800     sf(imm2, 20, 16), f(0b010000, 15, 10);  // imm2 = increment, bits 20:16
3801     sf(imm1, 9, 5), rf(Zd, 0);              // imm1 = start value, bits 9:5
3802   }
3803 










3804   // SVE programmable table lookup/permute using vector of element indices
  // Presumably Zn supplies the table entries and Zm the per-element indices
  // (TBL operand order) -- TODO confirm against the Arm ARM.  All element
  // sizes except Q are accepted.
3805   void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
3806     starti;
3807     assert(T != Q, "invalid size");
3808     f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
3809     f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);
3810   }
3811 

















  // Construct an Assembler emitting into the given CodeBuffer.
3812   Assembler(CodeBuffer* code) : AbstractAssembler(code) {
3813   }
3814 
3815   // Stack overflow checking
3816   virtual void bang_stack_with_offset(int offset);
3817 
  // Predicates reporting whether an immediate is encodable in the named
  // instruction form (definitions live elsewhere in the port).
3818   static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
3819   static bool operand_valid_for_sve_logical_immediate(unsigned elembits, uint64_t imm);
3820   static bool operand_valid_for_add_sub_immediate(int64_t imm);
3821   static bool operand_valid_for_sve_add_sub_immediate(int64_t imm);
3822   static bool operand_valid_for_float_immediate(double imm);
3823   static int  operand_valid_for_movi_immediate(uint64_t imm64, SIMD_Arrangement T);
3824 
  // Emit a 64-bit data word with the given relocation information.
3825   void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
3826   void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
3827 };
3828 
3829 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
3830                                              Assembler::Membar_mask_bits b) {
3831   return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));

3117     int encodedShift = isSHR ? cVal - shift : cVal + shift;
3118     tszh = encodedShift >> 5;
3119     tszl_imm = encodedShift & 0x1f;
3120   }
3121 
3122 public:
3123 
3124 // SVE integer arithmetic - predicate
// Each INSN below expands to an emitter of the form
//   NAME(Zdn_or_Zd_or_Vd, T, Pg, Znm_or_Vn)
// where op1/op2 are the instruction's fixed opcode fields; the actual bit
// layout is produced by sve_predicate_reg_insn (defined elsewhere in this
// class).  The per-line comments name the operation; entries marked "unary"
// use only one source operand, and "reduction to scalar" entries write a
// SIMD&FP scalar result.  Q-sized elements are rejected by the assert.
3125 #define INSN(NAME, op1, op2)                                                                            \
3126   void NAME(FloatRegister Zdn_or_Zd_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Znm_or_Vn) {  \
3127     assert(T != Q, "invalid register variant");                                                         \
3128     sve_predicate_reg_insn(op1, op2, Zdn_or_Zd_or_Vd, T, Pg, Znm_or_Vn);                                \
3129   }
3130 
3131   INSN(sve_abs,  0b00000100, 0b010110101); // vector abs, unary
3132   INSN(sve_add,  0b00000100, 0b000000000); // vector add
3133   INSN(sve_and,  0b00000100, 0b011010000); // vector and
3134   INSN(sve_andv, 0b00000100, 0b011010001); // bitwise and reduction to scalar
3135   INSN(sve_asr,  0b00000100, 0b010000100); // vector arithmetic shift right
3136   INSN(sve_bic,  0b00000100, 0b011011000); // vector bitwise clear
3137   INSN(sve_clz,  0b00000100, 0b011001101); // vector count leading zero bits
3138   INSN(sve_cnt,  0b00000100, 0b011010101); // count non-zero bits
3139   INSN(sve_cpy,  0b00000101, 0b100000100); // copy scalar to each active vector element
3140   INSN(sve_eor,  0b00000100, 0b011001000); // vector eor
3141   INSN(sve_eorv, 0b00000100, 0b011001001); // bitwise xor reduction to scalar
3142   INSN(sve_lsl,  0b00000100, 0b010011100); // vector logical shift left
3143   INSN(sve_lsr,  0b00000100, 0b010001100); // vector logical shift right
3144   INSN(sve_mul,  0b00000100, 0b010000000); // vector mul
3145   INSN(sve_neg,  0b00000100, 0b010111101); // vector neg, unary
3146   INSN(sve_not,  0b00000100, 0b011110101); // bitwise invert vector, unary
3147   INSN(sve_orr,  0b00000100, 0b011000000); // vector or
3148   INSN(sve_orv,  0b00000100, 0b011000001); // bitwise or reduction to scalar
3149   INSN(sve_smax, 0b00000100, 0b001000000); // signed maximum vectors
3150   INSN(sve_smaxv, 0b00000100, 0b001000001); // signed maximum reduction to scalar
3151   INSN(sve_smin,  0b00000100, 0b001010000); // signed minimum vectors
3152   INSN(sve_sminv, 0b00000100, 0b001010001); // signed minimum reduction to scalar
3153   INSN(sve_sub,   0b00000100, 0b000001000); // vector sub
3154   INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar
3155 #undef INSN
3156 
3157 // SVE floating-point arithmetic - predicate

3777     pgrf(Pg, 10), rf(Zn, 5), rf(Rd, 0);                                         \
3778   }
3779 
3780   INSN(sve_lasta, 0b0);
3781   INSN(sve_lastb, 0b1);
3782 #undef INSN
3783 
3784 // SVE extract element to SIMD&FP scalar register
// Same shape as the GPR variant above, but the destination is a SIMD&FP
// scalar register (Vd).  Bit 16 ("before") distinguishes the two variants:
// 0 selects sve_lasta, 1 selects sve_lastb.
3785 #define INSN(NAME, before)                                                           \
3786   void NAME(FloatRegister Vd, SIMD_RegVariant T, PRegister Pg,  FloatRegister Zn) {  \
3787     starti;                                                                          \
3788     f(0b00000101, 31, 24), f(T, 23, 22), f(0b10001, 21, 17);                         \
3789     f(before, 16), f(0b100, 15, 13);                                                 \
3790     pgrf(Pg, 10), rf(Zn, 5), rf(Vd, 0);                                              \
3791   }
3792 
3793   INSN(sve_lasta, 0b0);
3794   INSN(sve_lastb, 0b1);
3795 #undef INSN
3796 
3797 // SVE reverse within elements
// opc (bits 17:16) selects the operation; cond is a per-instruction
// element-size constraint evaluated in the assert at emit time:
// sve_revb (byte reversal within elements) only makes sense for elements
// wider than a byte, hence H/S/D only, while sve_rbit accepts everything
// except Q.
3798 #define INSN(NAME, opc, cond)                                                        \
3799   void NAME(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg,  FloatRegister Zn) {  \
3800     starti;                                                                          \
3801     assert(cond, "invalid size");                                                    \
3802     f(0b00000101, 31, 24), f(T, 23, 22), f(0b1001, 21, 18), f(opc, 17, 16);          \
3803     f(0b100, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);                            \
3804   }
3805 
3806   INSN(sve_revb, 0b00, T == H || T == S || T == D);
3807   INSN(sve_rbit, 0b11, T != Q);
3808 #undef INSN
3809 
3810   // SVE Index Generation:
3811   // Create index starting from and incremented by immediate
  // Zd.T receives the sequence imm1, imm1+imm2, imm1+2*imm2, ...  Both
  // immediates occupy signed 5-bit fields (sf() presumably sign-encodes
  // them, limiting the range to -16..15 -- TODO confirm sf semantics).
3812   void sve_index(FloatRegister Zd, SIMD_RegVariant T, int imm1, int imm2) {
3813     starti;
3814     assert(T != Q, "invalid size");
3815     sf(imm2, 20, 16), f(0b010000, 15, 10);  // imm2 = increment, bits 20:16
3816     sf(imm2, 20, 16), f(0b010000, 15, 10);
3817     sf(imm1, 9, 5), rf(Zd, 0);
3818   }
3819 
3820   // SVE Index Generation:
3821   // Create index starting from general-purpose register and incremented by immediate
  // Differs from the immediate form above only in bits 15:10 (0b010001 vs
  // 0b010000) and in taking the start value from Rn (bits 9:5, via zrf --
  // presumably allowing zr as the base; TODO confirm zrf semantics).
  // imm is the signed 5-bit per-element increment.
3822   void sve_index(FloatRegister Zd, SIMD_RegVariant T, Register Rn, int imm) {
3823     starti;
3824     assert(T != Q, "invalid size");
3825     f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
3826     sf(imm, 20, 16), f(0b010001, 15, 10);
3827     zrf(Rn, 5), rf(Zd, 0);
3828   }
3829 
3830   // SVE programmable table lookup/permute using vector of element indices
  // Presumably Zn supplies the table entries and Zm the per-element indices
  // (TBL operand order) -- TODO confirm against the Arm ARM.  All element
  // sizes except Q are accepted.
3831   void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
3832     starti;
3833     assert(T != Q, "invalid size");
3834     f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
3835     f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);
3836   }
3837 
3838   // Shuffle active elements of vector to the right and fill with zero
  // Only word (S) and doubleword (D) element sizes are supported, per the
  // assert below.  Pg selects which elements of Zn are "active".
3839   void sve_compact(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, PRegister Pg) {
3840     starti;
3841     assert(T == S || T == D, "invalid size");
3842     f(0b00000101, 31, 24), f(T, 23, 22), f(0b100001100, 21, 13);
3843     pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
3844   }
3845 
3846   // SVE2 Count matching elements in vector
  // Note the distinct top byte (0b01000101 vs 0b00000101 for the SVE
  // instructions above), marking this as an SVE2 encoding; the caller is
  // presumably responsible for the SVE2 feature check -- TODO confirm.
  // Only word (S) and doubleword (D) element sizes are supported.
3847   void sve_histcnt(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg,
3848                    FloatRegister Zn, FloatRegister Zm) {
3849     starti;
3850     assert(T == S || T == D, "invalid size");
3851     f(0b01000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
3852     f(0b110, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
3853   }
3854 
  // Construct an Assembler emitting into the given CodeBuffer.
3855   Assembler(CodeBuffer* code) : AbstractAssembler(code) {
3856   }
3857 
3858   // Stack overflow checking
3859   virtual void bang_stack_with_offset(int offset);
3860 
  // Predicates reporting whether an immediate is encodable in the named
  // instruction form (definitions live elsewhere in the port).
3861   static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
3862   static bool operand_valid_for_sve_logical_immediate(unsigned elembits, uint64_t imm);
3863   static bool operand_valid_for_add_sub_immediate(int64_t imm);
3864   static bool operand_valid_for_sve_add_sub_immediate(int64_t imm);
3865   static bool operand_valid_for_float_immediate(double imm);
3866   static int  operand_valid_for_movi_immediate(uint64_t imm64, SIMD_Arrangement T);
3867 
  // Emit a 64-bit data word with the given relocation information.
3868   void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
3869   void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
3870 };
3871 
3872 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
3873                                              Assembler::Membar_mask_bits b) {
3874   return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
< prev index next >