
src/hotspot/cpu/aarch64/assembler_aarch64.hpp

@@ -3132,10 +3132,11 @@
    INSN(sve_add,  0b00000100, 0b000000000); // vector add
    INSN(sve_and,  0b00000100, 0b011010000); // vector and
    INSN(sve_andv, 0b00000100, 0b011010001); // bitwise and reduction to scalar
    INSN(sve_asr,  0b00000100, 0b010000100); // vector arithmetic shift right
    INSN(sve_bic,  0b00000100, 0b011011000); // vector bitwise clear
+   INSN(sve_clz,  0b00000100, 0b011001101); // vector count leading zero bits
    INSN(sve_cnt,  0b00000100, 0b011010101); // count non-zero bits
    INSN(sve_cpy,  0b00000101, 0b100000100); // copy scalar to each active vector element
    INSN(sve_eor,  0b00000100, 0b011001000); // vector eor
    INSN(sve_eorv, 0b00000100, 0b011001001); // bitwise xor reduction to scalar
    INSN(sve_lsl,  0b00000100, 0b010011100); // vector logical shift left

@@ -3791,26 +3792,68 @@
  
    INSN(sve_lasta, 0b0);
    INSN(sve_lastb, 0b1);
  #undef INSN
  
-   // SVE create index starting from and incremented by immediate
+ // SVE reverse within elements
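+ // (sve_revb reverses the byte order within each active element; sve_rbit reverses the bit order)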
+ #define INSN(NAME, opc, cond)                                                        \
+   void NAME(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn) {   \
+     starti;                                                                          \
+     assert(cond, "invalid size");                                                    \
+     f(0b00000101, 31, 24), f(T, 23, 22), f(0b1001, 21, 18), f(opc, 17, 16);          \
+     f(0b100, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);                            \
+   }
+ 
+   INSN(sve_revb, 0b00, T == H || T == S || T == D);
+   INSN(sve_rbit, 0b11, T != Q);
+ #undef INSN
+ 
+   // SVE Index Generation:
+   // Create index starting from and incremented by immediate
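+   // For each element index n: Zd[n] = imm1 + n * imm2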
    void sve_index(FloatRegister Zd, SIMD_RegVariant T, int imm1, int imm2) {
      starti;
+     assert(T != Q, "invalid size");
      f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
      sf(imm2, 20, 16), f(0b010000, 15, 10);
      sf(imm1, 9, 5), rf(Zd, 0);
    }
  
+   // SVE Index Generation:
+   // Create index starting from general-purpose register and incremented by immediate
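+   // For each element index n: Zd[n] = Rn + n * imm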
+   void sve_index(FloatRegister Zd, SIMD_RegVariant T, Register Rn, int imm) {
+     starti;
+     assert(T != Q, "invalid size");
+     f(0b00000100, 31, 24), f(T, 23, 22), f(0b1, 21);
+     sf(imm, 20, 16), f(0b010001, 15, 10);
+     zrf(Rn, 5), rf(Zd, 0);
+   }
+ 
    // SVE programmable table lookup/permute using vector of element indices
    void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
      starti;
      assert(T != Q, "invalid size");
      f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
      f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);
    }
  
+   // Shuffle active elements of vector to the right and fill with zero
+   void sve_compact(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, PRegister Pg) {
+     starti;
+     assert(T == S || T == D, "invalid size");
+     f(0b00000101, 31, 24), f(T, 23, 22), f(0b100001100, 21, 13);
+     pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
+   }
+ 
+   // SVE2 Count matching elements in vector
+   void sve_histcnt(FloatRegister Zd, SIMD_RegVariant T, PRegister Pg,
+                    FloatRegister Zn, FloatRegister Zm) {
+     starti;
+     assert(T == S || T == D, "invalid size");
+     f(0b01000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
+     f(0b110, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zd, 0);
+   }
+ 
    Assembler(CodeBuffer* code) : AbstractAssembler(code) {
    }
  
    // Stack overflow checking
    virtual void bang_stack_with_offset(int offset);
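
For reference, a rough usage sketch of the new index/compact emitters follows. It is a minimal illustration only, assuming the usual HotSpot aarch64 register definitions (v0, v1, p0, r10), a MacroAssembler* named masm, and an SVE-capable target; the helper function and register choices are invented for the example and are not part of this change.

  #include "asm/macroAssembler.hpp"

  #define __ masm->
  // Illustrative sketch: build an index vector, then compact its active elements.
  static void emit_index_compact_sketch(MacroAssembler* masm) {
    __ sve_index(v0, __ S, 0, 1);       // v0.s[n] = 0 + n * 1
    __ sve_index(v1, __ S, r10, 2);     // v1.s[n] = r10 + n * 2
    __ sve_compact(v1, __ S, v0, p0);   // pack the p0-active elements of v0 into v1, zero the rest
  }
  #undef __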