7 * published by the Free Software Foundation. Oracle designates this
8 * particular file as subject to the "Classpath" exception as provided
9 * by Oracle in the LICENSE file that accompanied this code.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 */
25 package jdk.incubator.vector;
26
27 import java.nio.ByteBuffer;
28 import java.util.Arrays;
29 import java.util.Objects;
30 import java.util.function.IntUnaryOperator;
31
32 import jdk.internal.vm.annotation.ForceInline;
33 import jdk.internal.vm.vector.VectorSupport;
34
35 import static jdk.internal.vm.vector.VectorSupport.*;
36
37 import static jdk.incubator.vector.VectorOperators.*;
38
39 // -- This file was mechanically generated: Do not edit! -- //
40
41 @SuppressWarnings("cast") // warning: redundant cast
42 final class Byte64Vector extends ByteVector {
43 static final ByteSpecies VSPECIES =
44 (ByteSpecies) ByteVector.SPECIES_64;
45
46 static final VectorShape VSHAPE =
47 VSPECIES.vectorShape();
48
49 static final Class<Byte64Vector> VCLASS = Byte64Vector.class;
50
51 static final int VSIZE = VSPECIES.vectorBitSize();
457 @ForceInline
458 public Byte64Vector rearrange(VectorShuffle<Byte> shuffle,
459 VectorMask<Byte> m) {
460 return (Byte64Vector)
461 super.rearrangeTemplate(Byte64Shuffle.class,
462 Byte64Mask.class,
463 (Byte64Shuffle) shuffle,
464 (Byte64Mask) m); // specialize
465 }
466
    // Shape-specialized two-vector rearrange: forwards to the shared
    // ByteVector template, narrowing the operands to the concrete Byte64
    // types so the VM can specialize the call.
    @Override
    @ForceInline
    public Byte64Vector rearrange(VectorShuffle<Byte> s,
                                  Vector<Byte> v) {
        return (Byte64Vector)
            super.rearrangeTemplate(Byte64Shuffle.class,
                                    (Byte64Shuffle) s,
                                    (Byte64Vector) v);  // specialize
    }
476
    // Shape-specialized selectFrom: delegates to the generic
    // ByteVector.selectFromTemplate with the concrete Byte64 operand type.
    @Override
    @ForceInline
    public Byte64Vector selectFrom(Vector<Byte> v) {
        return (Byte64Vector)
            super.selectFromTemplate((Byte64Vector) v); // specialize
    }

    // Masked variant; also a pure delegation to the shared template,
    // additionally narrowing the mask to Byte64Mask.
    @Override
    @ForceInline
    public Byte64Vector selectFrom(Vector<Byte> v,
                                   VectorMask<Byte> m) {
        return (Byte64Vector)
            super.selectFromTemplate((Byte64Vector) v,
                                     (Byte64Mask) m); // specialize
    }
492
493
494 @ForceInline
495 @Override
496 public byte lane(int i) {
644 this, species,
645 (m, s) -> s.maskFactory(m.toArray()).check(s));
646 }
647
    // Lanewise mask equality (XNOR): a lane is set iff this and {@code mask}
    // agree.  Implemented as {@code this ^ ~mask}, since a == b is !(a ^ b).
    @Override
    @ForceInline
    public Byte64Mask eq(VectorMask<Byte> mask) {
        Objects.requireNonNull(mask);
        Byte64Mask m = (Byte64Mask)mask;
        return xor(m.not());
    }
655
656 // Unary operations
657
    // Lanewise mask complement, expressed as XOR with an all-true mask
    // (a ^ true == !a), reusing the intrinsified xor path.
    @Override
    @ForceInline
    public Byte64Mask not() {
        return xor(maskAll(true));
    }
663
664 // Binary operations
665
    // Lanewise mask AND.  VectorSupport.binaryOp routes to a VM intrinsic
    // when available; the trailing lambda is the pure-Java fallback that
    // combines corresponding lanes with the scalar '&'.
    @Override
    @ForceInline
    public Byte64Mask and(VectorMask<Byte> mask) {
        Objects.requireNonNull(mask);
        Byte64Mask m = (Byte64Mask)mask;
        return VectorSupport.binaryOp(VECTOR_OP_AND, Byte64Mask.class, null, byte.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b));
    }
675
676 @Override
677 @ForceInline
678 public Byte64Mask or(VectorMask<Byte> mask) {
679 Objects.requireNonNull(mask);
680 Byte64Mask m = (Byte64Mask)mask;
681 return VectorSupport.binaryOp(VECTOR_OP_OR, Byte64Mask.class, null, byte.class, VLENGTH,
682 this, m, null,
683 (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b));
843
844
845
    // Low-level load operations.  Each override is a pure delegation to the
    // generic ByteVector template; the masked forms additionally pass the
    // concrete Byte64Mask class — presumably so the VM intrinsic can
    // specialize on the mask type (TODO confirm against VectorSupport).

    // Unmasked load of bytes from a boolean[] (one lane per element).
    @ForceInline
    @Override
    final
    ByteVector fromBooleanArray0(boolean[] a, int offset) {
        return super.fromBooleanArray0Template(a, offset); // specialize
    }

    // Masked load from a boolean[].
    @ForceInline
    @Override
    final
    ByteVector fromBooleanArray0(boolean[] a, int offset, VectorMask<Byte> m) {
        return super.fromBooleanArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m); // specialize
    }

    // Unmasked load from a byte[].
    @ForceInline
    @Override
    final
    ByteVector fromByteArray0(byte[] a, int offset) {
        return super.fromByteArray0Template(a, offset); // specialize
    }

    // Masked load from a byte[].
    @ForceInline
    @Override
    final
    ByteVector fromByteArray0(byte[] a, int offset, VectorMask<Byte> m) {
        return super.fromByteArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m); // specialize
    }

    // Unmasked load from a ByteBuffer.
    @ForceInline
    @Override
    final
    ByteVector fromByteBuffer0(ByteBuffer bb, int offset) {
        return super.fromByteBuffer0Template(bb, offset); // specialize
    }

    // Masked load from a ByteBuffer.
    @ForceInline
    @Override
    final
    ByteVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Byte> m) {
        return super.fromByteBuffer0Template(Byte64Mask.class, bb, offset, (Byte64Mask) m); // specialize
    }
887
    // Low-level store operations.  Mirrors of the load overrides above:
    // each forwards to the shared ByteVector template, with the masked
    // forms passing the concrete Byte64Mask class for specialization.

    // Unmasked store into a byte[].
    @ForceInline
    @Override
    final
    void intoArray0(byte[] a, int offset) {
        super.intoArray0Template(a, offset); // specialize
    }

    // Masked store into a byte[].
    @ForceInline
    @Override
    final
    void intoArray0(byte[] a, int offset, VectorMask<Byte> m) {
        super.intoArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m);
    }


    // Masked store into a boolean[] (only masked form is specialized here).
    @ForceInline
    @Override
    final
    void intoBooleanArray0(boolean[] a, int offset, VectorMask<Byte> m) {
        super.intoBooleanArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m);
    }

    // Unmasked store into a byte[] viewed as raw bytes.
    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset) {
        super.intoByteArray0Template(a, offset); // specialize
    }

    // Masked raw-byte store.
    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset, VectorMask<Byte> m) {
        super.intoByteArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m); // specialize
    }

    // Masked store into a ByteBuffer.
    @ForceInline
    @Override
    final
    void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Byte> m) {
        super.intoByteBuffer0Template(Byte64Mask.class, bb, offset, (Byte64Mask) m);
    }
930
931
932 // End of specialized low-level memory operations.
933
934 // ================================================
935
936 }
|
7 * published by the Free Software Foundation. Oracle designates this
8 * particular file as subject to the "Classpath" exception as provided
9 * by Oracle in the LICENSE file that accompanied this code.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 */
25 package jdk.incubator.vector;
26
27 import java.util.Arrays;
28 import java.util.Objects;
29 import java.util.function.IntUnaryOperator;
30
31 import jdk.incubator.foreign.MemorySegment;
32 import jdk.internal.vm.annotation.ForceInline;
33 import jdk.internal.vm.vector.VectorSupport;
34
35 import static jdk.internal.vm.vector.VectorSupport.*;
36
37 import static jdk.incubator.vector.VectorOperators.*;
38
39 // -- This file was mechanically generated: Do not edit! -- //
40
41 @SuppressWarnings("cast") // warning: redundant cast
42 final class Byte64Vector extends ByteVector {
43 static final ByteSpecies VSPECIES =
44 (ByteSpecies) ByteVector.SPECIES_64;
45
46 static final VectorShape VSHAPE =
47 VSPECIES.vectorShape();
48
49 static final Class<Byte64Vector> VCLASS = Byte64Vector.class;
50
51 static final int VSIZE = VSPECIES.vectorBitSize();
457 @ForceInline
458 public Byte64Vector rearrange(VectorShuffle<Byte> shuffle,
459 VectorMask<Byte> m) {
460 return (Byte64Vector)
461 super.rearrangeTemplate(Byte64Shuffle.class,
462 Byte64Mask.class,
463 (Byte64Shuffle) shuffle,
464 (Byte64Mask) m); // specialize
465 }
466
    // Shape-specialized two-vector rearrange: forwards to the shared
    // ByteVector template, narrowing the operands to the concrete Byte64
    // types so the VM can specialize the call.
    @Override
    @ForceInline
    public Byte64Vector rearrange(VectorShuffle<Byte> s,
                                  Vector<Byte> v) {
        return (Byte64Vector)
            super.rearrangeTemplate(Byte64Shuffle.class,
                                    (Byte64Shuffle) s,
                                    (Byte64Vector) v);  // specialize
    }
476
    // Shape-specialized cross-lane compress under a mask: pure delegation
    // to the shared ByteVector template with the concrete Byte64Mask class.
    @Override
    @ForceInline
    public Byte64Vector compress(VectorMask<Byte> m) {
        return (Byte64Vector)
            super.compressTemplate(Byte64Mask.class,
                                   (Byte64Mask) m);  // specialize
    }

    // Inverse operation: shape-specialized expand under a mask, same
    // delegation pattern as compress above.
    @Override
    @ForceInline
    public Byte64Vector expand(VectorMask<Byte> m) {
        return (Byte64Vector)
            super.expandTemplate(Byte64Mask.class,
                                 (Byte64Mask) m);  // specialize
    }
492
    // Shape-specialized selectFrom: delegates to the generic
    // ByteVector.selectFromTemplate with the concrete Byte64 operand type.
    @Override
    @ForceInline
    public Byte64Vector selectFrom(Vector<Byte> v) {
        return (Byte64Vector)
            super.selectFromTemplate((Byte64Vector) v); // specialize
    }

    // Masked variant; also a pure delegation to the shared template,
    // additionally narrowing the mask to Byte64Mask.
    @Override
    @ForceInline
    public Byte64Vector selectFrom(Vector<Byte> v,
                                   VectorMask<Byte> m) {
        return (Byte64Vector)
            super.selectFromTemplate((Byte64Vector) v,
                                     (Byte64Mask) m); // specialize
    }
508
509
510 @ForceInline
511 @Override
512 public byte lane(int i) {
660 this, species,
661 (m, s) -> s.maskFactory(m.toArray()).check(s));
662 }
663
    // Lanewise mask equality (XNOR): a lane is set iff this and {@code mask}
    // agree.  Implemented as {@code this ^ ~mask}, since a == b is !(a ^ b).
    @Override
    @ForceInline
    public Byte64Mask eq(VectorMask<Byte> mask) {
        Objects.requireNonNull(mask);
        Byte64Mask m = (Byte64Mask)mask;
        return xor(m.not());
    }
671
672 // Unary operations
673
    // Lanewise mask complement, expressed as XOR with an all-true mask
    // (a ^ true == !a), reusing the intrinsified xor path.
    @Override
    @ForceInline
    public Byte64Mask not() {
        return xor(maskAll(true));
    }

    // Mask compression via the comExpOp intrinsic.  The Java fallback
    // builds a mask whose lowest trueCount() lanes are set, by comparing
    // the iota vector (0,1,2,...) against trueCount with LT.
    @Override
    @ForceInline
    public Byte64Mask compress() {
        return (Byte64Mask)VectorSupport.comExpOp(VectorSupport.VECTOR_OP_MASK_COMPRESS,
            Byte64Vector.class, Byte64Mask.class, ETYPE, VLENGTH, null, this,
            (v1, m1) -> VSPECIES.iota().compare(VectorOperators.LT, m1.trueCount()));
    }
687
688
689 // Binary operations
690
    // Lanewise mask AND.  VectorSupport.binaryOp routes to a VM intrinsic
    // when available; the trailing lambda is the pure-Java fallback that
    // combines corresponding lanes with the scalar '&'.
    @Override
    @ForceInline
    public Byte64Mask and(VectorMask<Byte> mask) {
        Objects.requireNonNull(mask);
        Byte64Mask m = (Byte64Mask)mask;
        return VectorSupport.binaryOp(VECTOR_OP_AND, Byte64Mask.class, null, byte.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b));
    }
700
701 @Override
702 @ForceInline
703 public Byte64Mask or(VectorMask<Byte> mask) {
704 Objects.requireNonNull(mask);
705 Byte64Mask m = (Byte64Mask)mask;
706 return VectorSupport.binaryOp(VECTOR_OP_OR, Byte64Mask.class, null, byte.class, VLENGTH,
707 this, m, null,
708 (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b));
868
869
870
    // Low-level load operations.  Each override is a pure delegation to the
    // generic ByteVector template; the masked forms additionally pass the
    // concrete Byte64Mask class — presumably so the VM intrinsic can
    // specialize on the mask type (TODO confirm against VectorSupport).

    // Unmasked load of bytes from a boolean[] (one lane per element).
    @ForceInline
    @Override
    final
    ByteVector fromBooleanArray0(boolean[] a, int offset) {
        return super.fromBooleanArray0Template(a, offset); // specialize
    }

    // Masked load from a boolean[].
    @ForceInline
    @Override
    final
    ByteVector fromBooleanArray0(boolean[] a, int offset, VectorMask<Byte> m) {
        return super.fromBooleanArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m); // specialize
    }

    // Unmasked load from a MemorySegment (byte offset).
    @ForceInline
    @Override
    final
    ByteVector fromMemorySegment0(MemorySegment ms, long offset) {
        return super.fromMemorySegment0Template(ms, offset);  // specialize
    }

    // Masked load from a MemorySegment.
    @ForceInline
    @Override
    final
    ByteVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Byte> m) {
        return super.fromMemorySegment0Template(Byte64Mask.class, ms, offset, (Byte64Mask) m);  // specialize
    }
898
    // Low-level store operations.  Mirrors of the load overrides above:
    // each forwards to the shared ByteVector template, with the masked
    // forms passing the concrete Byte64Mask class for specialization.

    // Unmasked store into a byte[].
    @ForceInline
    @Override
    final
    void intoArray0(byte[] a, int offset) {
        super.intoArray0Template(a, offset); // specialize
    }

    // Masked store into a byte[].
    @ForceInline
    @Override
    final
    void intoArray0(byte[] a, int offset, VectorMask<Byte> m) {
        super.intoArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m);
    }


    // Masked store into a boolean[] (only masked form is specialized here).
    @ForceInline
    @Override
    final
    void intoBooleanArray0(boolean[] a, int offset, VectorMask<Byte> m) {
        super.intoBooleanArray0Template(Byte64Mask.class, a, offset, (Byte64Mask) m);
    }

    // Masked store into a MemorySegment.
    @ForceInline
    @Override
    final
    void intoMemorySegment0(MemorySegment ms, long offset, VectorMask<Byte> m) {
        super.intoMemorySegment0Template(Byte64Mask.class, ms, offset, (Byte64Mask) m);
    }
927
928
929 // End of specialized low-level memory operations.
930
931 // ================================================
932
933 }
|