 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.incubator.vector;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.IntUnaryOperator;

import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;

import static jdk.internal.vm.vector.VectorSupport.*;

import static jdk.incubator.vector.VectorOperators.*;

// -- This file was mechanically generated: Do not edit! -- //

@SuppressWarnings("cast") // warning: redundant cast
final class Double128Vector extends DoubleVector {
    static final DoubleSpecies VSPECIES =
        (DoubleSpecies) DoubleVector.SPECIES_128;

    static final VectorShape VSHAPE =
        VSPECIES.vectorShape();

    static final Class<Double128Vector> VCLASS = Double128Vector.class;

    static final int VSIZE = VSPECIES.vectorBitSize();
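
    /*
     * Illustrative sketch, not part of the generated source: what the species
     * constants above mean for user code.  Values assume a 128-bit shape
     * holding 64-bit double lanes; the variable name S is hypothetical.
     *
     *   VectorSpecies<Double> S = DoubleVector.SPECIES_128;
     *   S.length();         // 2 lanes (128 bits / 64 bits per double)
     *   S.vectorBitSize();  // 128
     *   S.vectorShape();    // VectorShape.S_128_BIT
     *   S.elementType();    // double.class
     */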

    // ...

    @ForceInline
    public Double128Vector rearrange(VectorShuffle<Double> shuffle,
                                     VectorMask<Double> m) {
        return (Double128Vector)
            super.rearrangeTemplate(Double128Shuffle.class,
                                    Double128Mask.class,
                                    (Double128Shuffle) shuffle,
                                    (Double128Mask) m); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector rearrange(VectorShuffle<Double> s,
                                     Vector<Double> v) {
        return (Double128Vector)
            super.rearrangeTemplate(Double128Shuffle.class,
                                    (Double128Shuffle) s,
                                    (Double128Vector) v); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector selectFrom(Vector<Double> v) {
        return (Double128Vector)
            super.selectFromTemplate((Double128Vector) v); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector selectFrom(Vector<Double> v,
                                      VectorMask<Double> m) {
        return (Double128Vector)
            super.selectFromTemplate((Double128Vector) v,
                                     (Double128Mask) m); // specialize
    }
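
    /*
     * Illustrative sketch, not part of the generated source: how the public
     * DoubleVector API reaches the rearrange/selectFrom specializations above.
     * Assumes SPECIES_128 (two lanes); variable names are hypothetical.
     *
     *   VectorSpecies<Double> S = DoubleVector.SPECIES_128;
     *   DoubleVector v = DoubleVector.fromArray(S, new double[] {1.0, 2.0}, 0);
     *   VectorShuffle<Double> swap = VectorShuffle.fromValues(S, 1, 0);
     *   v.rearrange(swap);      // lanes swapped: {2.0, 1.0}
     *
     *   DoubleVector idx = DoubleVector.fromArray(S, new double[] {1.0, 0.0}, 0);
     *   idx.selectFrom(v);      // lane values of idx index into v: {2.0, 1.0}
     */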

    @ForceInline
    @Override
    public double lane(int i) {
        // ...
            this, species,
            (m, s) -> s.maskFactory(m.toArray()).check(s));
    }

    @Override
    @ForceInline
    public Double128Mask eq(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double128Mask m = (Double128Mask)mask;
        return xor(m.not());
    }

    // Unary operations

    @Override
    @ForceInline
    public Double128Mask not() {
        return xor(maskAll(true));
    }

    // Binary operations

    @Override
    @ForceInline
    public Double128Mask and(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double128Mask m = (Double128Mask)mask;
        return VectorSupport.binaryOp(VECTOR_OP_AND, Double128Mask.class, null, long.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b));
    }
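
    /*
     * Illustrative sketch, not part of the generated source: the lane-wise
     * logic above backs the public VectorMask operations.  Assumes SPECIES_128;
     * variable names are hypothetical.
     *
     *   VectorSpecies<Double> S = DoubleVector.SPECIES_128;
     *   VectorMask<Double> a = VectorMask.fromValues(S, true, false);
     *   VectorMask<Double> b = VectorMask.fromValues(S, true, true);
     *   a.and(b);  // {true, false}
     *   a.or(b);   // {true, true}
     *   a.not();   // {false, true}
     *   a.eq(b);   // lanes where a and b agree: {true, false}
     */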

    @Override
    @ForceInline
    public Double128Mask or(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double128Mask m = (Double128Mask)mask;
        return VectorSupport.binaryOp(VECTOR_OP_OR, Double128Mask.class, null, long.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b));
    }

    // ...

    @ForceInline
    @Override
    final
    DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m) {
        return super.fromArray0Template(Double128Mask.class, a, offset, (Double128Mask) m); // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
        return super.fromArray0Template(Double128Mask.class, a, offset, indexMap, mapOffset, (Double128Mask) m);
    }

    @ForceInline
    @Override
    final
    DoubleVector fromByteArray0(byte[] a, int offset) {
        return super.fromByteArray0Template(a, offset); // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromByteArray0(byte[] a, int offset, VectorMask<Double> m) {
        return super.fromByteArray0Template(Double128Mask.class, a, offset, (Double128Mask) m); // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromByteBuffer0(ByteBuffer bb, int offset) {
        return super.fromByteBuffer0Template(bb, offset); // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m) {
        return super.fromByteBuffer0Template(Double128Mask.class, bb, offset, (Double128Mask) m); // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset) {
        super.intoArray0Template(a, offset); // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset, VectorMask<Double> m) {
        super.intoArray0Template(Double128Mask.class, a, offset, (Double128Mask) m);
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
        super.intoArray0Template(Double128Mask.class, a, offset, indexMap, mapOffset, (Double128Mask) m);
    }

    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset) {
        super.intoByteArray0Template(a, offset); // specialize
    }

    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset, VectorMask<Double> m) {
        super.intoByteArray0Template(Double128Mask.class, a, offset, (Double128Mask) m); // specialize
    }

    @ForceInline
    @Override
    final
    void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m) {
        super.intoByteBuffer0Template(Double128Mask.class, bb, offset, (Double128Mask) m);
    }

    // End of specialized low-level memory operations.
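
    /*
     * Illustrative sketch, not part of the generated source: the public entry
     * points that funnel into the fromArray0/fromByteBuffer0/intoArray0
     * specializations above, as shaped in the ByteBuffer-based incubator
     * releases.  Variable names are hypothetical.
     *
     *   VectorSpecies<Double> S = DoubleVector.SPECIES_128;
     *   double[] src = {1.0, 2.0};
     *   double[] dst = new double[2];
     *   ByteBuffer bb = ByteBuffer.allocate(S.vectorByteSize())
     *                             .order(ByteOrder.nativeOrder());
     *
     *   DoubleVector v = DoubleVector.fromArray(S, src, 0);   // array load
     *   v.intoByteBuffer(bb, 0, ByteOrder.nativeOrder());     // serialize to the buffer
     *   DoubleVector w = DoubleVector.fromByteBuffer(S, bb, 0, ByteOrder.nativeOrder());
     *   w.intoArray(dst, 0);                                  // array store
     */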

    // ================================================

}
|
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.incubator.vector;

import java.util.Arrays;
import java.util.Objects;
import java.util.function.IntUnaryOperator;

import jdk.incubator.foreign.MemorySegment;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;

import static jdk.internal.vm.vector.VectorSupport.*;

import static jdk.incubator.vector.VectorOperators.*;

// -- This file was mechanically generated: Do not edit! -- //

@SuppressWarnings("cast") // warning: redundant cast
final class Double128Vector extends DoubleVector {
    static final DoubleSpecies VSPECIES =
        (DoubleSpecies) DoubleVector.SPECIES_128;

    static final VectorShape VSHAPE =
        VSPECIES.vectorShape();

    static final Class<Double128Vector> VCLASS = Double128Vector.class;

    static final int VSIZE = VSPECIES.vectorBitSize();

    // ...

    @ForceInline
    public Double128Vector rearrange(VectorShuffle<Double> shuffle,
                                     VectorMask<Double> m) {
        return (Double128Vector)
            super.rearrangeTemplate(Double128Shuffle.class,
                                    Double128Mask.class,
                                    (Double128Shuffle) shuffle,
                                    (Double128Mask) m); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector rearrange(VectorShuffle<Double> s,
                                     Vector<Double> v) {
        return (Double128Vector)
            super.rearrangeTemplate(Double128Shuffle.class,
                                    (Double128Shuffle) s,
                                    (Double128Vector) v); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector compress(VectorMask<Double> m) {
        return (Double128Vector)
            super.compressTemplate(Double128Mask.class,
                                   (Double128Mask) m); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector expand(VectorMask<Double> m) {
        return (Double128Vector)
            super.expandTemplate(Double128Mask.class,
                                 (Double128Mask) m); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector selectFrom(Vector<Double> v) {
        return (Double128Vector)
            super.selectFromTemplate((Double128Vector) v); // specialize
    }

    @Override
    @ForceInline
    public Double128Vector selectFrom(Vector<Double> v,
                                      VectorMask<Double> m) {
        return (Double128Vector)
            super.selectFromTemplate((Double128Vector) v,
                                     (Double128Mask) m); // specialize
    }
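
    /*
     * Illustrative sketch, not part of the generated source: the cross-lane
     * compress/expand behaviour these overrides specialize.  Assumes
     * SPECIES_128; variable names are hypothetical.
     *
     *   VectorSpecies<Double> S = DoubleVector.SPECIES_128;
     *   DoubleVector v = DoubleVector.fromArray(S, new double[] {1.0, 2.0}, 0);
     *   VectorMask<Double> keepLane1 = VectorMask.fromValues(S, false, true);
     *
     *   v.compress(keepLane1);  // selected lanes packed to the front: {2.0, 0.0}
     *   v.expand(keepLane1);    // leading lanes scattered to selected positions: {0.0, 1.0}
     */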

    @ForceInline
    @Override
    public double lane(int i) {
        // ...
            this, species,
            (m, s) -> s.maskFactory(m.toArray()).check(s));
    }

    @Override
    @ForceInline
    public Double128Mask eq(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double128Mask m = (Double128Mask)mask;
        return xor(m.not());
    }

    // Unary operations

    @Override
    @ForceInline
    public Double128Mask not() {
        return xor(maskAll(true));
    }

    @Override
    @ForceInline
    public Double128Mask compress() {
        return (Double128Mask)VectorSupport.comExpOp(VectorSupport.VECTOR_OP_MASK_COMPRESS,
            Double128Vector.class, Double128Mask.class, ETYPE, VLENGTH, null, this,
            (v1, m1) -> VSPECIES.iota().compare(VectorOperators.LT, m1.trueCount()));
    }
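
    /*
     * Illustrative sketch, not part of the generated source: the public
     * VectorMask.compress() behaviour this override backs; the result keeps
     * the same number of set lanes, moved to the lowest indices.  Variable
     * names are hypothetical.
     *
     *   VectorMask<Double> m = VectorMask.fromValues(DoubleVector.SPECIES_128, false, true);
     *   m.compress();    // {true, false}
     *   m.trueCount();   // 1 (m.compress().trueCount() is also 1)
     */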

    // Binary operations

    @Override
    @ForceInline
    public Double128Mask and(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double128Mask m = (Double128Mask)mask;
        return VectorSupport.binaryOp(VECTOR_OP_AND, Double128Mask.class, null, long.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b));
    }

    @Override
    @ForceInline
    public Double128Mask or(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double128Mask m = (Double128Mask)mask;
        return VectorSupport.binaryOp(VECTOR_OP_OR, Double128Mask.class, null, long.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b));
    }

    // ...

    @ForceInline
    @Override
    final
    DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m) {
        return super.fromArray0Template(Double128Mask.class, a, offset, (Double128Mask) m); // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
        return super.fromArray0Template(Double128Mask.class, a, offset, indexMap, mapOffset, (Double128Mask) m);
    }

    @ForceInline
    @Override
    final
    DoubleVector fromMemorySegment0(MemorySegment ms, long offset) {
        return super.fromMemorySegment0Template(ms, offset); // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Double> m) {
        return super.fromMemorySegment0Template(Double128Mask.class, ms, offset, (Double128Mask) m); // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset) {
        super.intoArray0Template(a, offset); // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset, VectorMask<Double> m) {
        super.intoArray0Template(Double128Mask.class, a, offset, (Double128Mask) m);
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
        super.intoArray0Template(Double128Mask.class, a, offset, indexMap, mapOffset, (Double128Mask) m);
    }

    @ForceInline
    @Override
    final
    void intoMemorySegment0(MemorySegment ms, long offset, VectorMask<Double> m) {
        super.intoMemorySegment0Template(Double128Mask.class, ms, offset, (Double128Mask) m);
    }

    // End of specialized low-level memory operations.
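
    /*
     * Illustrative sketch, not part of the generated source: the public
     * MemorySegment entry points that reach the fromMemorySegment0 and
     * intoMemorySegment0 specializations above.  The heap-segment factory
     * shown follows the foreign-memory API matching the import in this file;
     * variable names are hypothetical.
     *
     *   VectorSpecies<Double> S = DoubleVector.SPECIES_128;
     *   MemorySegment seg = MemorySegment.ofArray(new byte[S.vectorByteSize()]);
     *
     *   DoubleVector v = DoubleVector.fromArray(S, new double[] {1.0, 2.0}, 0);
     *   v.intoMemorySegment(seg, 0L, ByteOrder.nativeOrder());   // store two lanes
     *   DoubleVector w = DoubleVector.fromMemorySegment(S, seg, 0L, ByteOrder.nativeOrder());
     */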

    // ================================================

}
|