7 * published by the Free Software Foundation. Oracle designates this
8 * particular file as subject to the "Classpath" exception as provided
9 * by Oracle in the LICENSE file that accompanied this code.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 */
25 package jdk.incubator.vector;
26
27 import java.nio.ByteBuffer;
28 import java.util.Arrays;
29 import java.util.Objects;
30 import java.util.function.IntUnaryOperator;
31
32 import jdk.internal.vm.annotation.ForceInline;
33 import jdk.internal.vm.vector.VectorSupport;
34
35 import static jdk.internal.vm.vector.VectorSupport.*;
36
37 import static jdk.incubator.vector.VectorOperators.*;
38
39 // -- This file was mechanically generated: Do not edit! -- //
40
@SuppressWarnings("cast") // warning: redundant cast
final class Double64Vector extends DoubleVector {
    // Species descriptor for the 64-bit shape of double (one lane).
    static final DoubleSpecies VSPECIES =
        (DoubleSpecies) DoubleVector.SPECIES_64;

    // Bit-size category shared by all vectors of this species.
    static final VectorShape VSHAPE =
        VSPECIES.vectorShape();

    // Concrete class token; passed to VectorSupport so the JIT can specialize.
    static final Class<Double64Vector> VCLASS = Double64Vector.class;

    // Vector size in bits (64 for this species).
    static final int VSIZE = VSPECIES.vectorBitSize();
444 @ForceInline
445 public Double64Vector rearrange(VectorShuffle<Double> shuffle,
446 VectorMask<Double> m) {
447 return (Double64Vector)
448 super.rearrangeTemplate(Double64Shuffle.class,
449 Double64Mask.class,
450 (Double64Shuffle) shuffle,
451 (Double64Mask) m); // specialize
452 }
453
454 @Override
455 @ForceInline
456 public Double64Vector rearrange(VectorShuffle<Double> s,
457 Vector<Double> v) {
458 return (Double64Vector)
459 super.rearrangeTemplate(Double64Shuffle.class,
460 (Double64Shuffle) s,
461 (Double64Vector) v); // specialize
462 }
463
464 @Override
465 @ForceInline
466 public Double64Vector selectFrom(Vector<Double> v) {
467 return (Double64Vector)
468 super.selectFromTemplate((Double64Vector) v); // specialize
469 }
470
471 @Override
472 @ForceInline
473 public Double64Vector selectFrom(Vector<Double> v,
474 VectorMask<Double> m) {
475 return (Double64Vector)
476 super.selectFromTemplate((Double64Vector) v,
477 (Double64Mask) m); // specialize
478 }
479
480
481 @ForceInline
482 @Override
483 public double lane(int i) {
619 this, species,
620 (m, s) -> s.maskFactory(m.toArray()).check(s));
621 }
622
623 @Override
624 @ForceInline
625 public Double64Mask eq(VectorMask<Double> mask) {
626 Objects.requireNonNull(mask);
627 Double64Mask m = (Double64Mask)mask;
628 return xor(m.not());
629 }
630
631 // Unary operations
632
    @Override
    @ForceInline
    public Double64Mask not() {
        // Lane-wise complement, expressed as XOR with an all-true mask.
        return xor(maskAll(true));
    }
638
639 // Binary operations
640
    @Override
    @ForceInline
    public Double64Mask and(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double64Mask m = (Double64Mask)mask;
        // Intrinsic candidate; the lambda is the Java fallback run when the
        // JIT does not intrinsify VECTOR_OP_AND for this mask class.
        return VectorSupport.binaryOp(VECTOR_OP_AND, Double64Mask.class, null, long.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b));
    }
650
651 @Override
652 @ForceInline
653 public Double64Mask or(VectorMask<Double> mask) {
654 Objects.requireNonNull(mask);
655 Double64Mask m = (Double64Mask)mask;
656 return VectorSupport.binaryOp(VECTOR_OP_OR, Double64Mask.class, null, long.class, VLENGTH,
657 this, m, null,
658 (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b));
811
812 @ForceInline
813 @Override
814 final
815 DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m) {
816 return super.fromArray0Template(Double64Mask.class, a, offset, (Double64Mask) m); // specialize
817 }
818
819 @ForceInline
820 @Override
821 final
822 DoubleVector fromArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
823 return super.fromArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m);
824 }
825
826
827
    @ForceInline
    @Override
    final
    DoubleVector fromByteArray0(byte[] a, int offset) {
        // Unmasked raw load from a byte[]; the shared template does the work.
        return super.fromByteArray0Template(a, offset); // specialize
    }
834
835 @ForceInline
836 @Override
837 final
838 DoubleVector fromByteArray0(byte[] a, int offset, VectorMask<Double> m) {
839 return super.fromByteArray0Template(Double64Mask.class, a, offset, (Double64Mask) m); // specialize
840 }
841
    @ForceInline
    @Override
    final
    DoubleVector fromByteBuffer0(ByteBuffer bb, int offset) {
        // Unmasked raw load from a ByteBuffer; the shared template does the work.
        return super.fromByteBuffer0Template(bb, offset); // specialize
    }
848
849 @ForceInline
850 @Override
851 final
852 DoubleVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m) {
853 return super.fromByteBuffer0Template(Double64Mask.class, bb, offset, (Double64Mask) m); // specialize
854 }
855
    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset) {
        // Unmasked raw store into a double[]; the shared template does the work.
        super.intoArray0Template(a, offset); // specialize
    }
862
863 @ForceInline
864 @Override
865 final
866 void intoArray0(double[] a, int offset, VectorMask<Double> m) {
867 super.intoArray0Template(Double64Mask.class, a, offset, (Double64Mask) m);
868 }
869
870 @ForceInline
871 @Override
872 final
873 void intoArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
874 super.intoArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m);
875 }
876
877
    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset) {
        // Unmasked raw store into a byte[]; the shared template does the work.
        super.intoByteArray0Template(a, offset); // specialize
    }
884
885 @ForceInline
886 @Override
887 final
888 void intoByteArray0(byte[] a, int offset, VectorMask<Double> m) {
889 super.intoByteArray0Template(Double64Mask.class, a, offset, (Double64Mask) m); // specialize
890 }
891
892 @ForceInline
893 @Override
894 final
895 void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m) {
896 super.intoByteBuffer0Template(Double64Mask.class, bb, offset, (Double64Mask) m);
897 }
898
899
900 // End of specialized low-level memory operations.
901
902 // ================================================
903
904 }
|
7 * published by the Free Software Foundation. Oracle designates this
8 * particular file as subject to the "Classpath" exception as provided
9 * by Oracle in the LICENSE file that accompanied this code.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 */
25 package jdk.incubator.vector;
26
27 import java.util.Arrays;
28 import java.util.Objects;
29 import java.util.function.IntUnaryOperator;
30
31 import jdk.incubator.foreign.MemorySegment;
32 import jdk.internal.vm.annotation.ForceInline;
33 import jdk.internal.vm.vector.VectorSupport;
34
35 import static jdk.internal.vm.vector.VectorSupport.*;
36
37 import static jdk.incubator.vector.VectorOperators.*;
38
39 // -- This file was mechanically generated: Do not edit! -- //
40
@SuppressWarnings("cast") // warning: redundant cast
final class Double64Vector extends DoubleVector {
    // Species descriptor for the 64-bit shape of double (one lane).
    static final DoubleSpecies VSPECIES =
        (DoubleSpecies) DoubleVector.SPECIES_64;

    // Bit-size category shared by all vectors of this species.
    static final VectorShape VSHAPE =
        VSPECIES.vectorShape();

    // Concrete class token; passed to VectorSupport so the JIT can specialize.
    static final Class<Double64Vector> VCLASS = Double64Vector.class;

    // Vector size in bits (64 for this species).
    static final int VSIZE = VSPECIES.vectorBitSize();
444 @ForceInline
445 public Double64Vector rearrange(VectorShuffle<Double> shuffle,
446 VectorMask<Double> m) {
447 return (Double64Vector)
448 super.rearrangeTemplate(Double64Shuffle.class,
449 Double64Mask.class,
450 (Double64Shuffle) shuffle,
451 (Double64Mask) m); // specialize
452 }
453
454 @Override
455 @ForceInline
456 public Double64Vector rearrange(VectorShuffle<Double> s,
457 Vector<Double> v) {
458 return (Double64Vector)
459 super.rearrangeTemplate(Double64Shuffle.class,
460 (Double64Shuffle) s,
461 (Double64Vector) v); // specialize
462 }
463
464 @Override
465 @ForceInline
466 public Double64Vector compress(VectorMask<Double> m) {
467 return (Double64Vector)
468 super.compressTemplate(Double64Mask.class,
469 (Double64Mask) m); // specialize
470 }
471
472 @Override
473 @ForceInline
474 public Double64Vector expand(VectorMask<Double> m) {
475 return (Double64Vector)
476 super.expandTemplate(Double64Mask.class,
477 (Double64Mask) m); // specialize
478 }
479
480 @Override
481 @ForceInline
482 public Double64Vector selectFrom(Vector<Double> v) {
483 return (Double64Vector)
484 super.selectFromTemplate((Double64Vector) v); // specialize
485 }
486
487 @Override
488 @ForceInline
489 public Double64Vector selectFrom(Vector<Double> v,
490 VectorMask<Double> m) {
491 return (Double64Vector)
492 super.selectFromTemplate((Double64Vector) v,
493 (Double64Mask) m); // specialize
494 }
495
496
497 @ForceInline
498 @Override
499 public double lane(int i) {
635 this, species,
636 (m, s) -> s.maskFactory(m.toArray()).check(s));
637 }
638
639 @Override
640 @ForceInline
641 public Double64Mask eq(VectorMask<Double> mask) {
642 Objects.requireNonNull(mask);
643 Double64Mask m = (Double64Mask)mask;
644 return xor(m.not());
645 }
646
647 // Unary operations
648
    @Override
    @ForceInline
    public Double64Mask not() {
        // Lane-wise complement, expressed as XOR with an all-true mask.
        return xor(maskAll(true));
    }
654
    @Override
    @ForceInline
    public Double64Mask compress() {
        // Intrinsic candidate (VECTOR_OP_MASK_COMPRESS). The lambda is the
        // Java fallback: a mask that is true for the first trueCount() lanes
        // (iota < trueCount), i.e. the set bits packed toward lane 0.
        return (Double64Mask)VectorSupport.comExpOp(VectorSupport.VECTOR_OP_MASK_COMPRESS,
            Double64Vector.class, Double64Mask.class, ETYPE, VLENGTH, null, this,
            (v1, m1) -> VSPECIES.iota().compare(VectorOperators.LT, m1.trueCount()));
    }
662
663
664 // Binary operations
665
    @Override
    @ForceInline
    public Double64Mask and(VectorMask<Double> mask) {
        Objects.requireNonNull(mask);
        Double64Mask m = (Double64Mask)mask;
        // Intrinsic candidate; the lambda is the Java fallback run when the
        // JIT does not intrinsify VECTOR_OP_AND for this mask class.
        return VectorSupport.binaryOp(VECTOR_OP_AND, Double64Mask.class, null, long.class, VLENGTH,
                                      this, m, null,
                                      (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b));
    }
675
676 @Override
677 @ForceInline
678 public Double64Mask or(VectorMask<Double> mask) {
679 Objects.requireNonNull(mask);
680 Double64Mask m = (Double64Mask)mask;
681 return VectorSupport.binaryOp(VECTOR_OP_OR, Double64Mask.class, null, long.class, VLENGTH,
682 this, m, null,
683 (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b));
836
837 @ForceInline
838 @Override
839 final
840 DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m) {
841 return super.fromArray0Template(Double64Mask.class, a, offset, (Double64Mask) m); // specialize
842 }
843
844 @ForceInline
845 @Override
846 final
847 DoubleVector fromArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
848 return super.fromArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m);
849 }
850
851
852
    @ForceInline
    @Override
    final
    DoubleVector fromMemorySegment0(MemorySegment ms, long offset) {
        // Unmasked raw load from a memory segment; the shared template does the work.
        return super.fromMemorySegment0Template(ms, offset); // specialize
    }
859
860 @ForceInline
861 @Override
862 final
863 DoubleVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Double> m) {
864 return super.fromMemorySegment0Template(Double64Mask.class, ms, offset, (Double64Mask) m); // specialize
865 }
866
    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset) {
        // Unmasked raw store into a double[]; the shared template does the work.
        super.intoArray0Template(a, offset); // specialize
    }
873
874 @ForceInline
875 @Override
876 final
877 void intoArray0(double[] a, int offset, VectorMask<Double> m) {
878 super.intoArray0Template(Double64Mask.class, a, offset, (Double64Mask) m);
879 }
880
881 @ForceInline
882 @Override
883 final
884 void intoArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
885 super.intoArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m);
886 }
887
888
889 @ForceInline
890 @Override
891 final
892 void intoMemorySegment0(MemorySegment ms, long offset, VectorMask<Double> m) {
893 super.intoMemorySegment0Template(Double64Mask.class, ms, offset, (Double64Mask) m);
894 }
895
896
897 // End of specialized low-level memory operations.
898
899 // ================================================
900
901 }
|