7 * published by the Free Software Foundation. Oracle designates this
8 * particular file as subject to the "Classpath" exception as provided
9 * by Oracle in the LICENSE file that accompanied this code.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 */
25 package jdk.incubator.vector;
26
27 import java.nio.ByteBuffer;
28 import java.nio.ByteOrder;
29 import java.nio.ReadOnlyBufferException;
30 import java.util.Arrays;
31 import java.util.Objects;
32 import java.util.function.Function;
33 import java.util.function.UnaryOperator;
34
35 import jdk.internal.misc.ScopedMemoryAccess;
36 import jdk.internal.misc.Unsafe;
37 import jdk.internal.vm.annotation.ForceInline;
38 import jdk.internal.vm.vector.VectorSupport;
39
40 import static jdk.internal.vm.vector.VectorSupport.*;
41 import static jdk.incubator.vector.VectorIntrinsics.*;
42
43 import static jdk.incubator.vector.VectorOperators.*;
44
45 // -- This file was mechanically generated: Do not edit! -- //
46
47 /**
48 * A specialized {@link Vector} representing an ordered immutable sequence of
49 * {@code long} values.
50 */
51 @SuppressWarnings("cast") // warning: redundant cast
52 public abstract class LongVector extends AbstractVector<Long> {
53
// Package-private constructor: wraps the given long[] payload without copying.
54 LongVector(long[] vec) {
55 super(vec);
56 }
57
// Long lanes are integral, so floating-point-only opcodes are forbidden in opCode().
58 static final int FORBID_OPCODE_KIND = VO_ONLYFP;
59
// Maps an operator token to its intrinsic opcode, rejecting FP-only operators.
60 @ForceInline
61 static int opCode(Operator op) {
62 return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
63 }
// As opCode(op), but additionally requires the given operator-kind bits.
64 @ForceInline
65 static int opCode(Operator op, int requireKind) {
66 requireKind |= VO_OPCODE_VALID;
67 return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
68 }
// Tests whether the operator carries the given kind bit.
69 @ForceInline
70 static boolean opKind(Operator op, int bit) {
71 return VectorOperators.opKind(op, bit);
72 }
73
74 // Virtualized factories and operators,
75 // coded with portable definitions.
76 // These are all @ForceInline in case
77 // they need to be used performantly.
78 // The various shape-specific subclasses
79 // also specialize them by wrapping
334 return vectorFactory(res);
335 }
336
337 /*package-private*/
// Masked scalar load kernel: lanes with an unset mask bit keep the default
// value zero, because res is a freshly allocated array.
338 @ForceInline
339 final
340 <M> LongVector ldOp(M memory, int offset,
341 VectorMask<Long> m,
342 FLdOp<M> f) {
343 //long[] vec = vec();
344 long[] res = new long[length()];
345 boolean[] mbits = ((AbstractMask<Long>)m).getBits();
346 for (int i = 0; i < res.length; i++) {
347 if (mbits[i]) {
348 res[i] = f.apply(memory, offset, i);
349 }
350 }
351 return vectorFactory(res);
352 }
353
// Store kernel: writes lane value a for logical lane i into memory at offset.
354 interface FStOp<M> {
355 void apply(M memory, int offset, int i, long a);
356 }
357
358 /*package-private*/
// Unmasked scalar store kernel: applies f to every lane in order.
359 @ForceInline
360 final
361 <M> void stOp(M memory, int offset,
362 FStOp<M> f) {
363 long[] vec = vec();
364 for (int i = 0; i < vec.length; i++) {
365 f.apply(memory, offset, i, vec[i]);
366 }
367 }
368
369 /*package-private*/
// Masked scalar store kernel: lanes with an unset mask bit are skipped entirely,
// so the corresponding memory locations are left untouched.
370 @ForceInline
371 final
372 <M> void stOp(M memory, int offset,
373 VectorMask<Long> m,
374 FStOp<M> f) {
375 long[] vec = vec();
376 boolean[] mbits = ((AbstractMask<Long>)m).getBits();
377 for (int i = 0; i < vec.length; i++) {
378 if (mbits[i]) {
379 f.apply(memory, offset, i, vec[i]);
380 }
381 }
382 }
383
384 // Binary test
385
386 /*package-private*/
// Per-lane comparison kernel used by bTest; cond selects which comparison to apply.
387 interface FBinTest {
388 boolean apply(int cond, int i, long a, long b);
389 }
390
391 /*package-private*/
392 @ForceInline
393 final
394 AbstractMask<Long> bTest(int cond,
395 Vector<Long> o,
396 FBinTest f) {
397 long[] vec1 = vec();
398 long[] vec2 = ((LongVector)o).vec();
399 boolean[] bits = new boolean[length()];
400 for (int i = 0; i < length(); i++){
401 bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
402 }
403 return maskFactory(bits);
// Scalar fallback for RROTATE; Long.rotateRight takes n modulo 64.
414 static long rotateRight(long a, int n) {
415 return Long.rotateRight(a, n);
416 }
417
418 /*package-private*/
// Covariant species accessor, specialized by each concrete shape subclass.
419 @Override
420 abstract LongSpecies vspecies();
421
422 /*package-private*/
// For long lanes the value is its own bit pattern; no conversion needed.
423 @ForceInline
424 static long toBits(long e) {
425 return e;
426 }
427
428 /*package-private*/
// Inverse of toBits; identity for long (cast is redundant but kept by the template).
429 @ForceInline
430 static long fromBits(long bits) {
431 return ((long)bits);
432 }
433
434 // Static factories (other than memory operations)
435
436 // Note: A surprising behavior in javadoc
437 // sometimes makes a lone /** {@inheritDoc} */
438 // comment drop the method altogether,
439 // apparently if the method mentions a
440 // parameter or return type of Vector<Long>
441 // instead of Vector<E> as originally specified.
442 // Adding an empty HTML fragment appears to
443 // nudge javadoc into providing the desired
444 // inherited documentation. We use the HTML
445 // comment <!--workaround--> for this.
446
447 /**
448 * Returns a vector of the given species
449 * where all lane elements are set to
450 * zero, the default primitive value.
451 *
452 * @param species species of the desired zero vector
453 * @return a zero vector
561 return lanewise(XOR, broadcast(-1), m);
562 }
563 }
564 int opc = opCode(op);
565 return VectorSupport.unaryOp(
566 opc, getClass(), maskClass, long.class, length(),
567 this, m,
568 UN_IMPL.find(op, opc, LongVector::unaryOperations));
569 }
570
// Caches the scalar-fallback lambda per unary opcode so lookup happens once.
571 private static final
572 ImplCache<Unary, UnaryOperation<LongVector, VectorMask<Long>>>
573 UN_IMPL = new ImplCache<>(Unary.class, LongVector.class);
574
// Scalar fallback bodies for unary ops; returns null when no Java fallback exists.
575 private static UnaryOperation<LongVector, VectorMask<Long>> unaryOperations(int opc_) {
576 switch (opc_) {
577 case VECTOR_OP_NEG: return (v0, m) ->
578 v0.uOp(m, (i, a) -> (long) -a);
579 case VECTOR_OP_ABS: return (v0, m) ->
580 v0.uOp(m, (i, a) -> (long) Math.abs(a));
581 default: return null;
582 }
583 }
584
585 // Binary lanewise support
586
587 /**
588 * {@inheritDoc} <!--workaround-->
589 * @see #lanewise(VectorOperators.Binary,long)
590 * @see #lanewise(VectorOperators.Binary,long,VectorMask)
591 */
592 @Override
593 public abstract
594 LongVector lanewise(VectorOperators.Binary op,
595 Vector<Long> v);
// Implemented per concrete shape via lanewiseTemplate.
596 @ForceInline
597 final
598 LongVector lanewiseTemplate(VectorOperators.Binary op,
599 Vector<Long> v) {
600 LongVector that = (LongVector) v;
701 case VECTOR_OP_MAX: return (v0, v1, vm) ->
702 v0.bOp(v1, vm, (i, a, b) -> (long)Math.max(a, b));
703 case VECTOR_OP_MIN: return (v0, v1, vm) ->
704 v0.bOp(v1, vm, (i, a, b) -> (long)Math.min(a, b));
705 case VECTOR_OP_AND: return (v0, v1, vm) ->
706 v0.bOp(v1, vm, (i, a, b) -> (long)(a & b));
707 case VECTOR_OP_OR: return (v0, v1, vm) ->
708 v0.bOp(v1, vm, (i, a, b) -> (long)(a | b));
709 case VECTOR_OP_XOR: return (v0, v1, vm) ->
710 v0.bOp(v1, vm, (i, a, b) -> (long)(a ^ b));
711 case VECTOR_OP_LSHIFT: return (v0, v1, vm) ->
712 v0.bOp(v1, vm, (i, a, n) -> (long)(a << n));
713 case VECTOR_OP_RSHIFT: return (v0, v1, vm) ->
714 v0.bOp(v1, vm, (i, a, n) -> (long)(a >> n));
715 case VECTOR_OP_URSHIFT: return (v0, v1, vm) ->
716 v0.bOp(v1, vm, (i, a, n) -> (long)((a & LSHR_SETUP_MASK) >>> n));
717 case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
718 v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
719 case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
720 v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
721 default: return null;
722 }
723 }
724
725 // FIXME: Maybe all of the public final methods in this file (the
726 // simple ones that just call lanewise) should be pushed down to
727 // the X-VectorBits template. They can't optimize properly at
728 // this level, and must rely on inlining. Does it work?
729 // (If it works, of course keep the code here.)
730
731 /**
732 * Combines the lane values of this vector
733 * with the value of a broadcast scalar.
734 *
735 * This is a lane-wise binary operation which applies
736 * the selected operation to each lane.
737 * The return value will be equal to this expression:
738 * {@code this.lanewise(op, this.broadcast(e))}.
739 *
740 * @param op the operation used to process lane values
1641 /**
1642 * {@inheritDoc} <!--workaround-->
1643 */
1644 @Override
1645 @ForceInline
1646 public final
1647 LongVector neg() {
// Thin convenience wrapper: delegates to the unary lanewise operator NEG.
1648 return lanewise(NEG);
1649 }
1650
1651 /**
1652 * {@inheritDoc} <!--workaround-->
1653 */
1654 @Override
1655 @ForceInline
1656 public final
1657 LongVector abs() {
// Thin convenience wrapper: delegates to the unary lanewise operator ABS.
1658 return lanewise(ABS);
1659 }
1660
1661 // not (~)
1662 /**
1663 * Computes the bitwise logical complement ({@code ~})
1664 * of this vector.
1665 *
1666 * This is a lane-wise binary operation which applies
1667 * the primitive bitwise "not" operation ({@code ~})
1668 * to each lane value.
1669 *
1670 * This method is also equivalent to the expression
1671 * {@link #lanewise(VectorOperators.Unary)
1672 * lanewise}{@code (}{@link VectorOperators#NOT
1673 * NOT}{@code )}.
1674 *
1675 * <p>
1676 * This is not a full-service named operation like
1677 * {@link #add(Vector) add}. A masked version of
1678 * this operation is not directly available
1679 * but may be obtained via the masked version of
1680 * {@code lanewise}.
2220 long[] a = toArray();
2221 int[] sa = new int[a.length];
2222 for (int i = 0; i < a.length; i++) {
2223 sa[i] = (int) a[i];
2224 }
2225 return VectorShuffle.fromArray(dsp, sa, 0);
2226 }
2227
2228 /*package-private*/
// Intrinsic conversion from long lanes to shuffle indexes (byte lanes),
// falling back to the scalar toShuffle0 lambda when not intrinsified.
2229 @ForceInline
2230 final
2231 VectorShuffle<Long> toShuffleTemplate(Class<?> shuffleType) {
2232 LongSpecies vsp = vspecies();
2233 return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
2234 getClass(), long.class, length(),
2235 shuffleType, byte.class, length(),
2236 this, vsp,
2237 LongVector::toShuffle0);
2238 }
2239
2240 /**
2241 * {@inheritDoc} <!--workaround-->
2242 */
2243 @Override
2244 public abstract
2245 LongVector selectFrom(Vector<Long> v);
// Specialized per concrete shape; see selectFromTemplate.
2246
2247 /*package-private*/
// selectFrom is rearrange with the roles of this (indexes) and v (data) swapped.
2248 @ForceInline
2249 final LongVector selectFromTemplate(LongVector v) {
2250 return v.rearrange(this.toShuffle());
2251 }
2252
2253 /**
2254 * {@inheritDoc} <!--workaround-->
2255 */
2256 @Override
2257 public abstract
2258 LongVector selectFrom(Vector<Long> s, VectorMask<Long> m);
// Masked variant, specialized per concrete shape.
2259
2620 }
2621
2622 /** {@inheritDoc} <!--workaround-->
2623 * @implNote
2624 * When this method is used on vectors
2625 * of type {@code LongVector},
2626 * up to nine bits of precision may be lost
2627 * for lane values of large magnitude.
2628 */
2629 @ForceInline
2630 @Override
2631 public final double[] toDoubleArray() {
2632 long[] a = toArray();
2633 double[] res = new double[a.length];
2634 for (int i = 0; i < a.length; i++) {
// Widening long->double can round: double carries only a 53-bit significand.
2635 res[i] = (double) a[i];
2636 }
2637 return res;
2638 }
2639
2640 /**
2641 * Loads a vector from a byte array starting at an offset.
2642 * Bytes are composed into primitive lane elements according
2643 * to the specified byte order.
2644 * The vector is arranged into lanes according to
2645 * <a href="Vector.html#lane-order">memory ordering</a>.
2646 * <p>
2647 * This method behaves as if it returns the result of calling
2648 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2649 * fromByteBuffer()} as follows:
2650 * <pre>{@code
2651 * var bb = ByteBuffer.wrap(a);
2652 * var m = species.maskAll(true);
2653 * return fromByteBuffer(species, bb, offset, bo, m);
2654 * }</pre>
2655 *
2656 * @param species species of desired vector
2657 * @param a the byte array
2658 * @param offset the offset into the array
2659 * @param bo the intended byte order
2660 * @return a vector loaded from a byte array
2661 * @throws IndexOutOfBoundsException
2662 * if {@code offset+N*ESIZE < 0}
2663 * or {@code offset+(N+1)*ESIZE > a.length}
2664 * for any lane {@code N} in the vector
2665 */
2666 @ForceInline
2667 public static
2668 LongVector fromByteArray(VectorSpecies<Long> species,
2669 byte[] a, int offset,
2670 ByteOrder bo) {
2671 offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
2672 LongSpecies vsp = (LongSpecies) species;
// Load in native byte order, then reverse lane bytes if bo differs from native.
2673 return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
2674 }
2675
2676 /**
2677 * Loads a vector from a byte array starting at an offset
2678 * and using a mask.
2679 * Lanes where the mask is unset are filled with the default
2680 * value of {@code long} (zero).
2681 * Bytes are composed into primitive lane elements according
2682 * to the specified byte order.
2683 * The vector is arranged into lanes according to
2684 * <a href="Vector.html#lane-order">memory ordering</a>.
2685 * <p>
2686 * This method behaves as if it returns the result of calling
2687 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2688 * fromByteBuffer()} as follows:
2689 * <pre>{@code
2690 * var bb = ByteBuffer.wrap(a);
2691 * return fromByteBuffer(species, bb, offset, bo, m);
2692 * }</pre>
2693 *
2694 * @param species species of desired vector
2695 * @param a the byte array
2696 * @param offset the offset into the array
2697 * @param bo the intended byte order
2698 * @param m the mask controlling lane selection
2699 * @return a vector loaded from a byte array
2700 * @throws IndexOutOfBoundsException
2701 * if {@code offset+N*ESIZE < 0}
2702 * or {@code offset+(N+1)*ESIZE > a.length}
2703 * for any lane {@code N} in the vector
2704 * where the mask is set
2705 */
2706 @ForceInline
2707 public static
2708 LongVector fromByteArray(VectorSpecies<Long> species,
2709 byte[] a, int offset,
2710 ByteOrder bo,
2711 VectorMask<Long> m) {
2712 LongSpecies vsp = (LongSpecies) species;
// Fast path: the whole vector lies within the array; use the intrinsic masked load.
2713 if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
2714 return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
2715 }
2716
// Slow path: bounds are checked per set lane, then lanes are loaded one by one.
2717 // FIXME: optimize
2718 checkMaskFromIndexSize(offset, vsp, m, 8, a.length);
2719 ByteBuffer wb = wrapper(a, bo);
2720 return vsp.ldOp(wb, offset, (AbstractMask<Long>)m,
2721 (wb_, o, i) -> wb_.getLong(o + i * 8));
2722 }
2723
2724 /**
2725 * Loads a vector from an array of type {@code long[]}
2726 * starting at an offset.
2727 * For each vector lane, where {@code N} is the vector lane index, the
2728 * array element at index {@code offset + N} is placed into the
2729 * resulting vector at lane index {@code N}.
2730 *
2731 * @param species species of desired vector
2732 * @param a the array
2733 * @param offset the offset into the array
2734 * @return the vector loaded from an array
2735 * @throws IndexOutOfBoundsException
2736 * if {@code offset+N < 0} or {@code offset+N >= a.length}
2737 * for any lane {@code N} in the vector
2738 */
2739 @ForceInline
2740 public static
2741 LongVector fromArray(VectorSpecies<Long> species,
2742 long[] a, int offset) {
2743 offset = checkFromIndexSize(offset, species.length(), a.length);
2894 * @see LongVector#toIntArray()
2895 */
2896 @ForceInline
2897 public static
2898 LongVector fromArray(VectorSpecies<Long> species,
2899 long[] a, int offset,
2900 int[] indexMap, int mapOffset,
2901 VectorMask<Long> m) {
2902 if (m.allTrue()) {
// All lanes set: the unmasked gather is equivalent and cheaper.
2903 return fromArray(species, a, offset, indexMap, mapOffset);
2904 }
2905 else {
2906 LongSpecies vsp = (LongSpecies) species;
2907 return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
2908 }
2909 }
2910
2911
2912
2913 /**
2914 * Loads a vector from a {@linkplain ByteBuffer byte buffer}
2915 * starting at an offset into the byte buffer.
2916 * Bytes are composed into primitive lane elements according
2917 * to the specified byte order.
2918 * The vector is arranged into lanes according to
2919 * <a href="Vector.html#lane-order">memory ordering</a>.
2920 * <p>
2921 * This method behaves as if it returns the result of calling
2922 * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2923 * fromByteBuffer()} as follows:
2924 * <pre>{@code
2925 * var m = species.maskAll(true);
2926 * return fromByteBuffer(species, bb, offset, bo, m);
2927 * }</pre>
2928 *
2929 * @param species species of desired vector
2930 * @param bb the byte buffer
2931 * @param offset the offset into the byte buffer
2932 * @param bo the intended byte order
2933 * @return a vector loaded from a byte buffer
2934 * @throws IndexOutOfBoundsException
2935 * if {@code offset+N*8 < 0}
2936 * or {@code offset+N*8 >= bb.limit()}
2937 * for any lane {@code N} in the vector
2938 */
2939 @ForceInline
2940 public static
2941 LongVector fromByteBuffer(VectorSpecies<Long> species,
2942 ByteBuffer bb, int offset,
2943 ByteOrder bo) {
2944 offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
2945 LongSpecies vsp = (LongSpecies) species;
// Load in native byte order, then reverse lane bytes if bo differs from native.
2946 return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
2947 }
2948
2949 /**
2950 * Loads a vector from a {@linkplain ByteBuffer byte buffer}
2951 * starting at an offset into the byte buffer
2952 * and using a mask.
2953 * Lanes where the mask is unset are filled with the default
2954 * value of {@code long} (zero).
2955 * Bytes are composed into primitive lane elements according
2956 * to the specified byte order.
2957 * The vector is arranged into lanes according to
2958 * <a href="Vector.html#lane-order">memory ordering</a>.
2959 * <p>
2960 * The following pseudocode illustrates the behavior:
2961 * <pre>{@code
2962 * LongBuffer eb = bb.duplicate()
2963 * .position(offset)
2964 * .order(bo).asLongBuffer();
2965 * long[] ar = new long[species.length()];
2966 * for (int n = 0; n < ar.length; n++) {
2967 * if (m.laneIsSet(n)) {
2968 * ar[n] = eb.get(n);
2969 * }
2970 * }
2971 * LongVector r = LongVector.fromArray(species, ar, 0);
2972 * }</pre>
2973 * @implNote
2974 * This operation is likely to be more efficient if
2975 * the specified byte order is the same as
2976 * {@linkplain ByteOrder#nativeOrder()
2977 * the platform native order},
2978 * since this method will not need to reorder
2979 * the bytes of lane values.
2980 *
2981 * @param species species of desired vector
2982 * @param bb the byte buffer
2983 * @param offset the offset into the byte buffer
2984 * @param bo the intended byte order
2985 * @param m the mask controlling lane selection
2986 * @return a vector loaded from a byte buffer
2987 * @throws IndexOutOfBoundsException
2988 * if {@code offset+N*8 < 0}
2989 * or {@code offset+N*8 >= bb.limit()}
2990 * for any lane {@code N} in the vector
2991 * where the mask is set
2992 */
2993 @ForceInline
2994 public static
2995 LongVector fromByteBuffer(VectorSpecies<Long> species,
2996 ByteBuffer bb, int offset,
2997 ByteOrder bo,
2998 VectorMask<Long> m) {
2999 LongSpecies vsp = (LongSpecies) species;
// Fast path: whole vector fits below the buffer limit; use the intrinsic masked load.
3000 if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
3001 return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
3002 }
3003
// Slow path: bounds are checked per set lane, then lanes are loaded one by one.
3004 // FIXME: optimize
3005 checkMaskFromIndexSize(offset, vsp, m, 8, bb.limit());
3006 ByteBuffer wb = wrapper(bb, bo);
3007 return vsp.ldOp(wb, offset, (AbstractMask<Long>)m,
3008 (wb_, o, i) -> wb_.getLong(o + i * 8));
3009 }
3010
3011 // Memory store operations
3012
3013 /**
3014 * Stores this vector into an array of type {@code long[]}
3015 * starting at an offset.
3016 * <p>
3017 * For each vector lane, where {@code N} is the vector lane index,
3018 * the lane element at index {@code N} is stored into the array
3019 * element {@code a[offset+N]}.
3020 *
3021 * @param a the array, of type {@code long[]}
3022 * @param offset the offset into the array
3023 * @throws IndexOutOfBoundsException
3024 * if {@code offset+N < 0} or {@code offset+N >= a.length}
3025 * for any lane {@code N} in the vector
3026 */
3027 @ForceInline
3028 public final
3029 void intoArray(long[] a, int offset) {
3030 offset = checkFromIndexSize(offset, length(), a.length);
3031 LongSpecies vsp = vspecies();
// Intrinsic store; the trailing lambda is the scalar fallback implementation.
3032 VectorSupport.store(
3033 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3034 a, arrayAddress(a, offset),
3035 this,
3036 a, offset,
3037 (arr, off, v)
3038 -> v.stOp(arr, off,
3039 (arr_, off_, i, e) -> arr_[off_ + i] = e));
3040 }
3041
3042 /**
3043 * Stores this vector into an array of type {@code long[]}
3044 * starting at offset and using a mask.
3045 * <p>
3046 * For each vector lane, where {@code N} is the vector lane index,
3047 * the lane element at index {@code N} is stored into the array
3048 * element {@code a[offset+N]}.
3049 * If the mask lane at {@code N} is unset then the corresponding
3050 * array element {@code a[offset+N]} is left unchanged.
3051 * <p>
3052 * Array range checking is done for lanes where the mask is set.
3053 * Lanes where the mask is unset are not stored and do not need
3054 * to correspond to legitimate elements of {@code a}.
3055 * That is, unset lanes may correspond to array indexes less than
3056 * zero or beyond the end of the array.
3057 *
3058 * @param a the array, of type {@code long[]}
3178 * where the mask is set
3179 * @see LongVector#toIntArray()
3180 */
3181 @ForceInline
3182 public final
3183 void intoArray(long[] a, int offset,
3184 int[] indexMap, int mapOffset,
3185 VectorMask<Long> m) {
3186 if (m.allTrue()) {
// All lanes set: the unmasked scatter is equivalent and cheaper.
3187 intoArray(a, offset, indexMap, mapOffset);
3188 }
3189 else {
3190 intoArray0(a, offset, indexMap, mapOffset, m);
3191 }
3192 }
3193
3194
3195
3196 /**
3197 * {@inheritDoc} <!--workaround-->
3198 */
3199 @Override
3200 @ForceInline
3201 public final
3202 void intoByteArray(byte[] a, int offset,
3203 ByteOrder bo) {
3204 offset = checkFromIndexSize(offset, byteSize(), a.length);
// Swap lane bytes first (if bo is non-native), then do the raw native-order store.
3205 maybeSwap(bo).intoByteArray0(a, offset);
3206 }
3207
3208 /**
3209 * {@inheritDoc} <!--workaround-->
3210 */
3211 @Override
3212 @ForceInline
3213 public final
3214 void intoByteArray(byte[] a, int offset,
3215 ByteOrder bo,
3216 VectorMask<Long> m) {
3217 if (m.allTrue()) {
3218 intoByteArray(a, offset, bo);
3219 } else {
3220 LongSpecies vsp = vspecies();
// Bounds are validated only for set lanes before the masked store.
3221 checkMaskFromIndexSize(offset, vsp, m, 8, a.length);
3222 maybeSwap(bo).intoByteArray0(a, offset, m);
3223 }
3224 }
3225
3226 /**
3227 * {@inheritDoc} <!--workaround-->
3228 */
3229 @Override
3230 @ForceInline
3231 public final
3232 void intoByteBuffer(ByteBuffer bb, int offset,
3233 ByteOrder bo) {
// Reject read-only buffers before doing any bounds work or byte swapping.
3234 if (ScopedMemoryAccess.isReadOnly(bb)) {
3235 throw new ReadOnlyBufferException();
3236 }
3237 offset = checkFromIndexSize(offset, byteSize(), bb.limit());
3238 maybeSwap(bo).intoByteBuffer0(bb, offset);
3239 }
3240
3241 /**
3242 * {@inheritDoc} <!--workaround-->
3243 */
3244 @Override
3245 @ForceInline
3246 public final
3247 void intoByteBuffer(ByteBuffer bb, int offset,
3248 ByteOrder bo,
3249 VectorMask<Long> m) {
3250 if (m.allTrue()) {
3251 intoByteBuffer(bb, offset, bo);
3252 } else {
3253 if (bb.isReadOnly()) {
3254 throw new ReadOnlyBufferException();
3255 }
3256 LongSpecies vsp = vspecies();
// Bounds are validated only for set lanes before the masked store.
3257 checkMaskFromIndexSize(offset, vsp, m, 8, bb.limit());
3258 maybeSwap(bo).intoByteBuffer0(bb, offset, m);
3259 }
3260 }
3261
3262 // ================================================
3263
3264 // Low-level memory operations.
3265 //
3266 // Note that all of these operations *must* inline into a context
3267 // where the exact species of the involved vector is a
3268 // compile-time constant. Otherwise, the intrinsic generation
3269 // will fail and performance will suffer.
3270 //
3271 // In many cases this is achieved by re-deriving a version of the
3272 // method in each concrete subclass (per species). The re-derived
3273 // method simply calls one of these generic methods, with exact
3274 // parameters for the controlling metadata, which is either a
3275 // typed vector or constant species instance.
3276
3277 // Unchecked loading operations in native byte order.
3278 // Caller is responsible for applying index checks, masking, and
3279 // byte swapping.
3280
3281 /*package-private*/
// Unchecked load in native lane order; callers have already range-checked.
3282 abstract
3283 LongVector fromArray0(long[] a, int offset);
3284 @ForceInline
3285 final
3286 LongVector fromArray0Template(long[] a, int offset) {
3287 LongSpecies vsp = vspecies();
3288 return VectorSupport.load(
3289 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3290 a, arrayAddress(a, offset),
3291 a, offset, vsp,
3292 (arr, off, s) -> s.ldOp(arr, off,
3293 (arr_, off_, i) -> arr_[off_ + i]));
3294 }
3295
3296 /*package-private*/
// Unchecked masked load; the mask is validated against this vector's species.
3297 abstract
3298 LongVector fromArray0(long[] a, int offset, VectorMask<Long> m);
3299 @ForceInline
3300 final
3301 <M extends VectorMask<Long>>
3302 LongVector fromArray0Template(Class<M> maskClass, long[] a, int offset, M m) {
3303 m.check(species());
3304 LongSpecies vsp = vspecies();
3305 return VectorSupport.loadMasked(
3306 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3307 a, arrayAddress(a, offset), m,
3308 a, offset, vsp,
3309 (arr, off, s, vm) -> s.ldOp(arr, off, vm,
3310 (arr_, off_, i) -> arr_[off_ + i]));
3311 }
3312
3313 /*package-private*/
// Unchecked masked gather primitive (native lane order).
3314 abstract
3315 LongVector fromArray0(long[] a, int offset,
3316 int[] indexMap, int mapOffset,
3317 VectorMask<Long> m);
3318 @ForceInline
3319 final
3320 <M extends VectorMask<Long>>
3321 LongVector fromArray0Template(Class<M> maskClass, long[] a, int offset,
3322 int[] indexMap, int mapOffset, M m) {
3323 LongSpecies vsp = vspecies();
3324 IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3325 Objects.requireNonNull(a);
3326 Objects.requireNonNull(indexMap);
3327 m.check(vsp);
3328 Class<? extends LongVector> vectorType = vsp.vectorType();
3329
3347 } else {
3348 vix = IntVector
3349 .fromArray(isp, indexMap, mapOffset)
3350 .add(offset);
3351 }
3352
3353 // FIXME: Check index under mask controlling.
3354 vix = VectorIntrinsics.checkIndex(vix, a.length);
3355
3356 return VectorSupport.loadWithMap(
3357 vectorType, maskClass, long.class, vsp.laneCount(),
3358 isp.vectorType(),
3359 a, ARRAY_BASE, vix, m,
3360 a, offset, indexMap, mapOffset, vsp,
3361 (c, idx, iMap, idy, s, vm) ->
3362 s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3363 }
3364
3365
3366
// Unchecked byte-array load in native byte order; byte swapping is the caller's job.
3367 @Override
3368 abstract
3369 LongVector fromByteArray0(byte[] a, int offset);
3370 @ForceInline
3371 final
3372 LongVector fromByteArray0Template(byte[] a, int offset) {
3373 LongSpecies vsp = vspecies();
3374 return VectorSupport.load(
3375 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3376 a, byteArrayAddress(a, offset),
3377 a, offset, vsp,
3378 (arr, off, s) -> {
3379 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3380 return s.ldOp(wb, off,
3381 (wb_, o, i) -> wb_.getLong(o + i * 8));
3382 });
3383 }
3384
// Masked variant of fromByteArray0; unchecked, native byte order.
3385 abstract
3386 LongVector fromByteArray0(byte[] a, int offset, VectorMask<Long> m);
3387 @ForceInline
3388 final
3389 <M extends VectorMask<Long>>
3390 LongVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3391 LongSpecies vsp = vspecies();
3392 m.check(vsp);
3393 return VectorSupport.loadMasked(
3394 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3395 a, byteArrayAddress(a, offset), m,
3396 a, offset, vsp,
3397 (arr, off, s, vm) -> {
3398 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3399 return s.ldOp(wb, off, vm,
3400 (wb_, o, i) -> wb_.getLong(o + i * 8));
3401 });
3402 }
3403
// Unchecked buffer load via ScopedMemoryAccess (handles heap and direct buffers).
3404 abstract
3405 LongVector fromByteBuffer0(ByteBuffer bb, int offset);
3406 @ForceInline
3407 final
3408 LongVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
3409 LongSpecies vsp = vspecies();
3410 return ScopedMemoryAccess.loadFromByteBuffer(
3411 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3412 bb, offset, vsp,
3413 (buf, off, s) -> {
3414 ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3415 return s.ldOp(wb, off,
3416 (wb_, o, i) -> wb_.getLong(o + i * 8));
3417 });
3418 }
3419
// Masked variant of fromByteBuffer0; unchecked, native byte order.
3420 abstract
3421 LongVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Long> m);
3422 @ForceInline
3423 final
3424 <M extends VectorMask<Long>>
3425 LongVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
3426 LongSpecies vsp = vspecies();
3427 m.check(vsp);
3428 return ScopedMemoryAccess.loadFromByteBufferMasked(
3429 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3430 bb, offset, m, vsp,
3431 (buf, off, s, vm) -> {
3432 ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3433 return s.ldOp(wb, off, vm,
3434 (wb_, o, i) -> wb_.getLong(o + i * 8));
3435 });
3436 }
3437
3438 // Unchecked storing operations in native byte order.
3439 // Caller is responsible for applying index checks, masking, and
3440 // byte swapping.
3441
// Unchecked store in native lane order; callers have already range-checked.
3442 abstract
3443 void intoArray0(long[] a, int offset);
3444 @ForceInline
3445 final
3446 void intoArray0Template(long[] a, int offset) {
3447 LongSpecies vsp = vspecies();
3448 VectorSupport.store(
3449 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3450 a, arrayAddress(a, offset),
3451 this, a, offset,
3452 (arr, off, v)
3453 -> v.stOp(arr, off,
3454 (arr_, off_, i, e) -> arr_[off_+i] = e));
3455 }
3456
// Masked variant of intoArray0; unset lanes leave array elements untouched.
3457 abstract
3458 void intoArray0(long[] a, int offset, VectorMask<Long> m);
3459 @ForceInline
3460 final
3461 <M extends VectorMask<Long>>
3462 void intoArray0Template(Class<M> maskClass, long[] a, int offset, M m) {
3463 m.check(species());
3464 LongSpecies vsp = vspecies();
3465 VectorSupport.storeMasked(
3466 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3467 a, arrayAddress(a, offset),
3468 this, m, a, offset,
3469 (arr, off, v, vm)
3470 -> v.stOp(arr, off, vm,
3471 (arr_, off_, i, e) -> arr_[off_ + i] = e));
3472 }
3473
// Unchecked masked scatter primitive (native lane order).
3474 abstract
3475 void intoArray0(long[] a, int offset,
3476 int[] indexMap, int mapOffset,
3477 VectorMask<Long> m);
3478 @ForceInline
3479 final
3480 <M extends VectorMask<Long>>
3481 void intoArray0Template(Class<M> maskClass, long[] a, int offset,
3482 int[] indexMap, int mapOffset, M m) {
3483 m.check(species());
3484 LongSpecies vsp = vspecies();
3485 IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3486 if (vsp.laneCount() == 1) {
3487 intoArray(a, offset + indexMap[mapOffset], m);
3488 return;
3489 }
3490
3508
3509
3510 // FIXME: Check index under mask controlling.
3511 vix = VectorIntrinsics.checkIndex(vix, a.length);
3512
3513 VectorSupport.storeWithMap(
3514 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3515 isp.vectorType(),
3516 a, arrayAddress(a, 0), vix,
3517 this, m,
3518 a, offset, indexMap, mapOffset,
3519 (arr, off, v, map, mo, vm)
3520 -> v.stOp(arr, off, vm,
3521 (arr_, off_, i, e) -> {
3522 int j = map[mo + i];
3523 arr[off + j] = e;
3524 }));
3525 }
3526
3527
// Unchecked byte-array store in native byte order; swapping is the caller's job.
3528 abstract
3529 void intoByteArray0(byte[] a, int offset);
3530 @ForceInline
3531 final
3532 void intoByteArray0Template(byte[] a, int offset) {
3533 LongSpecies vsp = vspecies();
3534 VectorSupport.store(
3535 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3536 a, byteArrayAddress(a, offset),
3537 this, a, offset,
3538 (arr, off, v) -> {
3539 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3540 v.stOp(wb, off,
3541 (tb_, o, i, e) -> tb_.putLong(o + i * 8, e));
3542 });
3543 }
3544
// Masked variant of intoByteArray0; unchecked, native byte order.
3545 abstract
3546 void intoByteArray0(byte[] a, int offset, VectorMask<Long> m);
3547 @ForceInline
3548 final
3549 <M extends VectorMask<Long>>
3550 void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3551 LongSpecies vsp = vspecies();
3552 m.check(vsp);
3553 VectorSupport.storeMasked(
3554 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3555 a, byteArrayAddress(a, offset),
3556 this, m, a, offset,
3557 (arr, off, v, vm) -> {
3558 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3559 v.stOp(wb, off, vm,
3560 (tb_, o, i, e) -> tb_.putLong(o + i * 8, e));
3561 });
3562 }
3563
// Unchecked buffer store via ScopedMemoryAccess (handles heap and direct buffers).
3564 @ForceInline
3565 final
3566 void intoByteBuffer0(ByteBuffer bb, int offset) {
3567 LongSpecies vsp = vspecies();
3568 ScopedMemoryAccess.storeIntoByteBuffer(
3569 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3570 this, bb, offset,
3571 (buf, off, v) -> {
3572 ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3573 v.stOp(wb, off,
3574 (wb_, o, i, e) -> wb_.putLong(o + i * 8, e));
3575 });
3576 }
3577
3578 abstract
3579 void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Long> m);
    @ForceInline
    final
    <M extends VectorMask<Long>>
    void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
        LongSpecies vsp = vspecies();
        // Species check before the masked intrinsic store.
        m.check(vsp);
        ScopedMemoryAccess.storeIntoByteBufferMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            this, m, bb, offset,
            (buf, off, v, vm) -> {
                // Java-level fallback: write only the lanes whose mask
                // bit is set, each lane occupying 8 bytes.
                ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                v.stOp(wb, off, vm,
                        (wb_, o, i, e) -> wb_.putLong(o + i * 8, e));
            });
    }
3595
3596
3597 // End of low-level memory operations.
3598
3599 private static
3600 void checkMaskFromIndexSize(int offset,
3601 LongSpecies vsp,
3602 VectorMask<Long> m,
3603 int scale,
3604 int limit) {
3605 ((AbstractMask<Long>)m)
3606 .checkIndexByLane(offset, limit, vsp.iota(), scale);
3607 }
3608
3609 @ForceInline
3610 private void conditionalStoreNYI(int offset,
3611 LongSpecies vsp,
3612 VectorMask<Long> m,
3613 int scale,
3614 int limit) {
3615 if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3616 String msg =
3617 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3618 offset, limit, m, vsp);
3619 throw new AssertionError(msg);
3620 }
3621 }
3622
3623 /*package-private*/
3624 @Override
3625 @ForceInline
3626 final
3627 LongVector maybeSwap(ByteOrder bo) {
3628 if (bo != NATIVE_ENDIAN) {
3890 }
3891 }
3892 return dummyVector().vectorFactory(res);
3893 }
3894
3895 /*package-private*/
3896 @ForceInline
3897 <M> LongVector ldOp(M memory, int offset,
3898 FLdOp<M> f) {
3899 return dummyVector().ldOp(memory, offset, f);
3900 }
3901
3902 /*package-private*/
3903 @ForceInline
3904 <M> LongVector ldOp(M memory, int offset,
3905 VectorMask<Long> m,
3906 FLdOp<M> f) {
3907 return dummyVector().ldOp(memory, offset, m, f);
3908 }
3909
3910 /*package-private*/
3911 @ForceInline
3912 <M> void stOp(M memory, int offset, FStOp<M> f) {
3913 dummyVector().stOp(memory, offset, f);
3914 }
3915
3916 /*package-private*/
3917 @ForceInline
3918 <M> void stOp(M memory, int offset,
3919 AbstractMask<Long> m,
3920 FStOp<M> f) {
3921 dummyVector().stOp(memory, offset, m, f);
3922 }
3923
3924 // N.B. Make sure these constant vectors and
3925 // masks load up correctly into registers.
3926 //
3927 // Also, see if we can avoid all that switching.
3928 // Could we cache both vectors and both masks in
3929 // this species object?
3930
3931 // Zero and iota vector access
3932 @Override
3933 @ForceInline
3934 public final LongVector zero() {
3935 if ((Class<?>) vectorType() == LongMaxVector.class)
3936 return LongMaxVector.ZERO;
3937 switch (vectorBitSize()) {
3938 case 64: return Long64Vector.ZERO;
3939 case 128: return Long128Vector.ZERO;
3940 case 256: return Long256Vector.ZERO;
3941 case 512: return Long512Vector.ZERO;
3942 }
3943 throw new AssertionError();
|
7 * published by the Free Software Foundation. Oracle designates this
8 * particular file as subject to the "Classpath" exception as provided
9 * by Oracle in the LICENSE file that accompanied this code.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 */
25 package jdk.incubator.vector;
26
27 import java.nio.ByteOrder;
28 import java.util.Arrays;
29 import java.util.Objects;
30 import java.util.function.Function;
31
32 import jdk.incubator.foreign.MemorySegment;
33 import jdk.incubator.foreign.ValueLayout;
34 import jdk.internal.access.foreign.MemorySegmentProxy;
35 import jdk.internal.misc.ScopedMemoryAccess;
36 import jdk.internal.misc.Unsafe;
37 import jdk.internal.vm.annotation.ForceInline;
38 import jdk.internal.vm.vector.VectorSupport;
39
40 import static jdk.internal.vm.vector.VectorSupport.*;
41 import static jdk.incubator.vector.VectorIntrinsics.*;
42
43 import static jdk.incubator.vector.VectorOperators.*;
44
45 // -- This file was mechanically generated: Do not edit! -- //
46
47 /**
48 * A specialized {@link Vector} representing an ordered immutable sequence of
49 * {@code long} values.
50 */
51 @SuppressWarnings("cast") // warning: redundant cast
52 public abstract class LongVector extends AbstractVector<Long> {
53
    // Lane storage is the long[] handed up to AbstractVector; concrete
    // shape subclasses construct it with their fixed lane count.
    LongVector(long[] vec) {
        super(vec);
    }
57
58 static final int FORBID_OPCODE_KIND = VO_ONLYFP;
59
60 static final ValueLayout.OfLong ELEMENT_LAYOUT = ValueLayout.JAVA_LONG.withBitAlignment(8);
61
62 @ForceInline
63 static int opCode(Operator op) {
64 return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
65 }
66 @ForceInline
67 static int opCode(Operator op, int requireKind) {
68 requireKind |= VO_OPCODE_VALID;
69 return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
70 }
71 @ForceInline
72 static boolean opKind(Operator op, int bit) {
73 return VectorOperators.opKind(op, bit);
74 }
75
76 // Virtualized factories and operators,
77 // coded with portable definitions.
78 // These are all @ForceInline in case
79 // they need to be used performantly.
80 // The various shape-specific subclasses
81 // also specialize them by wrapping
336 return vectorFactory(res);
337 }
338
339 /*package-private*/
340 @ForceInline
341 final
342 <M> LongVector ldOp(M memory, int offset,
343 VectorMask<Long> m,
344 FLdOp<M> f) {
345 //long[] vec = vec();
346 long[] res = new long[length()];
347 boolean[] mbits = ((AbstractMask<Long>)m).getBits();
348 for (int i = 0; i < res.length; i++) {
349 if (mbits[i]) {
350 res[i] = f.apply(memory, offset, i);
351 }
352 }
353 return vectorFactory(res);
354 }
355
356 /*package-private*/
    // Scalar-fallback load kernel: produce the value for lane {@code i}
    // from {@code memory} at base byte offset {@code offset}.
    interface FLdLongOp {
        long apply(MemorySegment memory, long offset, int i);
    }
360
361 /*package-private*/
362 @ForceInline
363 final
364 LongVector ldLongOp(MemorySegment memory, long offset,
365 FLdLongOp f) {
366 //dummy; no vec = vec();
367 long[] res = new long[length()];
368 for (int i = 0; i < res.length; i++) {
369 res[i] = f.apply(memory, offset, i);
370 }
371 return vectorFactory(res);
372 }
373
374 /*package-private*/
375 @ForceInline
376 final
377 LongVector ldLongOp(MemorySegment memory, long offset,
378 VectorMask<Long> m,
379 FLdLongOp f) {
380 //long[] vec = vec();
381 long[] res = new long[length()];
382 boolean[] mbits = ((AbstractMask<Long>)m).getBits();
383 for (int i = 0; i < res.length; i++) {
384 if (mbits[i]) {
385 res[i] = f.apply(memory, offset, i);
386 }
387 }
388 return vectorFactory(res);
389 }
390
391 static long memorySegmentGet(MemorySegment ms, long o, int i) {
392 return ms.get(ELEMENT_LAYOUT, o + i * 8L);
393 }
394
    // Scalar-fallback store kernel: consume the value {@code a} of lane
    // {@code i}, writing it to {@code memory} at base offset {@code offset}.
    interface FStOp<M> {
        void apply(M memory, int offset, int i, long a);
    }
398
399 /*package-private*/
400 @ForceInline
401 final
402 <M> void stOp(M memory, int offset,
403 FStOp<M> f) {
404 long[] vec = vec();
405 for (int i = 0; i < vec.length; i++) {
406 f.apply(memory, offset, i, vec[i]);
407 }
408 }
409
410 /*package-private*/
411 @ForceInline
412 final
413 <M> void stOp(M memory, int offset,
414 VectorMask<Long> m,
415 FStOp<M> f) {
416 long[] vec = vec();
417 boolean[] mbits = ((AbstractMask<Long>)m).getBits();
418 for (int i = 0; i < vec.length; i++) {
419 if (mbits[i]) {
420 f.apply(memory, offset, i, vec[i]);
421 }
422 }
423 }
424
    // Scalar-fallback store kernel for memory segments, using a long
    // base byte offset instead of an int one.
    interface FStLongOp {
        void apply(MemorySegment memory, long offset, int i, long a);
    }
428
429 /*package-private*/
430 @ForceInline
431 final
432 void stLongOp(MemorySegment memory, long offset,
433 FStLongOp f) {
434 long[] vec = vec();
435 for (int i = 0; i < vec.length; i++) {
436 f.apply(memory, offset, i, vec[i]);
437 }
438 }
439
440 /*package-private*/
441 @ForceInline
442 final
443 void stLongOp(MemorySegment memory, long offset,
444 VectorMask<Long> m,
445 FStLongOp f) {
446 long[] vec = vec();
447 boolean[] mbits = ((AbstractMask<Long>)m).getBits();
448 for (int i = 0; i < vec.length; i++) {
449 if (mbits[i]) {
450 f.apply(memory, offset, i, vec[i]);
451 }
452 }
453 }
454
455 static void memorySegmentSet(MemorySegment ms, long o, int i, long e) {
456 ms.set(ELEMENT_LAYOUT, o + i * 8L, e);
457 }
458
459 // Binary test
460
461 /*package-private*/
    // Per-lane comparison kernel: given the comparison code {@code cond}
    // and lane index {@code i}, compute the mask bit for values a and b.
    interface FBinTest {
        boolean apply(int cond, int i, long a, long b);
    }
465
466 /*package-private*/
467 @ForceInline
468 final
469 AbstractMask<Long> bTest(int cond,
470 Vector<Long> o,
471 FBinTest f) {
472 long[] vec1 = vec();
473 long[] vec2 = ((LongVector)o).vec();
474 boolean[] bits = new boolean[length()];
475 for (int i = 0; i < length(); i++){
476 bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
477 }
478 return maskFactory(bits);
489 static long rotateRight(long a, int n) {
490 return Long.rotateRight(a, n);
491 }
492
493 /*package-private*/
494 @Override
495 abstract LongSpecies vspecies();
496
497 /*package-private*/
498 @ForceInline
499 static long toBits(long e) {
500 return e;
501 }
502
503 /*package-private*/
504 @ForceInline
505 static long fromBits(long bits) {
506 return ((long)bits);
507 }
508
509 static LongVector expandHelper(Vector<Long> v, VectorMask<Long> m) {
510 VectorSpecies<Long> vsp = m.vectorSpecies();
511 LongVector r = (LongVector) vsp.zero();
512 LongVector vi = (LongVector) v;
513 if (m.allTrue()) {
514 return vi;
515 }
516 for (int i = 0, j = 0; i < vsp.length(); i++) {
517 if (m.laneIsSet(i)) {
518 r = r.withLane(i, vi.lane(j++));
519 }
520 }
521 return r;
522 }
523
524 static LongVector compressHelper(Vector<Long> v, VectorMask<Long> m) {
525 VectorSpecies<Long> vsp = m.vectorSpecies();
526 LongVector r = (LongVector) vsp.zero();
527 LongVector vi = (LongVector) v;
528 if (m.allTrue()) {
529 return vi;
530 }
531 for (int i = 0, j = 0; i < vsp.length(); i++) {
532 if (m.laneIsSet(i)) {
533 r = r.withLane(j++, vi.lane(i));
534 }
535 }
536 return r;
537 }
538
539 // Static factories (other than memory operations)
540
541 // Note: A surprising behavior in javadoc
542 // sometimes makes a lone /** {@inheritDoc} */
543 // comment drop the method altogether,
544 // apparently if the method mentions an
545 // parameter or return type of Vector<Long>
546 // instead of Vector<E> as originally specified.
547 // Adding an empty HTML fragment appears to
548 // nudge javadoc into providing the desired
549 // inherited documentation. We use the HTML
550 // comment <!--workaround--> for this.
551
552 /**
553 * Returns a vector of the given species
554 * where all lane elements are set to
555 * zero, the default primitive value.
556 *
557 * @param species species of the desired zero vector
558 * @return a zero vector
666 return lanewise(XOR, broadcast(-1), m);
667 }
668 }
669 int opc = opCode(op);
670 return VectorSupport.unaryOp(
671 opc, getClass(), maskClass, long.class, length(),
672 this, m,
673 UN_IMPL.find(op, opc, LongVector::unaryOperations));
674 }
675
    // Cache of scalar fallbacks for masked unary ops, keyed by operator;
    // populated lazily via unaryOperations below.
    private static final
    ImplCache<Unary, UnaryOperation<LongVector, VectorMask<Long>>>
        UN_IMPL = new ImplCache<>(Unary.class, LongVector.class);
679
680 private static UnaryOperation<LongVector, VectorMask<Long>> unaryOperations(int opc_) {
681 switch (opc_) {
682 case VECTOR_OP_NEG: return (v0, m) ->
683 v0.uOp(m, (i, a) -> (long) -a);
684 case VECTOR_OP_ABS: return (v0, m) ->
685 v0.uOp(m, (i, a) -> (long) Math.abs(a));
686 case VECTOR_OP_BIT_COUNT: return (v0, m) ->
687 v0.uOp(m, (i, a) -> (long) Long.bitCount(a));
688 case VECTOR_OP_TZ_COUNT: return (v0, m) ->
689 v0.uOp(m, (i, a) -> (long) Long.numberOfTrailingZeros(a));
690 case VECTOR_OP_LZ_COUNT: return (v0, m) ->
691 v0.uOp(m, (i, a) -> (long) Long.numberOfLeadingZeros(a));
692 case VECTOR_OP_REVERSE: return (v0, m) ->
693 v0.uOp(m, (i, a) -> (long) Long.reverse(a));
694 case VECTOR_OP_REVERSE_BYTES: return (v0, m) ->
695 v0.uOp(m, (i, a) -> (long) Long.reverseBytes(a));
696 default: return null;
697 }
698 }
699
700 // Binary lanewise support
701
702 /**
703 * {@inheritDoc} <!--workaround-->
704 * @see #lanewise(VectorOperators.Binary,long)
705 * @see #lanewise(VectorOperators.Binary,long,VectorMask)
706 */
707 @Override
708 public abstract
709 LongVector lanewise(VectorOperators.Binary op,
710 Vector<Long> v);
711 @ForceInline
712 final
713 LongVector lanewiseTemplate(VectorOperators.Binary op,
714 Vector<Long> v) {
715 LongVector that = (LongVector) v;
816 case VECTOR_OP_MAX: return (v0, v1, vm) ->
817 v0.bOp(v1, vm, (i, a, b) -> (long)Math.max(a, b));
818 case VECTOR_OP_MIN: return (v0, v1, vm) ->
819 v0.bOp(v1, vm, (i, a, b) -> (long)Math.min(a, b));
820 case VECTOR_OP_AND: return (v0, v1, vm) ->
821 v0.bOp(v1, vm, (i, a, b) -> (long)(a & b));
822 case VECTOR_OP_OR: return (v0, v1, vm) ->
823 v0.bOp(v1, vm, (i, a, b) -> (long)(a | b));
824 case VECTOR_OP_XOR: return (v0, v1, vm) ->
825 v0.bOp(v1, vm, (i, a, b) -> (long)(a ^ b));
826 case VECTOR_OP_LSHIFT: return (v0, v1, vm) ->
827 v0.bOp(v1, vm, (i, a, n) -> (long)(a << n));
828 case VECTOR_OP_RSHIFT: return (v0, v1, vm) ->
829 v0.bOp(v1, vm, (i, a, n) -> (long)(a >> n));
830 case VECTOR_OP_URSHIFT: return (v0, v1, vm) ->
831 v0.bOp(v1, vm, (i, a, n) -> (long)((a & LSHR_SETUP_MASK) >>> n));
832 case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
833 v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
834 case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
835 v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
836 case VECTOR_OP_COMPRESS_BITS: return (v0, v1, vm) ->
837 v0.bOp(v1, vm, (i, a, n) -> Long.compress(a, n));
838 case VECTOR_OP_EXPAND_BITS: return (v0, v1, vm) ->
839 v0.bOp(v1, vm, (i, a, n) -> Long.expand(a, n));
840 default: return null;
841 }
842 }
843
844 // FIXME: Maybe all of the public final methods in this file (the
845 // simple ones that just call lanewise) should be pushed down to
846 // the X-VectorBits template. They can't optimize properly at
847 // this level, and must rely on inlining. Does it work?
848 // (If it works, of course keep the code here.)
849
850 /**
851 * Combines the lane values of this vector
852 * with the value of a broadcast scalar.
853 *
854 * This is a lane-wise binary operation which applies
855 * the selected operation to each lane.
856 * The return value will be equal to this expression:
857 * {@code this.lanewise(op, this.broadcast(e))}.
858 *
859 * @param op the operation used to process lane values
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    LongVector neg() {
        // Convenience alias: unary NEG routed through the shared
        // lanewise dispatch.
        return lanewise(NEG);
    }
1769
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    LongVector abs() {
        // Convenience alias: unary ABS routed through the shared
        // lanewise dispatch.
        return lanewise(ABS);
    }
1779
1780
1781 // not (~)
1782 /**
1783 * Computes the bitwise logical complement ({@code ~})
1784 * of this vector.
1785 *
1786 * This is a lane-wise binary operation which applies the
1787 * the primitive bitwise "not" operation ({@code ~})
1788 * to each lane value.
1789 *
1790 * This method is also equivalent to the expression
1791 * {@link #lanewise(VectorOperators.Unary)
1792 * lanewise}{@code (}{@link VectorOperators#NOT
1793 * NOT}{@code )}.
1794 *
1795 * <p>
1796 * This is not a full-service named operation like
1797 * {@link #add(Vector) add}. A masked version of
1798 * this operation is not directly available
1799 * but may be obtained via the masked version of
1800 * {@code lanewise}.
2340 long[] a = toArray();
2341 int[] sa = new int[a.length];
2342 for (int i = 0; i < a.length; i++) {
2343 sa[i] = (int) a[i];
2344 }
2345 return VectorShuffle.fromArray(dsp, sa, 0);
2346 }
2347
2348 /*package-private*/
    @ForceInline
    final
    VectorShuffle<Long> toShuffleTemplate(Class<?> shuffleType) {
        LongSpecies vsp = vspecies();
        // Intrinsic cast from long lanes to the byte-lane shuffle
        // representation; LongVector::toShuffle0 is the Java fallback.
        return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                                     getClass(), long.class, length(),
                                     shuffleType, byte.class, length(),
                                     this, vsp,
                                     LongVector::toShuffle0);
    }
2359
2360 /**
2361 * {@inheritDoc} <!--workaround-->
2362 * @since 19
2363 */
2364 @Override
2365 public abstract
2366 LongVector compress(VectorMask<Long> m);
2367
2368 /*package-private*/
    @ForceInline
    final
    <M extends AbstractMask<Long>>
    LongVector compressTemplate(Class<M> masktype, M m) {
        // Mask type/species must match this vector before the intrinsic runs.
        m.check(masktype, this);
        // compressHelper is the scalar fallback when no intrinsic applies.
        return (LongVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
                                                   long.class, length(), this, m,
                                                   (v1, m1) -> compressHelper(v1, m1));
    }
2378
2379 /**
2380 * {@inheritDoc} <!--workaround-->
2381 * @since 19
2382 */
2383 @Override
2384 public abstract
2385 LongVector expand(VectorMask<Long> m);
2386
2387 /*package-private*/
    @ForceInline
    final
    <M extends AbstractMask<Long>>
    LongVector expandTemplate(Class<M> masktype, M m) {
        // Mask type/species must match this vector before the intrinsic runs.
        m.check(masktype, this);
        // expandHelper is the scalar fallback when no intrinsic applies.
        return (LongVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
                                                   long.class, length(), this, m,
                                                   (v1, m1) -> expandHelper(v1, m1));
    }
2397
2398
2399 /**
2400 * {@inheritDoc} <!--workaround-->
2401 */
2402 @Override
2403 public abstract
2404 LongVector selectFrom(Vector<Long> v);
2405
2406 /*package-private*/
2407 @ForceInline
2408 final LongVector selectFromTemplate(LongVector v) {
2409 return v.rearrange(this.toShuffle());
2410 }
2411
2412 /**
2413 * {@inheritDoc} <!--workaround-->
2414 */
2415 @Override
2416 public abstract
2417 LongVector selectFrom(Vector<Long> s, VectorMask<Long> m);
2418
2779 }
2780
2781 /** {@inheritDoc} <!--workaround-->
2782 * @implNote
2783 * When this method is used on used on vectors
2784 * of type {@code LongVector},
2785 * up to nine bits of precision may be lost
2786 * for lane values of large magnitude.
2787 */
2788 @ForceInline
2789 @Override
2790 public final double[] toDoubleArray() {
2791 long[] a = toArray();
2792 double[] res = new double[a.length];
2793 for (int i = 0; i < a.length; i++) {
2794 res[i] = (double) a[i];
2795 }
2796 return res;
2797 }
2798
2799 /**
2800 * Loads a vector from an array of type {@code long[]}
2801 * starting at an offset.
2802 * For each vector lane, where {@code N} is the vector lane index, the
2803 * array element at index {@code offset + N} is placed into the
2804 * resulting vector at lane index {@code N}.
2805 *
2806 * @param species species of desired vector
2807 * @param a the array
2808 * @param offset the offset into the array
2809 * @return the vector loaded from an array
2810 * @throws IndexOutOfBoundsException
2811 * if {@code offset+N < 0} or {@code offset+N >= a.length}
2812 * for any lane {@code N} in the vector
2813 */
2814 @ForceInline
2815 public static
2816 LongVector fromArray(VectorSpecies<Long> species,
2817 long[] a, int offset) {
2818 offset = checkFromIndexSize(offset, species.length(), a.length);
2969 * @see LongVector#toIntArray()
2970 */
2971 @ForceInline
2972 public static
2973 LongVector fromArray(VectorSpecies<Long> species,
2974 long[] a, int offset,
2975 int[] indexMap, int mapOffset,
2976 VectorMask<Long> m) {
2977 if (m.allTrue()) {
2978 return fromArray(species, a, offset, indexMap, mapOffset);
2979 }
2980 else {
2981 LongSpecies vsp = (LongSpecies) species;
2982 return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
2983 }
2984 }
2985
2986
2987
2988 /**
2989 * Loads a vector from a {@linkplain MemorySegment memory segment}
2990 * starting at an offset into the memory segment.
2991 * Bytes are composed into primitive lane elements according
2992 * to the specified byte order.
2993 * The vector is arranged into lanes according to
2994 * <a href="Vector.html#lane-order">memory ordering</a>.
2995 * <p>
2996 * This method behaves as if it returns the result of calling
2997 * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
2998 * fromMemorySegment()} as follows:
2999 * <pre>{@code
3000 * var m = species.maskAll(true);
3001 * return fromMemorySegment(species, ms, offset, bo, m);
3002 * }</pre>
3003 *
3004 * @param species species of desired vector
3005 * @param ms the memory segment
3006 * @param offset the offset into the memory segment
3007 * @param bo the intended byte order
3008 * @return a vector loaded from the memory segment
3009 * @throws IndexOutOfBoundsException
3010 * if {@code offset+N*8 < 0}
3011 * or {@code offset+N*8 >= ms.byteSize()}
3012 * for any lane {@code N} in the vector
3013 * @throws IllegalArgumentException if the memory segment is a heap segment that is
3014 * not backed by a {@code byte[]} array.
3015 * @throws IllegalStateException if the memory segment's session is not alive,
3016 * or if access occurs from a thread other than the thread owning the session.
3017 * @since 19
3018 */
3019 @ForceInline
3020 public static
3021 LongVector fromMemorySegment(VectorSpecies<Long> species,
3022 MemorySegment ms, long offset,
3023 ByteOrder bo) {
3024 offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
3025 LongSpecies vsp = (LongSpecies) species;
3026 return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
3027 }
3028
    /**
     * Loads a vector from a {@linkplain MemorySegment memory segment}
     * starting at an offset into the memory segment
     * and using a mask.
     * Lanes where the mask is unset are filled with the default
     * value of {@code long} (zero).
     * Bytes are composed into primitive lane elements according
     * to the specified byte order.
     * The vector is arranged into lanes according to
     * <a href="Vector.html#lane-order">memory ordering</a>.
     * <p>
     * The following pseudocode illustrates the behavior:
     * <pre>{@code
     * var slice = ms.asSlice(offset);
     * long[] ar = new long[species.length()];
     * for (int n = 0; n < ar.length; n++) {
     *     if (m.laneIsSet(n)) {
     *         ar[n] = slice.getAtIndex(ValueLayout.JAVA_LONG.withBitAlignment(8), n);
     *     }
     * }
     * LongVector r = LongVector.fromArray(species, ar, 0);
     * }</pre>
     * @implNote
     * This operation is likely to be more efficient if
     * the specified byte order is the same as
     * {@linkplain ByteOrder#nativeOrder()
     * the platform native order},
     * since this method will not need to reorder
     * the bytes of lane values.
     *
     * @param species species of desired vector
     * @param ms the memory segment
     * @param offset the offset into the memory segment
     * @param bo the intended byte order
     * @param m the mask controlling lane selection
     * @return a vector loaded from the memory segment
     * @throws IndexOutOfBoundsException
     *         if {@code offset+N*8 < 0}
     *         or {@code offset+N*8 >= ms.byteSize()}
     *         for any lane {@code N} in the vector
     *         where the mask is set
     * @throws IllegalArgumentException if the memory segment is a heap segment that is
     *         not backed by a {@code byte[]} array.
     * @throws IllegalStateException if the memory segment's session is not alive,
     *         or if access occurs from a thread other than the thread owning the session.
     * @since 19
     */
    @ForceInline
    public static
    LongVector fromMemorySegment(VectorSpecies<Long> species,
                                 MemorySegment ms, long offset,
                                 ByteOrder bo,
                                 VectorMask<Long> m) {
        LongSpecies vsp = (LongSpecies) species;
        if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
            // Fast path: the whole vector fits inside the segment, so the
            // masked intrinsic load needs no per-lane bounds checks.
            return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
        }

        // Slow path: check only the set lanes, then load lane by lane.
        // FIXME: optimize
        checkMaskFromIndexSize(offset, vsp, m, 8, ms.byteSize());
        return vsp.ldLongOp(ms, offset, m, LongVector::memorySegmentGet);
    }
3091
3092 // Memory store operations
3093
3094 /**
3095 * Stores this vector into an array of type {@code long[]}
3096 * starting at an offset.
3097 * <p>
3098 * For each vector lane, where {@code N} is the vector lane index,
3099 * the lane element at index {@code N} is stored into the array
3100 * element {@code a[offset+N]}.
3101 *
3102 * @param a the array, of type {@code long[]}
3103 * @param offset the offset into the array
3104 * @throws IndexOutOfBoundsException
3105 * if {@code offset+N < 0} or {@code offset+N >= a.length}
3106 * for any lane {@code N} in the vector
3107 */
    @ForceInline
    public final
    void intoArray(long[] a, int offset) {
        // Check the whole [offset, offset+length()) range up front, then
        // run the unchecked intrinsic store; the trailing lambda is the
        // Java-level fallback implementation.
        offset = checkFromIndexSize(offset, length(), a.length);
        LongSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this,
            a, offset,
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3122
3123 /**
3124 * Stores this vector into an array of type {@code long[]}
3125 * starting at offset and using a mask.
3126 * <p>
3127 * For each vector lane, where {@code N} is the vector lane index,
3128 * the lane element at index {@code N} is stored into the array
3129 * element {@code a[offset+N]}.
3130 * If the mask lane at {@code N} is unset then the corresponding
3131 * array element {@code a[offset+N]} is left unchanged.
3132 * <p>
3133 * Array range checking is done for lanes where the mask is set.
3134 * Lanes where the mask is unset are not stored and do not need
3135 * to correspond to legitimate elements of {@code a}.
3136 * That is, unset lanes may correspond to array indexes less than
3137 * zero or beyond the end of the array.
3138 *
3139 * @param a the array, of type {@code long[]}
3259 * where the mask is set
3260 * @see LongVector#toIntArray()
3261 */
3262 @ForceInline
3263 public final
3264 void intoArray(long[] a, int offset,
3265 int[] indexMap, int mapOffset,
3266 VectorMask<Long> m) {
3267 if (m.allTrue()) {
3268 intoArray(a, offset, indexMap, mapOffset);
3269 }
3270 else {
3271 intoArray0(a, offset, indexMap, mapOffset, m);
3272 }
3273 }
3274
3275
3276
3277 /**
3278 * {@inheritDoc} <!--workaround-->
3279 * @since 19
3280 */
    @Override
    @ForceInline
    public final
    void intoMemorySegment(MemorySegment ms, long offset,
                           ByteOrder bo) {
        // The writability check precedes the bounds check, so a read-only
        // segment fails with UnsupportedOperationException even when the
        // offset is also out of range.
        if (ms.isReadOnly()) {
            throw new UnsupportedOperationException("Attempt to write a read-only segment");
        }

        offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
        // Swap to native byte order (if needed) before the raw store.
        maybeSwap(bo).intoMemorySegment0(ms, offset);
    }
3293
3294 /**
3295 * {@inheritDoc} <!--workaround-->
3296 * @since 19
3297 */
    @Override
    @ForceInline
    public final
    void intoMemorySegment(MemorySegment ms, long offset,
                           ByteOrder bo,
                           VectorMask<Long> m) {
        if (m.allTrue()) {
            // Degenerate to the unmasked store, which performs its own
            // read-only and bounds checks.
            intoMemorySegment(ms, offset, bo);
        } else {
            if (ms.isReadOnly()) {
                throw new UnsupportedOperationException("Attempt to write a read-only segment");
            }
            LongSpecies vsp = vspecies();
            // Bounds are validated only for lanes whose mask bit is set.
            checkMaskFromIndexSize(offset, vsp, m, 8, ms.byteSize());
            maybeSwap(bo).intoMemorySegment0(ms, offset, m);
        }
    }
3315
3316 // ================================================
3317
3318 // Low-level memory operations.
3319 //
3320 // Note that all of these operations *must* inline into a context
3321 // where the exact species of the involved vector is a
3322 // compile-time constant. Otherwise, the intrinsic generation
3323 // will fail and performance will suffer.
3324 //
3325 // In many cases this is achieved by re-deriving a version of the
3326 // method in each concrete subclass (per species). The re-derived
3327 // method simply calls one of these generic methods, with exact
3328 // parameters for the controlling metadata, which is either a
3329 // typed vector or constant species instance.
3330
3331 // Unchecked loading operations in native byte order.
3332 // Caller is responsible for applying index checks, masking, and
3333 // byte swapping.
3334
3335 /*package-private*/
3336 abstract
3337 LongVector fromArray0(long[] a, int offset);
    @ForceInline
    final
    LongVector fromArray0Template(long[] a, int offset) {
        // Unchecked load: index checking is the caller's responsibility
        // (see the "Low-level memory operations" section comment). The
        // trailing lambda is the Java-level fallback per-lane load.
        LongSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> s.ldOp(arr, (int) off,
                                    (arr_, off_, i) -> arr_[off_ + i]));
    }
3349
3350 /*package-private*/
3351 abstract
3352 LongVector fromArray0(long[] a, int offset, VectorMask<Long> m);
    @ForceInline
    final
    <M extends VectorMask<Long>>
    LongVector fromArray0Template(Class<M> maskClass, long[] a, int offset, M m) {
        // Unchecked masked load; unset lanes come back as zero (the
        // fallback ldOp leaves them at the result array's default).
        m.check(species());
        LongSpecies vsp = vspecies();
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset), m,
            a, offset, vsp,
            (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
                                        (arr_, off_, i) -> arr_[off_ + i]));
    }
3366
3367 /*package-private*/
3368 abstract
3369 LongVector fromArray0(long[] a, int offset,
3370 int[] indexMap, int mapOffset,
3371 VectorMask<Long> m);
3372 @ForceInline
3373 final
3374 <M extends VectorMask<Long>>
3375 LongVector fromArray0Template(Class<M> maskClass, long[] a, int offset,
3376 int[] indexMap, int mapOffset, M m) {
3377 LongSpecies vsp = vspecies();
3378 IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3379 Objects.requireNonNull(a);
3380 Objects.requireNonNull(indexMap);
3381 m.check(vsp);
3382 Class<? extends LongVector> vectorType = vsp.vectorType();
3383
3401 } else {
3402 vix = IntVector
3403 .fromArray(isp, indexMap, mapOffset)
3404 .add(offset);
3405 }
3406
3407 // FIXME: Check index under mask controlling.
3408 vix = VectorIntrinsics.checkIndex(vix, a.length);
3409
3410 return VectorSupport.loadWithMap(
3411 vectorType, maskClass, long.class, vsp.laneCount(),
3412 isp.vectorType(),
3413 a, ARRAY_BASE, vix, m,
3414 a, offset, indexMap, mapOffset, vsp,
3415 (c, idx, iMap, idy, s, vm) ->
3416 s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3417 }
3418
3419
3420
3421 abstract
3422 LongVector fromMemorySegment0(MemorySegment bb, long offset);
    @ForceInline
    final
    LongVector fromMemorySegment0Template(MemorySegment ms, long offset) {
        // Unchecked load from the segment in native byte order; the
        // lambda is the Java-level fallback (per-lane ldLongOp).
        LongSpecies vsp = vspecies();
        return ScopedMemoryAccess.loadFromMemorySegment(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                (MemorySegmentProxy) ms, offset, vsp,
                (msp, off, s) -> {
                    return s.ldLongOp((MemorySegment) msp, off, LongVector::memorySegmentGet);
                });
    }
3434
3435 abstract
3436 LongVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Long> m);
    @ForceInline
    final
    <M extends VectorMask<Long>>
    LongVector fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
        // Unchecked masked segment load; unset lanes stay at zero in the
        // fallback path (see the masked ldLongOp implementation).
        LongSpecies vsp = vspecies();
        m.check(vsp);
        return ScopedMemoryAccess.loadFromMemorySegmentMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                (MemorySegmentProxy) ms, offset, m, vsp,
                (msp, off, s, vm) -> {
                    return s.ldLongOp((MemorySegment) msp, off, vm, LongVector::memorySegmentGet);
                });
    }
3450
3451 // Unchecked storing operations in native byte order.
3452 // Caller is responsible for applying index checks, masking, and
3453 // byte swapping.
3454
3455 abstract
3456 void intoArray0(long[] a, int offset);
    @ForceInline
    final
    void intoArray0Template(long[] a, int offset) {
        // Unchecked store in native order; index checks are the caller's
        // responsibility. The trailing lambda is the Java-level fallback.
        LongSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, a, offset,
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_+i] = e));
    }
3469
    // Masked variant of the unchecked array store; only lanes set in
    // {@code m} are written. Caller is responsible for index checks.
    abstract
    void intoArray0(long[] a, int offset, VectorMask<Long> m);
    @ForceInline
    final
    <M extends VectorMask<Long>>
    void intoArray0Template(Class<M> maskClass, long[] a, int offset, M m) {
        m.check(species());  // mask species must match this vector's species
        LongSpecies vsp = vspecies();
        // Intrinsic candidate; lambda is the scalar masked-store fallback.
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm)
            -> v.stOp(arr, (int) off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3486
    // Masked scatter: lane i is stored at a[offset + indexMap[mapOffset+i]]
    // when mask lane i is set.
    abstract
    void intoArray0(long[] a, int offset,
                    int[] indexMap, int mapOffset,
                    VectorMask<Long> m);
    @ForceInline
    final
    <M extends VectorMask<Long>>
    void intoArray0Template(Class<M> maskClass, long[] a, int offset,
                            int[] indexMap, int mapOffset, M m) {
        m.check(species());  // mask species must match this vector's species
        LongSpecies vsp = vspecies();
        IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
        if (vsp.laneCount() == 1) {
            // Degenerate one-lane case: an ordinary masked element store.
            intoArray(a, offset + indexMap[mapOffset], m);
            return;
        }

        // NOTE(review): `vix` is the IntVector of per-lane indices, built
        // above from indexMap[mapOffset..] — computation elided here.
        // FIXME: Check index under mask controlling.
        vix = VectorIntrinsics.checkIndex(vix, a.length);

        // Intrinsic candidate; the lambda is the scalar scatter fallback.
        VectorSupport.storeWithMap(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            isp.vectorType(),
            a, arrayAddress(a, 0), vix,
            this, m,
            a, offset, indexMap, mapOffset,
            (arr, off, v, map, mo, vm)
            -> v.stOp(arr, off, vm,
                      (arr_, off_, i, e) -> {
                          int j = map[mo + i];
                          arr[off + j] = e;
                      }));
    }
3539
3540
    // Unchecked store of all lanes into the memory segment at {@code offset},
    // native byte order. Caller handles index checks and byte swapping.
    @ForceInline
    final
    void intoMemorySegment0(MemorySegment ms, long offset) {
        LongSpecies vsp = vspecies();
        // Intrinsic candidate via ScopedMemoryAccess; the lambda is the
        // scalar fallback writing lane-by-lane through memorySegmentSet.
        ScopedMemoryAccess.storeIntoMemorySegment(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                this,
                (MemorySegmentProxy) ms, offset,
                (msp, off, v) -> {
                    v.stLongOp((MemorySegment) msp, off, LongVector::memorySegmentSet);
                });
    }
3553
    // Masked variant of the unchecked memory-segment store; only lanes set
    // in {@code m} are written. Caller handles index checks and byte swapping.
    abstract
    void intoMemorySegment0(MemorySegment bb, long offset, VectorMask<Long> m);
    @ForceInline
    final
    <M extends VectorMask<Long>>
    void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
        LongSpecies vsp = vspecies();
        m.check(vsp);  // mask species must match this vector's species
        // Intrinsic candidate; the lambda is the scalar masked fallback.
        ScopedMemoryAccess.storeIntoMemorySegmentMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                this, m,
                (MemorySegmentProxy) ms, offset,
                (msp, off, v, vm) -> {
                    v.stLongOp((MemorySegment) msp, off, vm, LongVector::memorySegmentSet);
                });
    }
3570
3571
3572 // End of low-level memory operations.
3573
3574 private static
3575 void checkMaskFromIndexSize(int offset,
3576 LongSpecies vsp,
3577 VectorMask<Long> m,
3578 int scale,
3579 int limit) {
3580 ((AbstractMask<Long>)m)
3581 .checkIndexByLane(offset, limit, vsp.iota(), scale);
3582 }
3583
3584 private static
3585 void checkMaskFromIndexSize(long offset,
3586 LongSpecies vsp,
3587 VectorMask<Long> m,
3588 int scale,
3589 long limit) {
3590 ((AbstractMask<Long>)m)
3591 .checkIndexByLane(offset, limit, vsp.iota(), scale);
3592 }
3593
3594 @ForceInline
3595 private void conditionalStoreNYI(int offset,
3596 LongSpecies vsp,
3597 VectorMask<Long> m,
3598 int scale,
3599 int limit) {
3600 if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3601 String msg =
3602 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3603 offset, limit, m, vsp);
3604 throw new AssertionError(msg);
3605 }
3606 }
3607
3608 /*package-private*/
3609 @Override
3610 @ForceInline
3611 final
3612 LongVector maybeSwap(ByteOrder bo) {
3613 if (bo != NATIVE_ENDIAN) {
3875 }
3876 }
3877 return dummyVector().vectorFactory(res);
3878 }
3879
3880 /*package-private*/
3881 @ForceInline
3882 <M> LongVector ldOp(M memory, int offset,
3883 FLdOp<M> f) {
3884 return dummyVector().ldOp(memory, offset, f);
3885 }
3886
3887 /*package-private*/
3888 @ForceInline
3889 <M> LongVector ldOp(M memory, int offset,
3890 VectorMask<Long> m,
3891 FLdOp<M> f) {
3892 return dummyVector().ldOp(memory, offset, m, f);
3893 }
3894
3895 /*package-private*/
3896 @ForceInline
3897 LongVector ldLongOp(MemorySegment memory, long offset,
3898 FLdLongOp f) {
3899 return dummyVector().ldLongOp(memory, offset, f);
3900 }
3901
3902 /*package-private*/
3903 @ForceInline
3904 LongVector ldLongOp(MemorySegment memory, long offset,
3905 VectorMask<Long> m,
3906 FLdLongOp f) {
3907 return dummyVector().ldLongOp(memory, offset, m, f);
3908 }
3909
3910 /*package-private*/
3911 @ForceInline
3912 <M> void stOp(M memory, int offset, FStOp<M> f) {
3913 dummyVector().stOp(memory, offset, f);
3914 }
3915
3916 /*package-private*/
3917 @ForceInline
3918 <M> void stOp(M memory, int offset,
3919 AbstractMask<Long> m,
3920 FStOp<M> f) {
3921 dummyVector().stOp(memory, offset, m, f);
3922 }
3923
3924 /*package-private*/
3925 @ForceInline
3926 void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
3927 dummyVector().stLongOp(memory, offset, f);
3928 }
3929
3930 /*package-private*/
3931 @ForceInline
3932 void stLongOp(MemorySegment memory, long offset,
3933 AbstractMask<Long> m,
3934 FStLongOp f) {
3935 dummyVector().stLongOp(memory, offset, m, f);
3936 }
3937
3938 // N.B. Make sure these constant vectors and
3939 // masks load up correctly into registers.
3940 //
3941 // Also, see if we can avoid all that switching.
3942 // Could we cache both vectors and both masks in
3943 // this species object?
3944
3945 // Zero and iota vector access
3946 @Override
3947 @ForceInline
3948 public final LongVector zero() {
3949 if ((Class<?>) vectorType() == LongMaxVector.class)
3950 return LongMaxVector.ZERO;
3951 switch (vectorBitSize()) {
3952 case 64: return Long64Vector.ZERO;
3953 case 128: return Long128Vector.ZERO;
3954 case 256: return Long256Vector.ZERO;
3955 case 512: return Long512Vector.ZERO;
3956 }
3957 throw new AssertionError();
|