src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 
  27 import java.nio.ByteBuffer;
  28 import java.nio.ByteOrder;
  29 import java.nio.ReadOnlyBufferException;
  30 import java.util.Arrays;
  31 import java.util.Objects;
  32 import java.util.function.Function;
  33 import java.util.function.UnaryOperator;
  34 



  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code int} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class IntVector extends AbstractVector<Integer> {
  53 
  54     IntVector(int[] vec) {
  55         super(vec);
  56     }
  57 
  58     static final int FORBID_OPCODE_KIND = VO_ONLYFP;
  59 


  60     @ForceInline
  61     static int opCode(Operator op) {
  62         return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
  63     }
  64     @ForceInline
  65     static int opCode(Operator op, int requireKind) {
  66         requireKind |= VO_OPCODE_VALID;
  67         return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
  68     }
  69     @ForceInline
  70     static boolean opKind(Operator op, int bit) {
  71         return VectorOperators.opKind(op, bit);
  72     }
  73 
  74     // Virtualized factories and operators,
  75     // coded with portable definitions.
  76     // These are all @ForceInline in case
  77     // they need to be used performantly.
  78     // The various shape-specific subclasses
  79     // also specialize them by wrapping

 334         return vectorFactory(res);
 335     }
 336 
 337     /*package-private*/
 338     @ForceInline
 339     final
 340     <M> IntVector ldOp(M memory, int offset,
 341                                   VectorMask<Integer> m,
 342                                   FLdOp<M> f) {
 343         //int[] vec = vec();
 344         int[] res = new int[length()];
 345         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 346         for (int i = 0; i < res.length; i++) {
 347             if (mbits[i]) {
 348                 res[i] = f.apply(memory, offset, i);
 349             }
 350         }
 351         return vectorFactory(res);
 352     }
 353 
 354     interface FStOp<M> {
 355         void apply(M memory, int offset, int i, int a);
 356     }
 357 
 358     /*package-private*/
 359     @ForceInline
 360     final
 361     <M> void stOp(M memory, int offset,
 362                   FStOp<M> f) {
 363         int[] vec = vec();
 364         for (int i = 0; i < vec.length; i++) {
 365             f.apply(memory, offset, i, vec[i]);
 366         }
 367     }
 368 
 369     /*package-private*/
 370     @ForceInline
 371     final
 372     <M> void stOp(M memory, int offset,
 373                   VectorMask<Integer> m,
 374                   FStOp<M> f) {
 375         int[] vec = vec();
 376         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 377         for (int i = 0; i < vec.length; i++) {
 378             if (mbits[i]) {
 379                 f.apply(memory, offset, i, vec[i]);
 380             }
 381         }
 382     }
 383 
 384     // Binary test
 385 
 386     /*package-private*/
 387     interface FBinTest {
 388         boolean apply(int cond, int i, int a, int b);
 389     }
 390 
 391     /*package-private*/
 392     @ForceInline
 393     final
 394     AbstractMask<Integer> bTest(int cond,
 395                                   Vector<Integer> o,
 396                                   FBinTest f) {
 397         int[] vec1 = vec();
 398         int[] vec2 = ((IntVector)o).vec();
 399         boolean[] bits = new boolean[length()];
 400         for (int i = 0; i < length(); i++){
 401             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 402         }
 403         return maskFactory(bits);

 414     static int rotateRight(int a, int n) {
 415         return Integer.rotateRight(a, n);
 416     }
 417 
 418     /*package-private*/
 419     @Override
 420     abstract IntSpecies vspecies();
 421 
 422     /*package-private*/
 423     @ForceInline
 424     static long toBits(int e) {
 425         return  e;
 426     }
 427 
 428     /*package-private*/
 429     @ForceInline
 430     static int fromBits(long bits) {
 431         return ((int)bits);
 432     }
 433 
 434     // Static factories (other than memory operations)
 435 
 436     // Note: A surprising behavior in javadoc
 437     // sometimes makes a lone /** {@inheritDoc} */
 438     // comment drop the method altogether,
  439     // apparently if the method mentions a
  440     // parameter or return type of Vector<Integer>
 441     // instead of Vector<E> as originally specified.
 442     // Adding an empty HTML fragment appears to
 443     // nudge javadoc into providing the desired
 444     // inherited documentation.  We use the HTML
 445     // comment <!--workaround--> for this.
 446 
 447     /**
 448      * Returns a vector of the given species
 449      * where all lane elements are set to
 450      * zero, the default primitive value.
 451      *
 452      * @param species species of the desired zero vector
 453      * @return a zero vector

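A minimal usage sketch of this factory (the species choice is illustrative):

    VectorSpecies<Integer> species = IntVector.SPECIES_PREFERRED;
    IntVector z = IntVector.zero(species);   // every lane holds 0
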
 603                 return lanewise(XOR, broadcast(-1), m);
 604             }
 605         }
 606         int opc = opCode(op);
 607         return VectorSupport.unaryOp(
 608             opc, getClass(), maskClass, int.class, length(),
 609             this, m,
 610             UN_IMPL.find(op, opc, IntVector::unaryOperations));
 611     }
 612 
 613     private static final
 614     ImplCache<Unary, UnaryOperation<IntVector, VectorMask<Integer>>>
 615         UN_IMPL = new ImplCache<>(Unary.class, IntVector.class);
 616 
 617     private static UnaryOperation<IntVector, VectorMask<Integer>> unaryOperations(int opc_) {
 618         switch (opc_) {
 619             case VECTOR_OP_NEG: return (v0, m) ->
 620                     v0.uOp(m, (i, a) -> (int) -a);
 621             case VECTOR_OP_ABS: return (v0, m) ->
 622                     v0.uOp(m, (i, a) -> (int) Math.abs(a));
 623             default: return null;
 624         }
 625     }
 626 
 627     // Binary lanewise support
 628 
 629     /**
 630      * {@inheritDoc} <!--workaround-->
 631      * @see #lanewise(VectorOperators.Binary,int)
 632      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
 633      */
 634     @Override
 635     public abstract
 636     IntVector lanewise(VectorOperators.Binary op,
 637                                   Vector<Integer> v);
 638     @ForceInline
 639     final
 640     IntVector lanewiseTemplate(VectorOperators.Binary op,
 641                                           Vector<Integer> v) {
 642         IntVector that = (IntVector) v;

 743             case VECTOR_OP_MAX: return (v0, v1, vm) ->
 744                     v0.bOp(v1, vm, (i, a, b) -> (int)Math.max(a, b));
 745             case VECTOR_OP_MIN: return (v0, v1, vm) ->
 746                     v0.bOp(v1, vm, (i, a, b) -> (int)Math.min(a, b));
 747             case VECTOR_OP_AND: return (v0, v1, vm) ->
 748                     v0.bOp(v1, vm, (i, a, b) -> (int)(a & b));
 749             case VECTOR_OP_OR: return (v0, v1, vm) ->
 750                     v0.bOp(v1, vm, (i, a, b) -> (int)(a | b));
 751             case VECTOR_OP_XOR: return (v0, v1, vm) ->
 752                     v0.bOp(v1, vm, (i, a, b) -> (int)(a ^ b));
 753             case VECTOR_OP_LSHIFT: return (v0, v1, vm) ->
 754                     v0.bOp(v1, vm, (i, a, n) -> (int)(a << n));
 755             case VECTOR_OP_RSHIFT: return (v0, v1, vm) ->
 756                     v0.bOp(v1, vm, (i, a, n) -> (int)(a >> n));
 757             case VECTOR_OP_URSHIFT: return (v0, v1, vm) ->
 758                     v0.bOp(v1, vm, (i, a, n) -> (int)((a & LSHR_SETUP_MASK) >>> n));
 759             case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
 760                     v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
 761             case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
 762                     v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
 763             default: return null;
 764         }
 765     }
 766 
 767     // FIXME: Maybe all of the public final methods in this file (the
 768     // simple ones that just call lanewise) should be pushed down to
 769     // the X-VectorBits template.  They can't optimize properly at
 770     // this level, and must rely on inlining.  Does it work?
 771     // (If it works, of course keep the code here.)
 772 
 773     /**
 774      * Combines the lane values of this vector
 775      * with the value of a broadcast scalar.
 776      *
 777      * This is a lane-wise binary operation which applies
 778      * the selected operation to each lane.
 779      * The return value will be equal to this expression:
 780      * {@code this.lanewise(op, this.broadcast(e))}.
 781      *
 782      * @param op the operation used to process lane values

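A short sketch of the scalar-broadcast equivalence described above (values are illustrative):

    VectorSpecies<Integer> sp = IntVector.SPECIES_PREFERRED;
    IntVector a  = IntVector.broadcast(sp, 7);
    IntVector r1 = a.lanewise(VectorOperators.ADD, 3);
    IntVector r2 = a.lanewise(VectorOperators.ADD, a.broadcast(3));
    // r1 and r2 are equal lane-for-lane
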
1728     /**
1729      * {@inheritDoc} <!--workaround-->
1730      */
1731     @Override
1732     @ForceInline
1733     public final
1734     IntVector neg() {
1735         return lanewise(NEG);
1736     }
1737 
1738     /**
1739      * {@inheritDoc} <!--workaround-->
1740      */
1741     @Override
1742     @ForceInline
1743     public final
1744     IntVector abs() {
1745         return lanewise(ABS);
1746     }
1747 

1748     // not (~)
1749     /**
1750      * Computes the bitwise logical complement ({@code ~})
1751      * of this vector.
1752      *
 1753      * This is a lane-wise unary operation which applies
 1754      * the primitive bitwise "not" operation ({@code ~})
1755      * to each lane value.
1756      *
1757      * This method is also equivalent to the expression
1758      * {@link #lanewise(VectorOperators.Unary)
1759      *    lanewise}{@code (}{@link VectorOperators#NOT
1760      *    NOT}{@code )}.
1761      *
1762      * <p>
1763      * This is not a full-service named operation like
1764      * {@link #add(Vector) add}.  A masked version of
1765      * this operation is not directly available
1766      * but may be obtained via the masked version of
1767      * {@code lanewise}.

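As the javadoc notes, the masked form is reached through lanewise; a minimal sketch (the mask pattern is illustrative):

    VectorSpecies<Integer> sp = IntVector.SPECIES_PREFERRED;
    VectorMask<Integer> m = VectorMask.fromLong(sp, 0x5555_5555_5555_5555L);
    IntVector v = IntVector.broadcast(sp, 0xF0);
    IntVector r = v.lanewise(VectorOperators.NOT, m); // ~lane where m is set, else unchanged
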
2354         int[] a = toArray();
2355         int[] sa = new int[a.length];
2356         for (int i = 0; i < a.length; i++) {
2357             sa[i] = (int) a[i];
2358         }
2359         return VectorShuffle.fromArray(dsp, sa, 0);
2360     }
2361 
2362     /*package-private*/
2363     @ForceInline
2364     final
2365     VectorShuffle<Integer> toShuffleTemplate(Class<?> shuffleType) {
2366         IntSpecies vsp = vspecies();
2367         return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
2368                                      getClass(), int.class, length(),
2369                                      shuffleType, byte.class, length(),
2370                                      this, vsp,
2371                                      IntVector::toShuffle0);
2372     }
2373 
2374     /**
2375      * {@inheritDoc} <!--workaround-->
2376      */
2377     @Override
2378     public abstract
2379     IntVector selectFrom(Vector<Integer> v);
2380 
2381     /*package-private*/
2382     @ForceInline
2383     final IntVector selectFromTemplate(IntVector v) {
2384         return v.rearrange(this.toShuffle());
2385     }
2386 
2387     /**
2388      * {@inheritDoc} <!--workaround-->
2389      */
2390     @Override
2391     public abstract
2392     IntVector selectFrom(Vector<Integer> s, VectorMask<Integer> m);
2393 

2759         return res;
2760     }
2761 
2762     /** {@inheritDoc} <!--workaround-->
2763      * @implNote
 2764      * When this method is used on vectors
2765      * of type {@code IntVector},
2766      * there will be no loss of precision.
2767      */
2768     @ForceInline
2769     @Override
2770     public final double[] toDoubleArray() {
2771         int[] a = toArray();
2772         double[] res = new double[a.length];
2773         for (int i = 0; i < a.length; i++) {
2774             res[i] = (double) a[i];
2775         }
2776         return res;
2777     }
2778 
2779     /**
2780      * Loads a vector from a byte array starting at an offset.
2781      * Bytes are composed into primitive lane elements according
2782      * to the specified byte order.
2783      * The vector is arranged into lanes according to
2784      * <a href="Vector.html#lane-order">memory ordering</a>.
2785      * <p>
2786      * This method behaves as if it returns the result of calling
2787      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2788      * fromByteBuffer()} as follows:
2789      * <pre>{@code
2790      * var bb = ByteBuffer.wrap(a);
2791      * var m = species.maskAll(true);
2792      * return fromByteBuffer(species, bb, offset, bo, m);
2793      * }</pre>
2794      *
2795      * @param species species of desired vector
2796      * @param a the byte array
2797      * @param offset the offset into the array
2798      * @param bo the intended byte order
2799      * @return a vector loaded from a byte array
2800      * @throws IndexOutOfBoundsException
2801      *         if {@code offset+N*ESIZE < 0}
2802      *         or {@code offset+(N+1)*ESIZE > a.length}
2803      *         for any lane {@code N} in the vector
2804      */
2805     @ForceInline
2806     public static
2807     IntVector fromByteArray(VectorSpecies<Integer> species,
2808                                        byte[] a, int offset,
2809                                        ByteOrder bo) {
2810         offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
2811         IntSpecies vsp = (IntSpecies) species;
2812         return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
2813     }
2814 
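A usage sketch for the unmasked byte-array load above (buffer contents and byte order are illustrative):

    VectorSpecies<Integer> sp = IntVector.SPECIES_PREFERRED;
    byte[] bytes = new byte[sp.vectorByteSize()];
    IntVector v = IntVector.fromByteArray(sp, bytes, 0, ByteOrder.LITTLE_ENDIAN);
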
2815     /**
2816      * Loads a vector from a byte array starting at an offset
2817      * and using a mask.
2818      * Lanes where the mask is unset are filled with the default
2819      * value of {@code int} (zero).
2820      * Bytes are composed into primitive lane elements according
2821      * to the specified byte order.
2822      * The vector is arranged into lanes according to
2823      * <a href="Vector.html#lane-order">memory ordering</a>.
2824      * <p>
2825      * This method behaves as if it returns the result of calling
2826      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2827      * fromByteBuffer()} as follows:
2828      * <pre>{@code
2829      * var bb = ByteBuffer.wrap(a);
2830      * return fromByteBuffer(species, bb, offset, bo, m);
2831      * }</pre>
2832      *
2833      * @param species species of desired vector
2834      * @param a the byte array
2835      * @param offset the offset into the array
2836      * @param bo the intended byte order
2837      * @param m the mask controlling lane selection
2838      * @return a vector loaded from a byte array
2839      * @throws IndexOutOfBoundsException
2840      *         if {@code offset+N*ESIZE < 0}
2841      *         or {@code offset+(N+1)*ESIZE > a.length}
2842      *         for any lane {@code N} in the vector
2843      *         where the mask is set
2844      */
2845     @ForceInline
2846     public static
2847     IntVector fromByteArray(VectorSpecies<Integer> species,
2848                                        byte[] a, int offset,
2849                                        ByteOrder bo,
2850                                        VectorMask<Integer> m) {
2851         IntSpecies vsp = (IntSpecies) species;
2852         if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
2853             return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
2854         }
2855 
2856         // FIXME: optimize
2857         checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
2858         ByteBuffer wb = wrapper(a, bo);
2859         return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
2860                    (wb_, o, i)  -> wb_.getInt(o + i * 4));
2861     }
2862 
2863     /**
2864      * Loads a vector from an array of type {@code int[]}
2865      * starting at an offset.
2866      * For each vector lane, where {@code N} is the vector lane index, the
2867      * array element at index {@code offset + N} is placed into the
2868      * resulting vector at lane index {@code N}.
2869      *
2870      * @param species species of desired vector
2871      * @param a the array
2872      * @param offset the offset into the array
2873      * @return the vector loaded from an array
2874      * @throws IndexOutOfBoundsException
2875      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2876      *         for any lane {@code N} in the vector
2877      */
2878     @ForceInline
2879     public static
2880     IntVector fromArray(VectorSpecies<Integer> species,
2881                                    int[] a, int offset) {
2882         offset = checkFromIndexSize(offset, species.length(), a.length);

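This plain array load is the building block of the usual strip-mining loop; a sketch (array size and operation are illustrative):

    VectorSpecies<Integer> sp = IntVector.SPECIES_PREFERRED;
    int[] data = new int[1024];
    int i = 0;
    for (; i <= data.length - sp.length(); i += sp.length()) {
        IntVector v = IntVector.fromArray(sp, data, i);
        v.lanewise(VectorOperators.ADD, 1).intoArray(data, i);
    }
    for (; i < data.length; i++) {
        data[i] += 1;   // scalar tail
    }
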
3015      * @see IntVector#toIntArray()
3016      */
3017     @ForceInline
3018     public static
3019     IntVector fromArray(VectorSpecies<Integer> species,
3020                                    int[] a, int offset,
3021                                    int[] indexMap, int mapOffset,
3022                                    VectorMask<Integer> m) {
3023         if (m.allTrue()) {
3024             return fromArray(species, a, offset, indexMap, mapOffset);
3025         }
3026         else {
3027             IntSpecies vsp = (IntSpecies) species;
3028             return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
3029         }
3030     }
3031 
3032 
3033 
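A sketch of the masked gather form just defined (table and index-map values are illustrative):

    VectorSpecies<Integer> sp = IntVector.SPECIES_PREFERRED;
    int[] table = {10, 20, 30, 40, 50, 60, 70, 80};
    int[] indexMap = new int[sp.length()];          // all zeros: each lane reads table[0]
    VectorMask<Integer> m = sp.maskAll(true);
    IntVector g = IntVector.fromArray(sp, table, 0, indexMap, 0, m);
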
3034     /**
3035      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
3036      * starting at an offset into the byte buffer.
3037      * Bytes are composed into primitive lane elements according
3038      * to the specified byte order.
3039      * The vector is arranged into lanes according to
3040      * <a href="Vector.html#lane-order">memory ordering</a>.
3041      * <p>
3042      * This method behaves as if it returns the result of calling
3043      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
3044      * fromByteBuffer()} as follows:
3045      * <pre>{@code
3046      * var m = species.maskAll(true);
3047      * return fromByteBuffer(species, bb, offset, bo, m);
3048      * }</pre>
3049      *
3050      * @param species species of desired vector
3051      * @param bb the byte buffer
3052      * @param offset the offset into the byte buffer
3053      * @param bo the intended byte order
3054      * @return a vector loaded from a byte buffer
3055      * @throws IndexOutOfBoundsException
3056      *         if {@code offset+N*4 < 0}
3057      *         or {@code offset+N*4 >= bb.limit()}
3058      *         for any lane {@code N} in the vector
3059      */
3060     @ForceInline
3061     public static
3062     IntVector fromByteBuffer(VectorSpecies<Integer> species,
3063                                         ByteBuffer bb, int offset,
3064                                         ByteOrder bo) {
3065         offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
3066         IntSpecies vsp = (IntSpecies) species;
3067         return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
3068     }
3069 
3070     /**
3071      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
3072      * starting at an offset into the byte buffer
3073      * and using a mask.
3074      * Lanes where the mask is unset are filled with the default
3075      * value of {@code int} (zero).
3076      * Bytes are composed into primitive lane elements according
3077      * to the specified byte order.
3078      * The vector is arranged into lanes according to
3079      * <a href="Vector.html#lane-order">memory ordering</a>.
3080      * <p>
3081      * The following pseudocode illustrates the behavior:
3082      * <pre>{@code
3083      * IntBuffer eb = bb.duplicate()
3084      *     .position(offset)
3085      *     .order(bo).asIntBuffer();
3086      * int[] ar = new int[species.length()];
3087      * for (int n = 0; n < ar.length; n++) {
3088      *     if (m.laneIsSet(n)) {
3089      *         ar[n] = eb.get(n);
3090      *     }
3091      * }
3092      * IntVector r = IntVector.fromArray(species, ar, 0);
3093      * }</pre>
3094      * @implNote
3095      * This operation is likely to be more efficient if
3096      * the specified byte order is the same as
3097      * {@linkplain ByteOrder#nativeOrder()
3098      * the platform native order},
3099      * since this method will not need to reorder
3100      * the bytes of lane values.
3101      *
3102      * @param species species of desired vector
3103      * @param bb the byte buffer
3104      * @param offset the offset into the byte buffer
3105      * @param bo the intended byte order
3106      * @param m the mask controlling lane selection
3107      * @return a vector loaded from a byte buffer
3108      * @throws IndexOutOfBoundsException
3109      *         if {@code offset+N*4 < 0}
3110      *         or {@code offset+N*4 >= bb.limit()}
3111      *         for any lane {@code N} in the vector
3112      *         where the mask is set
3113      */
3114     @ForceInline
3115     public static
3116     IntVector fromByteBuffer(VectorSpecies<Integer> species,
3117                                         ByteBuffer bb, int offset,
3118                                         ByteOrder bo,
3119                                         VectorMask<Integer> m) {
3120         IntSpecies vsp = (IntSpecies) species;
3121         if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
3122             return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
3123         }
3124 
3125         // FIXME: optimize
3126         checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
3127         ByteBuffer wb = wrapper(bb, bo);
3128         return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
3129                    (wb_, o, i)  -> wb_.getInt(o + i * 4));
3130     }
3131 
3132     // Memory store operations
3133 
3134     /**
3135      * Stores this vector into an array of type {@code int[]}
3136      * starting at an offset.
3137      * <p>
3138      * For each vector lane, where {@code N} is the vector lane index,
3139      * the lane element at index {@code N} is stored into the array
3140      * element {@code a[offset+N]}.
3141      *
3142      * @param a the array, of type {@code int[]}
3143      * @param offset the offset into the array
3144      * @throws IndexOutOfBoundsException
3145      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3146      *         for any lane {@code N} in the vector
3147      */
3148     @ForceInline
3149     public final
3150     void intoArray(int[] a, int offset) {
3151         offset = checkFromIndexSize(offset, length(), a.length);
3152         IntSpecies vsp = vspecies();
3153         VectorSupport.store(
3154             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3155             a, arrayAddress(a, offset),
3156             this,
3157             a, offset,
3158             (arr, off, v)
3159             -> v.stOp(arr, off,
3160                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3161     }
3162 
3163     /**
3164      * Stores this vector into an array of type {@code int[]}
3165      * starting at offset and using a mask.
3166      * <p>
3167      * For each vector lane, where {@code N} is the vector lane index,
3168      * the lane element at index {@code N} is stored into the array
3169      * element {@code a[offset+N]}.
3170      * If the mask lane at {@code N} is unset then the corresponding
3171      * array element {@code a[offset+N]} is left unchanged.
3172      * <p>
3173      * Array range checking is done for lanes where the mask is set.
3174      * Lanes where the mask is unset are not stored and do not need
3175      * to correspond to legitimate elements of {@code a}.
3176      * That is, unset lanes may correspond to array indexes less than
3177      * zero or beyond the end of the array.
3178      *
3179      * @param a the array, of type {@code int[]}

3280      *         where the mask is set
3281      * @see IntVector#toIntArray()
3282      */
3283     @ForceInline
3284     public final
3285     void intoArray(int[] a, int offset,
3286                    int[] indexMap, int mapOffset,
3287                    VectorMask<Integer> m) {
3288         if (m.allTrue()) {
3289             intoArray(a, offset, indexMap, mapOffset);
3290         }
3291         else {
3292             intoArray0(a, offset, indexMap, mapOffset, m);
3293         }
3294     }
3295 
3296 
3297 
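The out-of-range tolerance of masked stores described above enables single-pass tail handling with indexInRange; a sketch:

    VectorSpecies<Integer> sp = IntVector.SPECIES_PREFERRED;
    int[] a = new int[100];
    for (int i = 0; i < a.length; i += sp.length()) {
        VectorMask<Integer> m = sp.indexInRange(i, a.length);
        IntVector v = IntVector.fromArray(sp, a, i, m);
        v.lanewise(VectorOperators.NEG, m).intoArray(a, i, m);
    }
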
3298     /**
3299      * {@inheritDoc} <!--workaround-->

3300      */
3301     @Override
3302     @ForceInline
3303     public final
3304     void intoByteArray(byte[] a, int offset,
3305                        ByteOrder bo) {
3306         offset = checkFromIndexSize(offset, byteSize(), a.length);
3307         maybeSwap(bo).intoByteArray0(a, offset);
3308     }
3309 
3310     /**
3311      * {@inheritDoc} <!--workaround-->
3312      */
3313     @Override
3314     @ForceInline
3315     public final
3316     void intoByteArray(byte[] a, int offset,
3317                        ByteOrder bo,
3318                        VectorMask<Integer> m) {
3319         if (m.allTrue()) {
3320             intoByteArray(a, offset, bo);
3321         } else {
3322             IntSpecies vsp = vspecies();
3323             checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
3324             maybeSwap(bo).intoByteArray0(a, offset, m);
3325         }
3326     }
3327 
3328     /**
3329      * {@inheritDoc} <!--workaround-->
3330      */
3331     @Override
3332     @ForceInline
3333     public final
3334     void intoByteBuffer(ByteBuffer bb, int offset,
3335                         ByteOrder bo) {
3336         if (ScopedMemoryAccess.isReadOnly(bb)) {
3337             throw new ReadOnlyBufferException();
3338         }
3339         offset = checkFromIndexSize(offset, byteSize(), bb.limit());
3340         maybeSwap(bo).intoByteBuffer0(bb, offset);
3341     }
3342 
3343     /**
3344      * {@inheritDoc} <!--workaround-->

3345      */
3346     @Override
3347     @ForceInline
3348     public final
3349     void intoByteBuffer(ByteBuffer bb, int offset,
3350                         ByteOrder bo,
3351                         VectorMask<Integer> m) {
3352         if (m.allTrue()) {
3353             intoByteBuffer(bb, offset, bo);
3354         } else {
3355             if (bb.isReadOnly()) {
3356                 throw new ReadOnlyBufferException();
3357             }
3358             IntSpecies vsp = vspecies();
3359             checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
3360             maybeSwap(bo).intoByteBuffer0(bb, offset, m);
3361         }
3362     }
3363 
3364     // ================================================
3365 
3366     // Low-level memory operations.
3367     //
3368     // Note that all of these operations *must* inline into a context
3369     // where the exact species of the involved vector is a
3370     // compile-time constant.  Otherwise, the intrinsic generation
3371     // will fail and performance will suffer.
3372     //
3373     // In many cases this is achieved by re-deriving a version of the
3374     // method in each concrete subclass (per species).  The re-derived
3375     // method simply calls one of these generic methods, with exact
3376     // parameters for the controlling metadata, which is either a
3377     // typed vector or constant species instance.
3378 
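In client code this constraint is usually met by pinning the species in a static final field, so every call site sees a compile-time constant; a minimal sketch:

    // Sketch: a constant species lets the intrinsics specialize at each use site.
    static final VectorSpecies<Integer> SPECIES = IntVector.SPECIES_PREFERRED;
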
3379     // Unchecked loading operations in native byte order.
3380     // Caller is responsible for applying index checks, masking, and
3381     // byte swapping.
3382 
3383     /*package-private*/
3384     abstract
3385     IntVector fromArray0(int[] a, int offset);
3386     @ForceInline
3387     final
3388     IntVector fromArray0Template(int[] a, int offset) {
3389         IntSpecies vsp = vspecies();
3390         return VectorSupport.load(
3391             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3392             a, arrayAddress(a, offset),
3393             a, offset, vsp,
3394             (arr, off, s) -> s.ldOp(arr, off,
3395                                     (arr_, off_, i) -> arr_[off_ + i]));
3396     }
3397 
3398     /*package-private*/
3399     abstract
3400     IntVector fromArray0(int[] a, int offset, VectorMask<Integer> m);
3401     @ForceInline
3402     final
3403     <M extends VectorMask<Integer>>
3404     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3405         m.check(species());
3406         IntSpecies vsp = vspecies();
3407         return VectorSupport.loadMasked(
3408             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3409             a, arrayAddress(a, offset), m,
3410             a, offset, vsp,
3411             (arr, off, s, vm) -> s.ldOp(arr, off, vm,
3412                                         (arr_, off_, i) -> arr_[off_ + i]));
3413     }
3414 
3415     /*package-private*/
3416     abstract
3417     IntVector fromArray0(int[] a, int offset,
3418                                     int[] indexMap, int mapOffset,
3419                                     VectorMask<Integer> m);
3420     @ForceInline
3421     final
3422     <M extends VectorMask<Integer>>
3423     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset,
3424                                             int[] indexMap, int mapOffset, M m) {
3425         IntSpecies vsp = vspecies();
3426         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3427         Objects.requireNonNull(a);
3428         Objects.requireNonNull(indexMap);
3429         m.check(vsp);
3430         Class<? extends IntVector> vectorType = vsp.vectorType();
3431 
3432         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
3433         IntVector vix = IntVector
3434             .fromArray(isp, indexMap, mapOffset)
3435             .add(offset);
3436 
3437         // FIXME: Check index under mask controlling.
3438         vix = VectorIntrinsics.checkIndex(vix, a.length);
3439 
3440         return VectorSupport.loadWithMap(
3441             vectorType, maskClass, int.class, vsp.laneCount(),
3442             isp.vectorType(),
3443             a, ARRAY_BASE, vix, m,
3444             a, offset, indexMap, mapOffset, vsp,
3445             (c, idx, iMap, idy, s, vm) ->
3446             s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3447     }
3448 
3449 
3450 
3451     @Override
3452     abstract
3453     IntVector fromByteArray0(byte[] a, int offset);
3454     @ForceInline
3455     final
3456     IntVector fromByteArray0Template(byte[] a, int offset) {
3457         IntSpecies vsp = vspecies();
3458         return VectorSupport.load(
3459             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3460             a, byteArrayAddress(a, offset),
3461             a, offset, vsp,
3462             (arr, off, s) -> {
3463                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3464                 return s.ldOp(wb, off,
3465                         (wb_, o, i) -> wb_.getInt(o + i * 4));
3466             });
3467     }
3468 
3469     abstract
3470     IntVector fromByteArray0(byte[] a, int offset, VectorMask<Integer> m);
3471     @ForceInline
3472     final
3473     <M extends VectorMask<Integer>>
3474     IntVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3475         IntSpecies vsp = vspecies();
3476         m.check(vsp);
3477         return VectorSupport.loadMasked(
3478             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3479             a, byteArrayAddress(a, offset), m,
3480             a, offset, vsp,
3481             (arr, off, s, vm) -> {
3482                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3483                 return s.ldOp(wb, off, vm,
3484                         (wb_, o, i) -> wb_.getInt(o + i * 4));
3485             });
3486     }
3487 
3488     abstract
3489     IntVector fromByteBuffer0(ByteBuffer bb, int offset);
3490     @ForceInline
3491     final
3492     IntVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
3493         IntSpecies vsp = vspecies();
3494         return ScopedMemoryAccess.loadFromByteBuffer(
3495                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3496                 bb, offset, vsp,
3497                 (buf, off, s) -> {
3498                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3499                     return s.ldOp(wb, off,
3500                             (wb_, o, i) -> wb_.getInt(o + i * 4));
3501                 });
3502     }
3503 
3504     abstract
3505     IntVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
3506     @ForceInline
3507     final
3508     <M extends VectorMask<Integer>>
3509     IntVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
3510         IntSpecies vsp = vspecies();
3511         m.check(vsp);
3512         return ScopedMemoryAccess.loadFromByteBufferMasked(
3513                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3514                 bb, offset, m, vsp,
3515                 (buf, off, s, vm) -> {
3516                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3517                     return s.ldOp(wb, off, vm,
3518                             (wb_, o, i) -> wb_.getInt(o + i * 4));
3519                 });
3520     }
3521 
3522     // Unchecked storing operations in native byte order.
3523     // Caller is responsible for applying index checks, masking, and
3524     // byte swapping.
3525 
3526     abstract
3527     void intoArray0(int[] a, int offset);
3528     @ForceInline
3529     final
3530     void intoArray0Template(int[] a, int offset) {
3531         IntSpecies vsp = vspecies();
3532         VectorSupport.store(
3533             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3534             a, arrayAddress(a, offset),
3535             this, a, offset,
3536             (arr, off, v)
3537             -> v.stOp(arr, off,
3538                       (arr_, off_, i, e) -> arr_[off_+i] = e));
3539     }
3540 
3541     abstract
3542     void intoArray0(int[] a, int offset, VectorMask<Integer> m);
3543     @ForceInline
3544     final
3545     <M extends VectorMask<Integer>>
3546     void intoArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3547         m.check(species());
3548         IntSpecies vsp = vspecies();
3549         VectorSupport.storeMasked(
3550             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3551             a, arrayAddress(a, offset),
3552             this, m, a, offset,
3553             (arr, off, v, vm)
3554             -> v.stOp(arr, off, vm,
3555                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3556     }
3557 
3558     abstract
3559     void intoArray0(int[] a, int offset,
3560                     int[] indexMap, int mapOffset,
3561                     VectorMask<Integer> m);
3562     @ForceInline
3563     final
3564     <M extends VectorMask<Integer>>
3565     void intoArray0Template(Class<M> maskClass, int[] a, int offset,
3566                             int[] indexMap, int mapOffset, M m) {
3567         m.check(species());
3568         IntSpecies vsp = vspecies();
3569         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3570         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3571         IntVector vix = IntVector
3572             .fromArray(isp, indexMap, mapOffset)
3573             .add(offset);
3574 
3575         // FIXME: Check index under mask controlling.
3576         vix = VectorIntrinsics.checkIndex(vix, a.length);
3577 
3578         VectorSupport.storeWithMap(
3579             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3580             isp.vectorType(),
3581             a, arrayAddress(a, 0), vix,
3582             this, m,
3583             a, offset, indexMap, mapOffset,
3584             (arr, off, v, map, mo, vm)
3585             -> v.stOp(arr, off, vm,
3586                       (arr_, off_, i, e) -> {
3587                           int j = map[mo + i];
3588                           arr[off + j] = e;
3589                       }));
3590     }
3591 
3592 
3593     abstract
3594     void intoByteArray0(byte[] a, int offset);
3595     @ForceInline
3596     final
3597     void intoByteArray0Template(byte[] a, int offset) {
3598         IntSpecies vsp = vspecies();
3599         VectorSupport.store(
3600             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3601             a, byteArrayAddress(a, offset),
3602             this, a, offset,
3603             (arr, off, v) -> {
3604                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3605                 v.stOp(wb, off,
3606                         (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
3607             });
3608     }
3609 
3610     abstract
3611     void intoByteArray0(byte[] a, int offset, VectorMask<Integer> m);
3612     @ForceInline
3613     final
3614     <M extends VectorMask<Integer>>
3615     void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3616         IntSpecies vsp = vspecies();
3617         m.check(vsp);
3618         VectorSupport.storeMasked(
3619             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3620             a, byteArrayAddress(a, offset),
3621             this, m, a, offset,
3622             (arr, off, v, vm) -> {
3623                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3624                 v.stOp(wb, off, vm,
3625                         (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
3626             });
3627     }
3628 
3629     @ForceInline
3630     final
3631     void intoByteBuffer0(ByteBuffer bb, int offset) {
3632         IntSpecies vsp = vspecies();
3633         ScopedMemoryAccess.storeIntoByteBuffer(
3634                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3635                 this, bb, offset,
3636                 (buf, off, v) -> {
3637                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3638                     v.stOp(wb, off,
3639                             (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
3640                 });
3641     }
3642 
3643     abstract
3644     void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
3645     @ForceInline
3646     final
3647     <M extends VectorMask<Integer>>
3648     void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
3649         IntSpecies vsp = vspecies();
3650         m.check(vsp);
3651         ScopedMemoryAccess.storeIntoByteBufferMasked(
3652                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3653                 this, m, bb, offset,
3654                 (buf, off, v, vm) -> {
3655                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3656                     v.stOp(wb, off, vm,
3657                             (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
3658                 });
3659     }
3660 
3661 
3662     // End of low-level memory operations.
3663 
3664     private static
3665     void checkMaskFromIndexSize(int offset,
3666                                 IntSpecies vsp,
3667                                 VectorMask<Integer> m,
3668                                 int scale,
3669                                 int limit) {
3670         ((AbstractMask<Integer>)m)
3671             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3672     }
3673 
3674     @ForceInline
3675     private void conditionalStoreNYI(int offset,
3676                                      IntSpecies vsp,
3677                                      VectorMask<Integer> m,
3678                                      int scale,
3679                                      int limit) {
3680         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3681             String msg =
3682                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3683                               offset, limit, m, vsp);
3684             throw new AssertionError(msg);
3685         }
3686     }
3687 
3688     /*package-private*/
3689     @Override
3690     @ForceInline
3691     final
3692     IntVector maybeSwap(ByteOrder bo) {
3693         if (bo != NATIVE_ENDIAN) {

3964                 }
3965             }
3966             return dummyVector().vectorFactory(res);
3967         }
3968 
3969         /*package-private*/
3970         @ForceInline
3971         <M> IntVector ldOp(M memory, int offset,
3972                                       FLdOp<M> f) {
3973             return dummyVector().ldOp(memory, offset, f);
3974         }
3975 
3976         /*package-private*/
3977         @ForceInline
3978         <M> IntVector ldOp(M memory, int offset,
3979                                       VectorMask<Integer> m,
3980                                       FLdOp<M> f) {
3981             return dummyVector().ldOp(memory, offset, m, f);
3982         }
3983 
3984         /*package-private*/
3985         @ForceInline
3986         <M> void stOp(M memory, int offset, FStOp<M> f) {
3987             dummyVector().stOp(memory, offset, f);
3988         }
3989 
3990         /*package-private*/
3991         @ForceInline
3992         <M> void stOp(M memory, int offset,
3993                       AbstractMask<Integer> m,
3994                       FStOp<M> f) {
3995             dummyVector().stOp(memory, offset, m, f);
3996         }
3997 
3998         // N.B. Make sure these constant vectors and
3999         // masks load up correctly into registers.
4000         //
4001         // Also, see if we can avoid all that switching.
4002         // Could we cache both vectors and both masks in
4003         // this species object?
4004 
4005         // Zero and iota vector access
4006         @Override
4007         @ForceInline
4008         public final IntVector zero() {
4009             if ((Class<?>) vectorType() == IntMaxVector.class)
4010                 return IntMaxVector.ZERO;
4011             switch (vectorBitSize()) {
4012                 case 64: return Int64Vector.ZERO;
4013                 case 128: return Int128Vector.ZERO;
4014                 case 256: return Int256Vector.ZERO;
4015                 case 512: return Int512Vector.ZERO;
4016             }
4017             throw new AssertionError();

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 

  27 import java.nio.ByteOrder;

  28 import java.util.Arrays;
  29 import java.util.Objects;
  30 import java.util.function.Function;

  31 
  32 import jdk.incubator.foreign.MemorySegment;
  33 import jdk.incubator.foreign.ValueLayout;
  34 import jdk.internal.access.foreign.MemorySegmentProxy;
  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code int} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class IntVector extends AbstractVector<Integer> {
  53 
  54     IntVector(int[] vec) {
  55         super(vec);
  56     }
  57 
  58     static final int FORBID_OPCODE_KIND = VO_ONLYFP;
  59 
  60     static final ValueLayout.OfInt ELEMENT_LAYOUT = ValueLayout.JAVA_INT.withBitAlignment(8);
  61 
  62     @ForceInline
  63     static int opCode(Operator op) {
  64         return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
  65     }
  66     @ForceInline
  67     static int opCode(Operator op, int requireKind) {
  68         requireKind |= VO_OPCODE_VALID;
  69         return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
  70     }
  71     @ForceInline
  72     static boolean opKind(Operator op, int bit) {
  73         return VectorOperators.opKind(op, bit);
  74     }
  75 
  76     // Virtualized factories and operators,
  77     // coded with portable definitions.
  78     // These are all @ForceInline in case
  79     // they need to be used performantly.
  80     // The various shape-specific subclasses
  81     // also specialize them by wrapping

 336         return vectorFactory(res);
 337     }
 338 
 339     /*package-private*/
 340     @ForceInline
 341     final
 342     <M> IntVector ldOp(M memory, int offset,
 343                                   VectorMask<Integer> m,
 344                                   FLdOp<M> f) {
 345         //int[] vec = vec();
 346         int[] res = new int[length()];
 347         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 348         for (int i = 0; i < res.length; i++) {
 349             if (mbits[i]) {
 350                 res[i] = f.apply(memory, offset, i);
 351             }
 352         }
 353         return vectorFactory(res);
 354     }
 355 
 356     /*package-private*/
 357     interface FLdLongOp {
 358         int apply(MemorySegment memory, long offset, int i);
 359     }
 360 
 361     /*package-private*/
 362     @ForceInline
 363     final
 364     IntVector ldLongOp(MemorySegment memory, long offset,
 365                                   FLdLongOp f) {
 366         //dummy; no vec = vec();
 367         int[] res = new int[length()];
 368         for (int i = 0; i < res.length; i++) {
 369             res[i] = f.apply(memory, offset, i);
 370         }
 371         return vectorFactory(res);
 372     }
 373 
 374     /*package-private*/
 375     @ForceInline
 376     final
 377     IntVector ldLongOp(MemorySegment memory, long offset,
 378                                   VectorMask<Integer> m,
 379                                   FLdLongOp f) {
 380         //int[] vec = vec();
 381         int[] res = new int[length()];
 382         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 383         for (int i = 0; i < res.length; i++) {
 384             if (mbits[i]) {
 385                 res[i] = f.apply(memory, offset, i);
 386             }
 387         }
 388         return vectorFactory(res);
 389     }
 390 
 391     static int memorySegmentGet(MemorySegment ms, long o, int i) {
 392         return ms.get(ELEMENT_LAYOUT, o + i * 4L);
 393     }
 394 
 395     interface FStOp<M> {
 396         void apply(M memory, int offset, int i, int a);
 397     }
 398 
 399     /*package-private*/
 400     @ForceInline
 401     final
 402     <M> void stOp(M memory, int offset,
 403                   FStOp<M> f) {
 404         int[] vec = vec();
 405         for (int i = 0; i < vec.length; i++) {
 406             f.apply(memory, offset, i, vec[i]);
 407         }
 408     }
 409 
 410     /*package-private*/
 411     @ForceInline
 412     final
 413     <M> void stOp(M memory, int offset,
 414                   VectorMask<Integer> m,
 415                   FStOp<M> f) {
 416         int[] vec = vec();
 417         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 418         for (int i = 0; i < vec.length; i++) {
 419             if (mbits[i]) {
 420                 f.apply(memory, offset, i, vec[i]);
 421             }
 422         }
 423     }
 424 
 425     interface FStLongOp {
 426         void apply(MemorySegment memory, long offset, int i, int a);
 427     }
 428 
 429     /*package-private*/
 430     @ForceInline
 431     final
 432     void stLongOp(MemorySegment memory, long offset,
 433                   FStLongOp f) {
 434         int[] vec = vec();
 435         for (int i = 0; i < vec.length; i++) {
 436             f.apply(memory, offset, i, vec[i]);
 437         }
 438     }
 439 
 440     /*package-private*/
 441     @ForceInline
 442     final
 443     void stLongOp(MemorySegment memory, long offset,
 444                   VectorMask<Integer> m,
 445                   FStLongOp f) {
 446         int[] vec = vec();
 447         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 448         for (int i = 0; i < vec.length; i++) {
 449             if (mbits[i]) {
 450                 f.apply(memory, offset, i, vec[i]);
 451             }
 452         }
 453     }
 454 
 455     static void memorySegmentSet(MemorySegment ms, long o, int i, int e) {
 456         ms.set(ELEMENT_LAYOUT, o + i * 4L, e);
 457     }
 458 
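These two helpers are the scalar access primitives behind the new MemorySegment paths; a sketch of the layout-based access they perform (using the jdk.incubator.foreign API imported above; values are illustrative):

    MemorySegment seg = MemorySegment.ofArray(new int[8]);
    int e0 = seg.get(ELEMENT_LAYOUT, 0L);   // what memorySegmentGet(seg, 0L, 0) reads
    seg.set(ELEMENT_LAYOUT, 4L, 42);        // what memorySegmentSet(seg, 0L, 1, 42) writes
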
 459     // Binary test
 460 
 461     /*package-private*/
 462     interface FBinTest {
 463         boolean apply(int cond, int i, int a, int b);
 464     }
 465 
 466     /*package-private*/
 467     @ForceInline
 468     final
 469     AbstractMask<Integer> bTest(int cond,
 470                                   Vector<Integer> o,
 471                                   FBinTest f) {
 472         int[] vec1 = vec();
 473         int[] vec2 = ((IntVector)o).vec();
 474         boolean[] bits = new boolean[length()];
 475         for (int i = 0; i < length(); i++){
 476             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 477         }
 478         return maskFactory(bits);

 489     static int rotateRight(int a, int n) {
 490         return Integer.rotateRight(a, n);
 491     }
 492 
 493     /*package-private*/
 494     @Override
 495     abstract IntSpecies vspecies();
 496 
 497     /*package-private*/
 498     @ForceInline
 499     static long toBits(int e) {
 500         return  e;
 501     }
 502 
 503     /*package-private*/
 504     @ForceInline
 505     static int fromBits(long bits) {
 506         return ((int)bits);
 507     }
 508 
 509     static IntVector expandHelper(Vector<Integer> v, VectorMask<Integer> m) {
 510         VectorSpecies<Integer> vsp = m.vectorSpecies();
 511         IntVector r  = (IntVector) vsp.zero();
 512         IntVector vi = (IntVector) v;
 513         if (m.allTrue()) {
 514             return vi;
 515         }
 516         for (int i = 0, j = 0; i < vsp.length(); i++) {
 517             if (m.laneIsSet(i)) {
 518                 r = r.withLane(i, vi.lane(j++));
 519             }
 520         }
 521         return r;
 522     }
 523 
 524     static IntVector compressHelper(Vector<Integer> v, VectorMask<Integer> m) {
 525         VectorSpecies<Integer> vsp = m.vectorSpecies();
 526         IntVector r  = (IntVector) vsp.zero();
 527         IntVector vi = (IntVector) v;
 528         if (m.allTrue()) {
 529             return vi;
 530         }
 531         for (int i = 0, j = 0; i < vsp.length(); i++) {
 532             if (m.laneIsSet(i)) {
 533                 r = r.withLane(j++, vi.lane(i));
 534             }
 535         }
 536         return r;
 537     }
 538 
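A worked example of the expand/compress semantics implemented above (the four-lane species and values are illustrative):

    VectorSpecies<Integer> sp = IntVector.SPECIES_128;   // four int lanes
    IntVector v = IntVector.fromArray(sp, new int[]{10, 20, 30, 40}, 0);
    VectorMask<Integer> m = VectorMask.fromValues(sp, true, false, true, false);
    // compressHelper(v, m) packs selected lanes leftward:            [10, 30, 0, 0]
    // expandHelper(v, m) scatters leading lanes to selected slots:   [10, 0, 20, 0]
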
 539     // Static factories (other than memory operations)
 540 
 541     // Note: A surprising behavior in javadoc
 542     // sometimes makes a lone /** {@inheritDoc} */
 543     // comment drop the method altogether,
  544     // apparently if the method mentions a
  545     // parameter or return type of Vector<Integer>
 546     // instead of Vector<E> as originally specified.
 547     // Adding an empty HTML fragment appears to
 548     // nudge javadoc into providing the desired
 549     // inherited documentation.  We use the HTML
 550     // comment <!--workaround--> for this.
 551 
 552     /**
 553      * Returns a vector of the given species
 554      * where all lane elements are set to
 555      * zero, the default primitive value.
 556      *
 557      * @param species species of the desired zero vector
 558      * @return a zero vector

 708                 return lanewise(XOR, broadcast(-1), m);
 709             }
 710         }
 711         int opc = opCode(op);
 712         return VectorSupport.unaryOp(
 713             opc, getClass(), maskClass, int.class, length(),
 714             this, m,
 715             UN_IMPL.find(op, opc, IntVector::unaryOperations));
 716     }
 717 
 718     private static final
 719     ImplCache<Unary, UnaryOperation<IntVector, VectorMask<Integer>>>
 720         UN_IMPL = new ImplCache<>(Unary.class, IntVector.class);
 721 
 722     private static UnaryOperation<IntVector, VectorMask<Integer>> unaryOperations(int opc_) {
 723         switch (opc_) {
 724             case VECTOR_OP_NEG: return (v0, m) ->
 725                     v0.uOp(m, (i, a) -> (int) -a);
 726             case VECTOR_OP_ABS: return (v0, m) ->
 727                     v0.uOp(m, (i, a) -> (int) Math.abs(a));
 728             case VECTOR_OP_BIT_COUNT: return (v0, m) ->
 729                     v0.uOp(m, (i, a) -> (int) Integer.bitCount(a));
 730             case VECTOR_OP_TZ_COUNT: return (v0, m) ->
 731                     v0.uOp(m, (i, a) -> (int) Integer.numberOfTrailingZeros(a));
 732             case VECTOR_OP_LZ_COUNT: return (v0, m) ->
 733                     v0.uOp(m, (i, a) -> (int) Integer.numberOfLeadingZeros(a));
 734             case VECTOR_OP_REVERSE: return (v0, m) ->
 735                     v0.uOp(m, (i, a) -> (int) Integer.reverse(a));
 736             case VECTOR_OP_REVERSE_BYTES: return (v0, m) ->
 737                     v0.uOp(m, (i, a) -> (int) Integer.reverseBytes(a));
 738             default: return null;
 739         }
 740     }
 741 
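The newly added unary cases delegate to the matching Integer methods per lane; a usage sketch through the operator tokens (assuming this version of VectorOperators exposes them):

    IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 0b1011);
    IntVector counts = v.lanewise(VectorOperators.BIT_COUNT);   // every lane == 3
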
 742     // Binary lanewise support
 743 
 744     /**
 745      * {@inheritDoc} <!--workaround-->
 746      * @see #lanewise(VectorOperators.Binary,int)
 747      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
 748      */
 749     @Override
 750     public abstract
 751     IntVector lanewise(VectorOperators.Binary op,
 752                                   Vector<Integer> v);
 753     @ForceInline
 754     final
 755     IntVector lanewiseTemplate(VectorOperators.Binary op,
 756                                           Vector<Integer> v) {
 757         IntVector that = (IntVector) v;

 858             case VECTOR_OP_MAX: return (v0, v1, vm) ->
 859                     v0.bOp(v1, vm, (i, a, b) -> (int)Math.max(a, b));
 860             case VECTOR_OP_MIN: return (v0, v1, vm) ->
 861                     v0.bOp(v1, vm, (i, a, b) -> (int)Math.min(a, b));
 862             case VECTOR_OP_AND: return (v0, v1, vm) ->
 863                     v0.bOp(v1, vm, (i, a, b) -> (int)(a & b));
 864             case VECTOR_OP_OR: return (v0, v1, vm) ->
 865                     v0.bOp(v1, vm, (i, a, b) -> (int)(a | b));
 866             case VECTOR_OP_XOR: return (v0, v1, vm) ->
 867                     v0.bOp(v1, vm, (i, a, b) -> (int)(a ^ b));
 868             case VECTOR_OP_LSHIFT: return (v0, v1, vm) ->
 869                     v0.bOp(v1, vm, (i, a, n) -> (int)(a << n));
 870             case VECTOR_OP_RSHIFT: return (v0, v1, vm) ->
 871                     v0.bOp(v1, vm, (i, a, n) -> (int)(a >> n));
 872             case VECTOR_OP_URSHIFT: return (v0, v1, vm) ->
 873                     v0.bOp(v1, vm, (i, a, n) -> (int)((a & LSHR_SETUP_MASK) >>> n));
 874             case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
 875                     v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
 876             case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
 877                     v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
 878             case VECTOR_OP_COMPRESS_BITS: return (v0, v1, vm) ->
 879                     v0.bOp(v1, vm, (i, a, n) -> Integer.compress(a, n));
 880             case VECTOR_OP_EXPAND_BITS: return (v0, v1, vm) ->
 881                     v0.bOp(v1, vm, (i, a, n) -> Integer.expand(a, n));
 882             default: return null;
 883         }
 884     }
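     // Illustrative note: Integer.compress gathers the bits of its first
     // argument selected by the second down into the low end, e.g.
     // Integer.compress(0b1010_1010, 0b1111_0000) == 0b1010, and
     // Integer.expand is its inverse, scattering low bits back out to the
     // selected positions.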
 885 
 886     // FIXME: Maybe all of the public final methods in this file (the
 887     // simple ones that just call lanewise) should be pushed down to
 888     // the X-VectorBits template.  They can't optimize properly at
 889     // this level, and must rely on inlining.  Does it work?
 890     // (If it works, of course keep the code here.)
 891 
 892     /**
 893      * Combines the lane values of this vector
 894      * with the value of a broadcast scalar.
 895      *
 896      * This is a lane-wise binary operation which applies
 897      * the selected operation to each lane.
 898      * The return value will be equal to this expression:
 899      * {@code this.lanewise(op, this.broadcast(e))}.
 900      *
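          * <p> A usage sketch (the vector {@code v} is an assumption):
          * <pre>{@code
          * IntVector w = v.lanewise(VectorOperators.ADD, 3);  // add 3 to each lane
          * }</pre>
          *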
 901      * @param op the operation used to process lane values

1847     /**
1848      * {@inheritDoc} <!--workaround-->
1849      */
1850     @Override
1851     @ForceInline
1852     public final
1853     IntVector neg() {
1854         return lanewise(NEG);
1855     }
1856 
1857     /**
1858      * {@inheritDoc} <!--workaround-->
1859      */
1860     @Override
1861     @ForceInline
1862     public final
1863     IntVector abs() {
1864         return lanewise(ABS);
1865     }
1866 
1867 
1868     // not (~)
1869     /**
1870      * Computes the bitwise logical complement ({@code ~})
1871      * of this vector.
1872      *
 1873      * This is a lane-wise unary operation which applies
1874      * the primitive bitwise "not" operation ({@code ~})
1875      * to each lane value.
1876      *
1877      * This method is also equivalent to the expression
1878      * {@link #lanewise(VectorOperators.Unary)
1879      *    lanewise}{@code (}{@link VectorOperators#NOT
1880      *    NOT}{@code )}.
1881      *
1882      * <p>
1883      * This is not a full-service named operation like
1884      * {@link #add(Vector) add}.  A masked version of
1885      * this operation is not directly available
1886      * but may be obtained via the masked version of
1887      * {@code lanewise}.

2474         int[] a = toArray();
2475         int[] sa = new int[a.length];
2476         for (int i = 0; i < a.length; i++) {
2477             sa[i] = (int) a[i];
2478         }
2479         return VectorShuffle.fromArray(dsp, sa, 0);
2480     }
2481 
2482     /*package-private*/
2483     @ForceInline
2484     final
2485     VectorShuffle<Integer> toShuffleTemplate(Class<?> shuffleType) {
2486         IntSpecies vsp = vspecies();
2487         return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
2488                                      getClass(), int.class, length(),
2489                                      shuffleType, byte.class, length(),
2490                                      this, vsp,
2491                                      IntVector::toShuffle0);
2492     }
2493 
2494     /**
2495      * {@inheritDoc} <!--workaround-->
2496      * @since 19
2497      */
2498     @Override
2499     public abstract
2500     IntVector compress(VectorMask<Integer> m);
2501 
2502     /*package-private*/
2503     @ForceInline
2504     final
2505     <M extends AbstractMask<Integer>>
2506     IntVector compressTemplate(Class<M> masktype, M m) {
 2507         m.check(masktype, this);
 2508         return (IntVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
 2509                                                      int.class, length(), this, m,
 2510                                                      (v1, m1) -> compressHelper(v1, m1));
2511     }
2512 
2513     /**
2514      * {@inheritDoc} <!--workaround-->
2515      * @since 19
2516      */
2517     @Override
2518     public abstract
2519     IntVector expand(VectorMask<Integer> m);
2520 
2521     /*package-private*/
2522     @ForceInline
2523     final
2524     <M extends AbstractMask<Integer>>
2525     IntVector expandTemplate(Class<M> masktype, M m) {
 2526         m.check(masktype, this);
 2527         return (IntVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
 2528                                                      int.class, length(), this, m,
 2529                                                      (v1, m1) -> expandHelper(v1, m1));
2530     }
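      // Illustrative example (values are assumptions): expanding [5, 6, 7, 8]
      // through the mask [T, F, T, F] steps through the source lanes, placing
      // them into the set lanes and zeroing the rest: [5, 0, 6, 0].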
2531 
2532 
2533     /**
2534      * {@inheritDoc} <!--workaround-->
2535      */
2536     @Override
2537     public abstract
2538     IntVector selectFrom(Vector<Integer> v);
2539 
2540     /*package-private*/
2541     @ForceInline
2542     final IntVector selectFromTemplate(IntVector v) {
2543         return v.rearrange(this.toShuffle());
2544     }
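      // Illustrative example (values are assumptions): if this vector holds
      // the indexes [3, 2, 1, 0], then selectFrom(v) treats them as a shuffle
      // and returns the lanes of v in reverse order.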
2545 
2546     /**
2547      * {@inheritDoc} <!--workaround-->
2548      */
2549     @Override
2550     public abstract
2551     IntVector selectFrom(Vector<Integer> s, VectorMask<Integer> m);
2552 

2918         return res;
2919     }
2920 
2921     /** {@inheritDoc} <!--workaround-->
2922      * @implNote
 2923      * When this method is used on vectors
2924      * of type {@code IntVector},
2925      * there will be no loss of precision.
2926      */
2927     @ForceInline
2928     @Override
2929     public final double[] toDoubleArray() {
2930         int[] a = toArray();
2931         double[] res = new double[a.length];
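              // The widening conversion below is exact: a double's 53-bit
              // significand represents every 32-bit int value without rounding.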
2932         for (int i = 0; i < a.length; i++) {
2933             res[i] = (double) a[i];
2934         }
2935         return res;
2936     }
 2937 
2938     /**
2939      * Loads a vector from an array of type {@code int[]}
2940      * starting at an offset.
2941      * For each vector lane, where {@code N} is the vector lane index, the
2942      * array element at index {@code offset + N} is placed into the
2943      * resulting vector at lane index {@code N}.
2944      *
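           * <p> An illustrative sketch (species and data are assumptions):
           * <pre>{@code
           * int[] data = {10, 20, 30, 40, 50, 60};
           * // with the 4-lane 128-bit species, loads {30, 40, 50, 60}
           * IntVector v = IntVector.fromArray(IntVector.SPECIES_128, data, 2);
           * }</pre>
           *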
2945      * @param species species of desired vector
2946      * @param a the array
2947      * @param offset the offset into the array
2948      * @return the vector loaded from an array
2949      * @throws IndexOutOfBoundsException
2950      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2951      *         for any lane {@code N} in the vector
2952      */
2953     @ForceInline
2954     public static
2955     IntVector fromArray(VectorSpecies<Integer> species,
2956                                    int[] a, int offset) {
2957         offset = checkFromIndexSize(offset, species.length(), a.length);

3090      * @see IntVector#toIntArray()
3091      */
3092     @ForceInline
3093     public static
3094     IntVector fromArray(VectorSpecies<Integer> species,
3095                                    int[] a, int offset,
3096                                    int[] indexMap, int mapOffset,
3097                                    VectorMask<Integer> m) {
3098         if (m.allTrue()) {
3099             return fromArray(species, a, offset, indexMap, mapOffset);
3100         }
3101         else {
3102             IntSpecies vsp = (IntSpecies) species;
3103             return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
3104         }
3105     }
3106 
3107 
3108 
3109     /**
3110      * Loads a vector from a {@linkplain MemorySegment memory segment}
3111      * starting at an offset into the memory segment.
3112      * Bytes are composed into primitive lane elements according
3113      * to the specified byte order.
3114      * The vector is arranged into lanes according to
3115      * <a href="Vector.html#lane-order">memory ordering</a>.
3116      * <p>
3117      * This method behaves as if it returns the result of calling
3118      * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
3119      * fromMemorySegment()} as follows:
3120      * <pre>{@code
3121      * var m = species.maskAll(true);
3122      * return fromMemorySegment(species, ms, offset, bo, m);
3123      * }</pre>
3124      *
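           * <p> A small usage sketch ({@code ms} is an assumed readable segment):
           * <pre>{@code
           * IntVector v = IntVector.fromMemorySegment(
           *         IntVector.SPECIES_PREFERRED, ms, 0L, ByteOrder.nativeOrder());
           * }</pre>
           *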
3125      * @param species species of desired vector
3126      * @param ms the memory segment
3127      * @param offset the offset into the memory segment
3128      * @param bo the intended byte order
3129      * @return a vector loaded from the memory segment
3130      * @throws IndexOutOfBoundsException
3131      *         if {@code offset+N*4 < 0}
3132      *         or {@code offset+N*4 >= ms.byteSize()}
3133      *         for any lane {@code N} in the vector
3134      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3135      *         not backed by a {@code byte[]} array.
3136      * @throws IllegalStateException if the memory segment's session is not alive,
3137      *         or if access occurs from a thread other than the thread owning the session.
3138      * @since 19
3139      */
3140     @ForceInline
3141     public static
3142     IntVector fromMemorySegment(VectorSpecies<Integer> species,
3143                                            MemorySegment ms, long offset,
3144                                            ByteOrder bo) {
3145         offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
3146         IntSpecies vsp = (IntSpecies) species;
3147         return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
3148     }
3149 
3150     /**
3151      * Loads a vector from a {@linkplain MemorySegment memory segment}
3152      * starting at an offset into the memory segment
3153      * and using a mask.
3154      * Lanes where the mask is unset are filled with the default
3155      * value of {@code int} (zero).
3156      * Bytes are composed into primitive lane elements according
3157      * to the specified byte order.
3158      * The vector is arranged into lanes according to
3159      * <a href="Vector.html#lane-order">memory ordering</a>.
3160      * <p>
3161      * The following pseudocode illustrates the behavior:
3162      * <pre>{@code
 3163      * var slice = ms.asSlice(offset);
3164      * int[] ar = new int[species.length()];
3165      * for (int n = 0; n < ar.length; n++) {
3166      *     if (m.laneIsSet(n)) {
 3167      *         ar[n] = slice.getAtIndex(ValueLayout.JAVA_INT.withBitAlignment(8), n);
3168      *     }
3169      * }
3170      * IntVector r = IntVector.fromArray(species, ar, 0);
3171      * }</pre>
3172      * @implNote
3173      * This operation is likely to be more efficient if
3174      * the specified byte order is the same as
3175      * {@linkplain ByteOrder#nativeOrder()
3176      * the platform native order},
3177      * since this method will not need to reorder
3178      * the bytes of lane values.
3179      *
3180      * @param species species of desired vector
3181      * @param ms the memory segment
3182      * @param offset the offset into the memory segment
3183      * @param bo the intended byte order
3184      * @param m the mask controlling lane selection
3185      * @return a vector loaded from the memory segment
3186      * @throws IndexOutOfBoundsException
3187      *         if {@code offset+N*4 < 0}
3188      *         or {@code offset+N*4 >= ms.byteSize()}
3189      *         for any lane {@code N} in the vector
3190      *         where the mask is set
3191      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3192      *         not backed by a {@code byte[]} array.
3193      * @throws IllegalStateException if the memory segment's session is not alive,
3194      *         or if access occurs from a thread other than the thread owning the session.
3195      * @since 19
3196      */
3197     @ForceInline
3198     public static
3199     IntVector fromMemorySegment(VectorSpecies<Integer> species,
3200                                            MemorySegment ms, long offset,
3201                                            ByteOrder bo,
3202                                            VectorMask<Integer> m) {
3203         IntSpecies vsp = (IntSpecies) species;
3204         if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
3205             return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
3206         }
3207 
3208         // FIXME: optimize
3209         checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
 3210         return vsp.ldLongOp(ms, offset, m, IntVector::memorySegmentGet);
3211     }
3212 
3213     // Memory store operations
3214 
3215     /**
3216      * Stores this vector into an array of type {@code int[]}
3217      * starting at an offset.
3218      * <p>
3219      * For each vector lane, where {@code N} is the vector lane index,
3220      * the lane element at index {@code N} is stored into the array
3221      * element {@code a[offset+N]}.
3222      *
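           * <p> For example (an illustrative sketch; {@code v} is assumed):
           * <pre>{@code
           * int[] out = new int[32];
           * v.intoArray(out, 8);  // writes lanes into out[8] .. out[8 + v.length() - 1]
           * }</pre>
           *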
3223      * @param a the array, of type {@code int[]}
3224      * @param offset the offset into the array
3225      * @throws IndexOutOfBoundsException
3226      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3227      *         for any lane {@code N} in the vector
3228      */
3229     @ForceInline
3230     public final
3231     void intoArray(int[] a, int offset) {
3232         offset = checkFromIndexSize(offset, length(), a.length);
3233         IntSpecies vsp = vspecies();
3234         VectorSupport.store(
3235             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3236             a, arrayAddress(a, offset),
3237             this,
3238             a, offset,
3239             (arr, off, v)
3240             -> v.stOp(arr, (int) off,
3241                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3242     }
3243 
3244     /**
3245      * Stores this vector into an array of type {@code int[]}
3246      * starting at offset and using a mask.
3247      * <p>
3248      * For each vector lane, where {@code N} is the vector lane index,
3249      * the lane element at index {@code N} is stored into the array
3250      * element {@code a[offset+N]}.
3251      * If the mask lane at {@code N} is unset then the corresponding
3252      * array element {@code a[offset+N]} is left unchanged.
3253      * <p>
3254      * Array range checking is done for lanes where the mask is set.
3255      * Lanes where the mask is unset are not stored and do not need
3256      * to correspond to legitimate elements of {@code a}.
3257      * That is, unset lanes may correspond to array indexes less than
3258      * zero or beyond the end of the array.
3259      *
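           * <p> A masked tail-store sketch (the names are assumptions):
           * <pre>{@code
           * var m = v.species().indexInRange(off, a.length);
           * v.intoArray(a, off, m);  // lanes past a.length are simply dropped
           * }</pre>
           *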
3260      * @param a the array, of type {@code int[]}

3361      *         where the mask is set
3362      * @see IntVector#toIntArray()
3363      */
3364     @ForceInline
3365     public final
3366     void intoArray(int[] a, int offset,
3367                    int[] indexMap, int mapOffset,
3368                    VectorMask<Integer> m) {
3369         if (m.allTrue()) {
3370             intoArray(a, offset, indexMap, mapOffset);
3371         }
3372         else {
3373             intoArray0(a, offset, indexMap, mapOffset, m);
3374         }
3375     }
3376 
3377 
3378 
3379     /**
3380      * {@inheritDoc} <!--workaround-->
3381      * @since 19
3382      */
3383     @Override
3384     @ForceInline
3385     public final
3386     void intoMemorySegment(MemorySegment ms, long offset,
3387                            ByteOrder bo) {
3388         if (ms.isReadOnly()) {
 3389             throw new UnsupportedOperationException("Attempt to write a read-only segment");
 3390         }
3391 
3392         offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
 3393         maybeSwap(bo).intoMemorySegment0(ms, offset);
3394     }
3395 
3396     /**
3397      * {@inheritDoc} <!--workaround-->
3398      * @since 19
3399      */
3400     @Override
3401     @ForceInline
3402     public final
3403     void intoMemorySegment(MemorySegment ms, long offset,
3404                            ByteOrder bo,
3405                            VectorMask<Integer> m) {
3406         if (m.allTrue()) {
3407             intoMemorySegment(ms, offset, bo);
3408         } else {
3409             if (ms.isReadOnly()) {
3410                 throw new UnsupportedOperationException("Attempt to write a read-only segment");
3411             }
3412             IntSpecies vsp = vspecies();
3413             checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
3414             maybeSwap(bo).intoMemorySegment0(ms, offset, m);
3415         }
3416     }
3417 
3418     // ================================================
3419 
3420     // Low-level memory operations.
3421     //
3422     // Note that all of these operations *must* inline into a context
3423     // where the exact species of the involved vector is a
3424     // compile-time constant.  Otherwise, the intrinsic generation
3425     // will fail and performance will suffer.
3426     //
3427     // In many cases this is achieved by re-deriving a version of the
3428     // method in each concrete subclass (per species).  The re-derived
3429     // method simply calls one of these generic methods, with exact
3430     // parameters for the controlling metadata, which is either a
3431     // typed vector or constant species instance.
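          //
          // For example (an illustrative caller-side idiom), pinning the species
          // in a static final field keeps it a compile-time constant:
          //   static final VectorSpecies<Integer> SPECIES = IntVector.SPECIES_PREFERRED;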
3432 
3433     // Unchecked loading operations in native byte order.
3434     // Caller is responsible for applying index checks, masking, and
3435     // byte swapping.
3436 
3437     /*package-private*/
3438     abstract
3439     IntVector fromArray0(int[] a, int offset);
3440     @ForceInline
3441     final
3442     IntVector fromArray0Template(int[] a, int offset) {
3443         IntSpecies vsp = vspecies();
3444         return VectorSupport.load(
3445             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3446             a, arrayAddress(a, offset),
3447             a, offset, vsp,
3448             (arr, off, s) -> s.ldOp(arr, (int) off,
3449                                     (arr_, off_, i) -> arr_[off_ + i]));
3450     }
3451 
3452     /*package-private*/
3453     abstract
3454     IntVector fromArray0(int[] a, int offset, VectorMask<Integer> m);
3455     @ForceInline
3456     final
3457     <M extends VectorMask<Integer>>
3458     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3459         m.check(species());
3460         IntSpecies vsp = vspecies();
3461         return VectorSupport.loadMasked(
3462             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3463             a, arrayAddress(a, offset), m,
3464             a, offset, vsp,
3465             (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
3466                                         (arr_, off_, i) -> arr_[off_ + i]));
3467     }
3468 
3469     /*package-private*/
3470     abstract
3471     IntVector fromArray0(int[] a, int offset,
3472                                     int[] indexMap, int mapOffset,
3473                                     VectorMask<Integer> m);
3474     @ForceInline
3475     final
3476     <M extends VectorMask<Integer>>
3477     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset,
3478                                             int[] indexMap, int mapOffset, M m) {
3479         IntSpecies vsp = vspecies();
3480         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3481         Objects.requireNonNull(a);
3482         Objects.requireNonNull(indexMap);
3483         m.check(vsp);
3484         Class<? extends IntVector> vectorType = vsp.vectorType();
3485 
3486         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
3487         IntVector vix = IntVector
3488             .fromArray(isp, indexMap, mapOffset)
3489             .add(offset);
3490 
3491         // FIXME: Check index under mask controlling.
3492         vix = VectorIntrinsics.checkIndex(vix, a.length);
3493 
3494         return VectorSupport.loadWithMap(
3495             vectorType, maskClass, int.class, vsp.laneCount(),
3496             isp.vectorType(),
3497             a, ARRAY_BASE, vix, m,
3498             a, offset, indexMap, mapOffset, vsp,
3499             (c, idx, iMap, idy, s, vm) ->
3500             s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3501     }
3502 
3503 
 3504 
3505     abstract
 3506     IntVector fromMemorySegment0(MemorySegment ms, long offset);
3507     @ForceInline
3508     final
3509     IntVector fromMemorySegment0Template(MemorySegment ms, long offset) {
3510         IntSpecies vsp = vspecies();
 3511         return ScopedMemoryAccess.loadFromMemorySegment(
3512                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3513                 (MemorySegmentProxy) ms, offset, vsp,
3514                 (msp, off, s) -> {
 3515                     return s.ldLongOp((MemorySegment) msp, off, IntVector::memorySegmentGet);
3516                 });
3517     }
3518 
3519     abstract
3520     IntVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Integer> m);
3521     @ForceInline
3522     final
3523     <M extends VectorMask<Integer>>
3524     IntVector fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
3525         IntSpecies vsp = vspecies();
3526         m.check(vsp);
3527         return ScopedMemoryAccess.loadFromMemorySegmentMasked(
3528                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3529                 (MemorySegmentProxy) ms, offset, m, vsp,
3530                 (msp, off, s, vm) -> {
 3531                     return s.ldLongOp((MemorySegment) msp, off, vm, IntVector::memorySegmentGet);
3532                 });
3533     }
3534 
3535     // Unchecked storing operations in native byte order.
3536     // Caller is responsible for applying index checks, masking, and
3537     // byte swapping.
3538 
3539     abstract
3540     void intoArray0(int[] a, int offset);
3541     @ForceInline
3542     final
3543     void intoArray0Template(int[] a, int offset) {
3544         IntSpecies vsp = vspecies();
3545         VectorSupport.store(
3546             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3547             a, arrayAddress(a, offset),
3548             this, a, offset,
3549             (arr, off, v)
3550             -> v.stOp(arr, (int) off,
3551                       (arr_, off_, i, e) -> arr_[off_+i] = e));
3552     }
3553 
3554     abstract
3555     void intoArray0(int[] a, int offset, VectorMask<Integer> m);
3556     @ForceInline
3557     final
3558     <M extends VectorMask<Integer>>
3559     void intoArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3560         m.check(species());
3561         IntSpecies vsp = vspecies();
3562         VectorSupport.storeMasked(
3563             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3564             a, arrayAddress(a, offset),
3565             this, m, a, offset,
3566             (arr, off, v, vm)
3567             -> v.stOp(arr, (int) off, vm,
3568                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3569     }
3570 
3571     abstract
3572     void intoArray0(int[] a, int offset,
3573                     int[] indexMap, int mapOffset,
3574                     VectorMask<Integer> m);
3575     @ForceInline
3576     final
3577     <M extends VectorMask<Integer>>
3578     void intoArray0Template(Class<M> maskClass, int[] a, int offset,
3579                             int[] indexMap, int mapOffset, M m) {
3580         m.check(species());
3581         IntSpecies vsp = vspecies();
3582         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3583         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3584         IntVector vix = IntVector
3585             .fromArray(isp, indexMap, mapOffset)
3586             .add(offset);
3587 
3588         // FIXME: Check index under mask controlling.
3589         vix = VectorIntrinsics.checkIndex(vix, a.length);
3590 
3591         VectorSupport.storeWithMap(
3592             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3593             isp.vectorType(),
3594             a, arrayAddress(a, 0), vix,
3595             this, m,
3596             a, offset, indexMap, mapOffset,
3597             (arr, off, v, map, mo, vm)
3598             -> v.stOp(arr, off, vm,
3599                       (arr_, off_, i, e) -> {
3600                           int j = map[mo + i];
3601                           arr[off + j] = e;
3602                       }));
3603     }
3604 
 3605 
3606     @ForceInline
3607     final
 3608     void intoMemorySegment0(MemorySegment ms, long offset) {
3609         IntSpecies vsp = vspecies();
 3610         ScopedMemoryAccess.storeIntoMemorySegment(
3611                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3612                 this,
3613                 (MemorySegmentProxy) ms, offset,
3614                 (msp, off, v) -> {
 3615                     v.stLongOp((MemorySegment) msp, off, IntVector::memorySegmentSet);
3616                 });
3617     }
3618 
3619     abstract
 3620     void intoMemorySegment0(MemorySegment ms, long offset, VectorMask<Integer> m);
3621     @ForceInline
3622     final
3623     <M extends VectorMask<Integer>>
3624     void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
3625         IntSpecies vsp = vspecies();
3626         m.check(vsp);
3627         ScopedMemoryAccess.storeIntoMemorySegmentMasked(
3628                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3629                 this, m,
3630                 (MemorySegmentProxy) ms, offset,
3631                 (msp, off, v, vm) -> {
 3632                     v.stLongOp((MemorySegment) msp, off, vm, IntVector::memorySegmentSet);
3633                 });
3634     }
3635 
3636 
3637     // End of low-level memory operations.
3638 
3639     private static
3640     void checkMaskFromIndexSize(int offset,
3641                                 IntSpecies vsp,
3642                                 VectorMask<Integer> m,
3643                                 int scale,
3644                                 int limit) {
3645         ((AbstractMask<Integer>)m)
3646             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3647     }
3648 
3649     private static
3650     void checkMaskFromIndexSize(long offset,
3651                                 IntSpecies vsp,
3652                                 VectorMask<Integer> m,
3653                                 int scale,
3654                                 long limit) {
3655         ((AbstractMask<Integer>)m)
3656             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3657     }
3658 
3659     @ForceInline
3660     private void conditionalStoreNYI(int offset,
3661                                      IntSpecies vsp,
3662                                      VectorMask<Integer> m,
3663                                      int scale,
3664                                      int limit) {
3665         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3666             String msg =
3667                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3668                               offset, limit, m, vsp);
3669             throw new AssertionError(msg);
3670         }
3671     }
3672 
3673     /*package-private*/
3674     @Override
3675     @ForceInline
3676     final
3677     IntVector maybeSwap(ByteOrder bo) {
3678         if (bo != NATIVE_ENDIAN) {

3949                 }
3950             }
3951             return dummyVector().vectorFactory(res);
3952         }
3953 
3954         /*package-private*/
3955         @ForceInline
3956         <M> IntVector ldOp(M memory, int offset,
3957                                       FLdOp<M> f) {
3958             return dummyVector().ldOp(memory, offset, f);
3959         }
3960 
3961         /*package-private*/
3962         @ForceInline
3963         <M> IntVector ldOp(M memory, int offset,
3964                                       VectorMask<Integer> m,
3965                                       FLdOp<M> f) {
3966             return dummyVector().ldOp(memory, offset, m, f);
3967         }
3968 
3969         /*package-private*/
3970         @ForceInline
3971         IntVector ldLongOp(MemorySegment memory, long offset,
3972                                       FLdLongOp f) {
3973             return dummyVector().ldLongOp(memory, offset, f);
3974         }
3975 
3976         /*package-private*/
3977         @ForceInline
3978         IntVector ldLongOp(MemorySegment memory, long offset,
3979                                       VectorMask<Integer> m,
3980                                       FLdLongOp f) {
3981             return dummyVector().ldLongOp(memory, offset, m, f);
3982         }
3983 
3984         /*package-private*/
3985         @ForceInline
3986         <M> void stOp(M memory, int offset, FStOp<M> f) {
3987             dummyVector().stOp(memory, offset, f);
3988         }
3989 
3990         /*package-private*/
3991         @ForceInline
3992         <M> void stOp(M memory, int offset,
3993                       AbstractMask<Integer> m,
3994                       FStOp<M> f) {
3995             dummyVector().stOp(memory, offset, m, f);
3996         }
3997 
3998         /*package-private*/
3999         @ForceInline
4000         void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
4001             dummyVector().stLongOp(memory, offset, f);
4002         }
4003 
4004         /*package-private*/
4005         @ForceInline
4006         void stLongOp(MemorySegment memory, long offset,
4007                       AbstractMask<Integer> m,
4008                       FStLongOp f) {
4009             dummyVector().stLongOp(memory, offset, m, f);
4010         }
4011 
4012         // N.B. Make sure these constant vectors and
4013         // masks load up correctly into registers.
4014         //
4015         // Also, see if we can avoid all that switching.
4016         // Could we cache both vectors and both masks in
4017         // this species object?
4018 
4019         // Zero and iota vector access
4020         @Override
4021         @ForceInline
4022         public final IntVector zero() {
4023             if ((Class<?>) vectorType() == IntMaxVector.class)
4024                 return IntMaxVector.ZERO;
4025             switch (vectorBitSize()) {
4026                 case 64: return Int64Vector.ZERO;
4027                 case 128: return Int128Vector.ZERO;
4028                 case 256: return Int256Vector.ZERO;
4029                 case 512: return Int512Vector.ZERO;
4030             }
4031             throw new AssertionError();