< prev index next >

src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java

Print this page

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 
  27 import java.nio.ByteBuffer;
  28 import java.nio.ByteOrder;
  29 import java.nio.ReadOnlyBufferException;
  30 import java.util.Arrays;
  31 import java.util.Objects;
  32 import java.util.function.Function;
  33 import java.util.function.UnaryOperator;
  34 



  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code short} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class ShortVector extends AbstractVector<Short> {
  53 
    /**
     * Package-private constructor; subclasses supply the backing
     * {@code short[]} of lane values, which is handed to the superclass.
     */
    ShortVector(short[] vec) {
        super(vec);
    }
  57 
  58     static final int FORBID_OPCODE_KIND = VO_ONLYFP;
  59 


    // Resolves a lanewise operator to its implementation opcode,
    // rejecting operators of the forbidden kind (VO_ONLYFP, see
    // FORBID_OPCODE_KIND above — short is an integral lane type).
    @ForceInline
    static int opCode(Operator op) {
        return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
    }
    // As opCode(op), but additionally requires the given operator-kind
    // bits; VO_OPCODE_VALID is always folded into the requirement.
    @ForceInline
    static int opCode(Operator op, int requireKind) {
        requireKind |= VO_OPCODE_VALID;
        return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
    }
    // Tests whether the operator carries the given kind bit(s);
    // simple delegation to VectorOperators.opKind.
    @ForceInline
    static boolean opKind(Operator op, int bit) {
        return VectorOperators.opKind(op, bit);
    }
  73 
  74     // Virtualized factories and operators,
  75     // coded with portable definitions.
  76     // These are all @ForceInline in case
  77     // they need to be used performantly.
  78     // The various shape-specific subclasses
  79     // also specialize them by wrapping

 334         return vectorFactory(res);
 335     }
 336 
 337     /*package-private*/
 338     @ForceInline
 339     final
 340     <M> ShortVector ldOp(M memory, int offset,
 341                                   VectorMask<Short> m,
 342                                   FLdOp<M> f) {
 343         //short[] vec = vec();
 344         short[] res = new short[length()];
 345         boolean[] mbits = ((AbstractMask<Short>)m).getBits();
 346         for (int i = 0; i < res.length; i++) {
 347             if (mbits[i]) {
 348                 res[i] = f.apply(memory, offset, i);
 349             }
 350         }
 351         return vectorFactory(res);
 352     }
 353 







































    // Store kernel: receives lane index i and lane value a, and writes
    // the value into {@code memory} relative to {@code offset}.
    interface FStOp<M> {
        void apply(M memory, int offset, int i, short a);
    }
 357 
 358     /*package-private*/
 359     @ForceInline
 360     final
 361     <M> void stOp(M memory, int offset,
 362                   FStOp<M> f) {
 363         short[] vec = vec();
 364         for (int i = 0; i < vec.length; i++) {
 365             f.apply(memory, offset, i, vec[i]);
 366         }
 367     }
 368 
 369     /*package-private*/
 370     @ForceInline
 371     final
 372     <M> void stOp(M memory, int offset,
 373                   VectorMask<Short> m,
 374                   FStOp<M> f) {
 375         short[] vec = vec();
 376         boolean[] mbits = ((AbstractMask<Short>)m).getBits();
 377         for (int i = 0; i < vec.length; i++) {
 378             if (mbits[i]) {
 379                 f.apply(memory, offset, i, vec[i]);
 380             }
 381         }
 382     }
 383 


































    // Binary test

    /*package-private*/
    // Comparison kernel for bTest: returns the boolean result of the
    // comparison selected by {@code cond} on lane {@code i}'s values a and b.
    interface FBinTest {
        boolean apply(int cond, int i, short a, short b);
    }
 390 
 391     /*package-private*/
 392     @ForceInline
 393     final
 394     AbstractMask<Short> bTest(int cond,
 395                                   Vector<Short> o,
 396                                   FBinTest f) {
 397         short[] vec1 = vec();
 398         short[] vec2 = ((ShortVector)o).vec();
 399         boolean[] bits = new boolean[length()];
 400         for (int i = 0; i < length(); i++){
 401             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 402         }
 403         return maskFactory(bits);

 414     static short rotateRight(short a, int n) {
 415         return (short)(((((short)a) & Short.toUnsignedInt((short)-1)) >>> (n & Short.SIZE-1)) | ((((short)a) & Short.toUnsignedInt((short)-1)) << (Short.SIZE - (n & Short.SIZE-1))));
 416     }
 417 
    /*package-private*/
    // Covariant override narrowing the species type to ShortSpecies.
    @Override
    abstract ShortSpecies vspecies();
 421 
    /*package-private*/
    // Widens a short lane value into the generic 64-bit bit container
    // used by species-independent code (Java's implicit short-to-long
    // widening, which is sign-extending).
    @ForceInline
    static long toBits(short e) {
        return  e;
    }
 427 
    /*package-private*/
    // Narrows a 64-bit bit container back to a short lane value;
    // the cast truncates, so only the low 16 bits survive.
    @ForceInline
    static short fromBits(long bits) {
        return ((short)bits);
    }
 433 






























 434     // Static factories (other than memory operations)
 435 
 436     // Note: A surprising behavior in javadoc
 437     // sometimes makes a lone /** {@inheritDoc} */
 438     // comment drop the method altogether,
    // apparently if the method mentions a
    // parameter or return type of Vector<Short>
 441     // instead of Vector<E> as originally specified.
 442     // Adding an empty HTML fragment appears to
 443     // nudge javadoc into providing the desired
 444     // inherited documentation.  We use the HTML
 445     // comment <!--workaround--> for this.
 446 
 447     /**
 448      * Returns a vector of the given species
 449      * where all lane elements are set to
 450      * zero, the default primitive value.
 451      *
 452      * @param species species of the desired zero vector
 453      * @return a zero vector

 603                 return lanewise(XOR, broadcast(-1), m);
 604             }
 605         }
 606         int opc = opCode(op);
 607         return VectorSupport.unaryOp(
 608             opc, getClass(), maskClass, short.class, length(),
 609             this, m,
 610             UN_IMPL.find(op, opc, ShortVector::unaryOperations));
 611     }
 612 
    // Cache of per-opcode Java implementations for unary lanewise ops;
    // entries are resolved on demand through unaryOperations(int).
    private static final
    ImplCache<Unary, UnaryOperation<ShortVector, VectorMask<Short>>>
        UN_IMPL = new ImplCache<>(Unary.class, ShortVector.class);

    // Maps a unary opcode to its scalar Java implementation, expressed
    // as a per-lane uOp; returns null when no implementation is listed.
    private static UnaryOperation<ShortVector, VectorMask<Short>> unaryOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_NEG: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) -a);
            case VECTOR_OP_ABS: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) Math.abs(a));
            default: return null;
        }
    }
 626 
 627     // Binary lanewise support
 628 
 629     /**
 630      * {@inheritDoc} <!--workaround-->
 631      * @see #lanewise(VectorOperators.Binary,short)
 632      * @see #lanewise(VectorOperators.Binary,short,VectorMask)
 633      */
 634     @Override
 635     public abstract
 636     ShortVector lanewise(VectorOperators.Binary op,
 637                                   Vector<Short> v);
 638     @ForceInline
 639     final
 640     ShortVector lanewiseTemplate(VectorOperators.Binary op,
 641                                           Vector<Short> v) {
 642         ShortVector that = (ShortVector) v;

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    ShortVector neg() {
        // Named alias for the lanewise NEG operator.
        return lanewise(NEG);
    }
1738 
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    ShortVector abs() {
        // Named alias for the lanewise ABS operator.
        return lanewise(ABS);
    }
1748 




















1749     // not (~)
1750     /**
1751      * Computes the bitwise logical complement ({@code ~})
1752      * of this vector.
1753      *
     * This is a lane-wise binary operation which applies
     * the primitive bitwise "not" operation ({@code ~})
1756      * to each lane value.
1757      *
1758      * This method is also equivalent to the expression
1759      * {@link #lanewise(VectorOperators.Unary)
1760      *    lanewise}{@code (}{@link VectorOperators#NOT
1761      *    NOT}{@code )}.
1762      *
1763      * <p>
1764      * This is not a full-service named operation like
1765      * {@link #add(Vector) add}.  A masked version of
1766      * this operation is not directly available
1767      * but may be obtained via the masked version of
1768      * {@code lanewise}.

2355         short[] a = toArray();
2356         int[] sa = new int[a.length];
2357         for (int i = 0; i < a.length; i++) {
2358             sa[i] = (int) a[i];
2359         }
2360         return VectorShuffle.fromArray(dsp, sa, 0);
2361     }
2362 
    /*package-private*/
    // Intrinsified lane cast from this vector's short lanes to the
    // byte-backed shuffle representation of the same lane count.
    // ShortVector::toShuffle0 supplies the Java implementation used
    // when the convert intrinsic does not apply.
    @ForceInline
    final
    VectorShuffle<Short> toShuffleTemplate(Class<?> shuffleType) {
        ShortSpecies vsp = vspecies();
        return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                                     getClass(), short.class, length(),
                                     shuffleType, byte.class, length(),
                                     this, vsp,
                                     ShortVector::toShuffle0);
    }
2374 







































2375     /**
2376      * {@inheritDoc} <!--workaround-->
2377      */
2378     @Override
2379     public abstract
2380     ShortVector selectFrom(Vector<Short> v);
2381 
2382     /*package-private*/
2383     @ForceInline
2384     final ShortVector selectFromTemplate(ShortVector v) {
2385         return v.rearrange(this.toShuffle());
2386     }
2387 
2388     /**
2389      * {@inheritDoc} <!--workaround-->
2390      */
2391     @Override
2392     public abstract
2393     ShortVector selectFrom(Vector<Short> s, VectorMask<Short> m);
2394 

2767         return res;
2768     }
2769 
    /** {@inheritDoc} <!--workaround-->
     * @implNote
     * When this method is used on vectors
     * of type {@code ShortVector},
     * there will be no loss of precision.
     */
    @ForceInline
    @Override
    public final double[] toDoubleArray() {
        short[] a = toArray();
        double[] res = new double[a.length];
        for (int i = 0; i < a.length; i++) {
            // Every 16-bit short value is exactly representable as a double.
            res[i] = (double) a[i];
        }
        return res;
    }
2786 
    /**
     * Loads a vector from a byte array starting at an offset.
     * Bytes are composed into primitive lane elements according
     * to the specified byte order.
     * The vector is arranged into lanes according to
     * <a href="Vector.html#lane-order">memory ordering</a>.
     * <p>
     * This method behaves as if it returns the result of calling
     * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
     * fromByteBuffer()} as follows:
     * <pre>{@code
     * var bb = ByteBuffer.wrap(a);
     * var m = species.maskAll(true);
     * return fromByteBuffer(species, bb, offset, bo, m);
     * }</pre>
     *
     * @param species species of desired vector
     * @param a the byte array
     * @param offset the offset into the array
     * @param bo the intended byte order
     * @return a vector loaded from a byte array
     * @throws IndexOutOfBoundsException
     *         if {@code offset+N*ESIZE < 0}
     *         or {@code offset+(N+1)*ESIZE > a.length}
     *         for any lane {@code N} in the vector
     */
    @ForceInline
    public static
    ShortVector fromByteArray(VectorSpecies<Short> species,
                                       byte[] a, int offset,
                                       ByteOrder bo) {
        // Check the whole vector's byte range up front, load in native
        // order, then byte-swap lanes if the requested order differs.
        offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
        ShortSpecies vsp = (ShortSpecies) species;
        return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
    }
2822 
    /**
     * Loads a vector from a byte array starting at an offset
     * and using a mask.
     * Lanes where the mask is unset are filled with the default
     * value of {@code short} (zero).
     * Bytes are composed into primitive lane elements according
     * to the specified byte order.
     * The vector is arranged into lanes according to
     * <a href="Vector.html#lane-order">memory ordering</a>.
     * <p>
     * This method behaves as if it returns the result of calling
     * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
     * fromByteBuffer()} as follows:
     * <pre>{@code
     * var bb = ByteBuffer.wrap(a);
     * return fromByteBuffer(species, bb, offset, bo, m);
     * }</pre>
     *
     * @param species species of desired vector
     * @param a the byte array
     * @param offset the offset into the array
     * @param bo the intended byte order
     * @param m the mask controlling lane selection
     * @return a vector loaded from a byte array
     * @throws IndexOutOfBoundsException
     *         if {@code offset+N*ESIZE < 0}
     *         or {@code offset+(N+1)*ESIZE > a.length}
     *         for any lane {@code N} in the vector
     *         where the mask is set
     */
    @ForceInline
    public static
    ShortVector fromByteArray(VectorSpecies<Short> species,
                                       byte[] a, int offset,
                                       ByteOrder bo,
                                       VectorMask<Short> m) {
        ShortSpecies vsp = (ShortSpecies) species;
        // Fast path: the entire vector fits inside the array, so the
        // masked intrinsic load applies with no per-lane range checks.
        if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
            return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
        }

        // FIXME: optimize
        // Slow path: range-check only the set lanes, then read each
        // element through a wrapping ByteBuffer (2 bytes per short lane).
        checkMaskFromIndexSize(offset, vsp, m, 2, a.length);
        ByteBuffer wb = wrapper(a, bo);
        return vsp.ldOp(wb, offset, (AbstractMask<Short>)m,
                   (wb_, o, i)  -> wb_.getShort(o + i * 2));
    }
2870 
2871     /**
2872      * Loads a vector from an array of type {@code short[]}
2873      * starting at an offset.
2874      * For each vector lane, where {@code N} is the vector lane index, the
2875      * array element at index {@code offset + N} is placed into the
2876      * resulting vector at lane index {@code N}.
2877      *
2878      * @param species species of desired vector
2879      * @param a the array
2880      * @param offset the offset into the array
2881      * @return the vector loaded from an array
2882      * @throws IndexOutOfBoundsException
2883      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2884      *         for any lane {@code N} in the vector
2885      */
2886     @ForceInline
2887     public static
2888     ShortVector fromArray(VectorSpecies<Short> species,
2889                                    short[] a, int offset) {
2890         offset = checkFromIndexSize(offset, species.length(), a.length);

3150      *         or if {@code mapOffset+N >= indexMap.length},
3151      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3152      *         is an invalid index into {@code a},
3153      *         for any lane {@code N} in the vector
3154      *         where the mask is set
3155      * @see ShortVector#toIntArray()
3156      */
    @ForceInline
    public static
    ShortVector fromCharArray(VectorSpecies<Short> species,
                                       char[] a, int offset,
                                       int[] indexMap, int mapOffset,
                                       VectorMask<Short> m) {
        // FIXME: optimize
        // Element-wise masked gather: lane n reads
        // a[offset + indexMap[mapOffset + n]], reinterpreting the char
        // bits as short.  Unset lanes are handled inside vOp —
        // presumably defaulting to zero; confirm against vOp's contract.
        ShortSpecies vsp = (ShortSpecies) species;
        return vsp.vOp(m, n -> (short) a[offset + indexMap[mapOffset + n]]);
    }
3167 
3168 
    /**
     * Loads a vector from a {@linkplain ByteBuffer byte buffer}
     * starting at an offset into the byte buffer.
     * Bytes are composed into primitive lane elements according
     * to the specified byte order.
     * The vector is arranged into lanes according to
     * <a href="Vector.html#lane-order">memory ordering</a>.
     * <p>
     * This method behaves as if it returns the result of calling
     * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
     * fromByteBuffer()} as follows:
     * <pre>{@code
     * var m = species.maskAll(true);
     * return fromByteBuffer(species, bb, offset, bo, m);
     * }</pre>
     *
     * @param species species of desired vector
     * @param bb the byte buffer
     * @param offset the offset into the byte buffer
     * @param bo the intended byte order
     * @return a vector loaded from a byte buffer
     * @throws IndexOutOfBoundsException
     *         if {@code offset+N*2 < 0}
     *         or {@code offset+N*2 >= bb.limit()}
     *         for any lane {@code N} in the vector
     */
    @ForceInline
    public static
    ShortVector fromByteBuffer(VectorSpecies<Short> species,
                                        ByteBuffer bb, int offset,
                                        ByteOrder bo) {
        // Check the whole vector's byte range, load in native order,
        // then byte-swap lanes if the requested order differs.
        offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
        ShortSpecies vsp = (ShortSpecies) species;
        return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
    }
3204 
    /**
     * Loads a vector from a {@linkplain ByteBuffer byte buffer}
     * starting at an offset into the byte buffer
     * and using a mask.
     * Lanes where the mask is unset are filled with the default
     * value of {@code short} (zero).
     * Bytes are composed into primitive lane elements according
     * to the specified byte order.
     * The vector is arranged into lanes according to
     * <a href="Vector.html#lane-order">memory ordering</a>.
     * <p>
     * The following pseudocode illustrates the behavior:
     * <pre>{@code
     * ShortBuffer eb = bb.duplicate()
     *     .position(offset)
     *     .order(bo).asShortBuffer();
     * short[] ar = new short[species.length()];
     * for (int n = 0; n < ar.length; n++) {
     *     if (m.laneIsSet(n)) {
     *         ar[n] = eb.get(n);
     *     }
     * }
     * ShortVector r = ShortVector.fromArray(species, ar, 0);
     * }</pre>
     * @implNote
     * This operation is likely to be more efficient if
     * the specified byte order is the same as
     * {@linkplain ByteOrder#nativeOrder()
     * the platform native order},
     * since this method will not need to reorder
     * the bytes of lane values.
     *
     * @param species species of desired vector
     * @param bb the byte buffer
     * @param offset the offset into the byte buffer
     * @param bo the intended byte order
     * @param m the mask controlling lane selection
     * @return a vector loaded from a byte buffer
     * @throws IndexOutOfBoundsException
     *         if {@code offset+N*2 < 0}
     *         or {@code offset+N*2 >= bb.limit()}
     *         for any lane {@code N} in the vector
     *         where the mask is set
     */
    @ForceInline
    public static
    ShortVector fromByteBuffer(VectorSpecies<Short> species,
                                        ByteBuffer bb, int offset,
                                        ByteOrder bo,
                                        VectorMask<Short> m) {
        ShortSpecies vsp = (ShortSpecies) species;
        // Fast path: the entire vector fits inside the buffer limit, so
        // the masked intrinsic load applies with no per-lane checks.
        if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
            return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
        }

        // FIXME: optimize
        // Slow path: range-check only the set lanes, then read each
        // element from the buffer (2 bytes per short lane).
        checkMaskFromIndexSize(offset, vsp, m, 2, bb.limit());
        ByteBuffer wb = wrapper(bb, bo);
        return vsp.ldOp(wb, offset, (AbstractMask<Short>)m,
                   (wb_, o, i)  -> wb_.getShort(o + i * 2));
    }
3266 
3267     // Memory store operations
3268 
    /**
     * Stores this vector into an array of type {@code short[]}
     * starting at an offset.
     * <p>
     * For each vector lane, where {@code N} is the vector lane index,
     * the lane element at index {@code N} is stored into the array
     * element {@code a[offset+N]}.
     *
     * @param a the array, of type {@code short[]}
     * @param offset the offset into the array
     * @throws IndexOutOfBoundsException
     *         if {@code offset+N < 0} or {@code offset+N >= a.length}
     *         for any lane {@code N} in the vector
     */
    @ForceInline
    public final
    void intoArray(short[] a, int offset) {
        offset = checkFromIndexSize(offset, length(), a.length);
        ShortSpecies vsp = vspecies();
        // Intrinsified store; the trailing lambda spells out the scalar
        // semantics, writing each lane value to a[offset + i].
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this,
            a, offset,
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3297 
3298     /**
3299      * Stores this vector into an array of type {@code short[]}
3300      * starting at offset and using a mask.
3301      * <p>
3302      * For each vector lane, where {@code N} is the vector lane index,
3303      * the lane element at index {@code N} is stored into the array
3304      * element {@code a[offset+N]}.
3305      * If the mask lane at {@code N} is unset then the corresponding
3306      * array element {@code a[offset+N]} is left unchanged.
3307      * <p>
3308      * Array range checking is done for lanes where the mask is set.
3309      * Lanes where the mask is unset are not stored and do not need
3310      * to correspond to legitimate elements of {@code a}.
3311      * That is, unset lanes may correspond to array indexes less than
3312      * zero or beyond the end of the array.
3313      *
3314      * @param a the array, of type {@code short[]}

3420      * is first cast to a {@code char} value and then
3421      * stored into the array element {@code a[offset+N]}.
3422      *
3423      * @param a the array, of type {@code char[]}
3424      * @param offset the offset into the array
3425      * @throws IndexOutOfBoundsException
3426      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3427      *         for any lane {@code N} in the vector
3428      */
    @ForceInline
    public final
    void intoCharArray(char[] a, int offset) {
        offset = checkFromIndexSize(offset, length(), a.length);
        ShortSpecies vsp = vspecies();
        // Intrinsified store; the trailing lambda spells out the scalar
        // semantics: each short lane is cast to char and written to
        // a[offset + i].
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, charArrayAddress(a, offset),
            this,
            a, offset,
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = (char) e));
    }
3443 
3444     /**
3445      * Stores this vector into an array of type {@code char[]}
3446      * starting at offset and using a mask.
3447      * <p>
3448      * For each vector lane, where {@code N} is the vector lane index,
3449      * the lane element at index {@code N}
3450      * is first cast to a {@code char} value and then
3451      * stored into the array element {@code a[offset+N]}.
3452      * If the mask lane at {@code N} is unset then the corresponding
3453      * array element {@code a[offset+N]} is left unchanged.
3454      * <p>
3455      * Array range checking is done for lanes where the mask is set.
3456      * Lanes where the mask is unset are not stored and do not need
3457      * to correspond to legitimate elements of {@code a}.
3458      * That is, unset lanes may correspond to array indexes less than
3459      * zero or beyond the end of the array.
3460      *

3550      *         for any lane {@code N} in the vector
3551      *         where the mask is set
3552      * @see ShortVector#toIntArray()
3553      */
    @ForceInline
    public final
    void intoCharArray(char[] a, int offset,
                       int[] indexMap, int mapOffset,
                       VectorMask<Short> m) {
        // FIXME: optimize
        // Element-wise masked scatter: lane i is narrowed to char and
        // written to a[offset + indexMap[mapOffset + i]]; lanes with a
        // clear mask bit are skipped entirely by the masked stOp.
        stOp(a, offset, m,
             (arr, off, i, e) -> {
                 int j = indexMap[mapOffset + i];
                 arr[off + j] = (char) e;
             });
    }
3566 
3567 
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    void intoByteArray(byte[] a, int offset,
                       ByteOrder bo) {
        // Range-check the full byte span, then store: maybeSwap(bo)
        // reorders lane bytes when bo is not the platform native order.
        offset = checkFromIndexSize(offset, byteSize(), a.length);
        maybeSwap(bo).intoByteArray0(a, offset);
    }
3579 
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    void intoByteArray(byte[] a, int offset,
                       ByteOrder bo,
                       VectorMask<Short> m) {
        if (m.allTrue()) {
            // All lanes set: defer to the cheaper unmasked overload.
            intoByteArray(a, offset, bo);
        } else {
            ShortSpecies vsp = vspecies();
            // Range-check only the set lanes (2 bytes per short lane).
            checkMaskFromIndexSize(offset, vsp, m, 2, a.length);
            maybeSwap(bo).intoByteArray0(a, offset, m);
        }
    }
3597 
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    void intoByteBuffer(ByteBuffer bb, int offset,
                        ByteOrder bo) {
        // Reject read-only buffers before any work is done.
        if (ScopedMemoryAccess.isReadOnly(bb)) {
            throw new ReadOnlyBufferException();
        }
        offset = checkFromIndexSize(offset, byteSize(), bb.limit());
        maybeSwap(bo).intoByteBuffer0(bb, offset);
    }
3612 
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    void intoByteBuffer(ByteBuffer bb, int offset,
                        ByteOrder bo,
                        VectorMask<Short> m) {
        if (m.allTrue()) {
            // All lanes set: defer to the cheaper unmasked overload
            // (which also performs the read-only check).
            intoByteBuffer(bb, offset, bo);
        } else {
            if (bb.isReadOnly()) {
                throw new ReadOnlyBufferException();
            }
            ShortSpecies vsp = vspecies();
            // Range-check only the set lanes (2 bytes per short lane).
            checkMaskFromIndexSize(offset, vsp, m, 2, bb.limit());
            maybeSwap(bo).intoByteBuffer0(bb, offset, m);
        }
    }
3633 
3634     // ================================================
3635 
3636     // Low-level memory operations.
3637     //
3638     // Note that all of these operations *must* inline into a context
3639     // where the exact species of the involved vector is a
3640     // compile-time constant.  Otherwise, the intrinsic generation
3641     // will fail and performance will suffer.
3642     //
3643     // In many cases this is achieved by re-deriving a version of the
3644     // method in each concrete subclass (per species).  The re-derived
3645     // method simply calls one of these generic methods, with exact
3646     // parameters for the controlling metadata, which is either a
3647     // typed vector or constant species instance.
3648 
3649     // Unchecked loading operations in native byte order.
3650     // Caller is responsible for applying index checks, masking, and
3651     // byte swapping.
3652 
    /*package-private*/
    abstract
    ShortVector fromArray0(short[] a, int offset);
    // Unchecked native-order load: the caller is responsible for index
    // checks, masking, and byte swapping (see the section note above).
    // The trailing lambda spells out the scalar semantics lane by lane.
    @ForceInline
    final
    ShortVector fromArray0Template(short[] a, int offset) {
        ShortSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> s.ldOp(arr, off,
                                    (arr_, off_, i) -> arr_[off_ + i]));
    }
3667 
    /*package-private*/
    abstract
    ShortVector fromArray0(short[] a, int offset, VectorMask<Short> m);
    // Masked variant of the unchecked native-order load; the mask is
    // validated against this vector's species before use.
    @ForceInline
    final
    <M extends VectorMask<Short>>
    ShortVector fromArray0Template(Class<M> maskClass, short[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset), m,
            a, offset, vsp,
            (arr, off, s, vm) -> s.ldOp(arr, off, vm,
                                        (arr_, off_, i) -> arr_[off_ + i]));
    }
3684 
3685 
    /*package-private*/
    abstract
    ShortVector fromCharArray0(char[] a, int offset);
    // Unchecked load from a char[] source: each char's 16 bits are
    // reinterpreted as a short lane value via the cast in the lambda.
    @ForceInline
    final
    ShortVector fromCharArray0Template(char[] a, int offset) {
        ShortSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, charArrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> s.ldOp(arr, off,
                                    (arr_, off_, i) -> (short) arr_[off_ + i]));
    }
3700 
    /*package-private*/
    abstract
    ShortVector fromCharArray0(char[] a, int offset, VectorMask<Short> m);
    // Masked variant of the unchecked char[] load; the mask is
    // validated against this vector's species before use.
    @ForceInline
    final
    <M extends VectorMask<Short>>
    ShortVector fromCharArray0Template(Class<M> maskClass, char[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        return VectorSupport.loadMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                a, charArrayAddress(a, offset), m,
                a, offset, vsp,
                (arr, off, s, vm) -> s.ldOp(arr, off, vm,
                                            (arr_, off_, i) -> (short) arr_[off_ + i]));
    }
3717 
3718 
    @Override
    abstract
    ShortVector fromByteArray0(byte[] a, int offset);
    // Unchecked load from a byte[] in native byte order; each lane is
    // composed from 2 consecutive bytes via a ByteBuffer view.
    @ForceInline
    final
    ShortVector fromByteArray0Template(byte[] a, int offset) {
        ShortSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            a, offset, vsp,
            // Scalar fallback: wrap the array and read native-order shorts.
            (arr, off, s) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                return s.ldOp(wb, off,
                        (wb_, o, i) -> wb_.getShort(o + i * 2));
            });
    }

    abstract
    ShortVector fromByteArray0(byte[] a, int offset, VectorMask<Short> m);
    // Masked variant of the unchecked byte[] load.
    @ForceInline
    final
    <M extends VectorMask<Short>>
    ShortVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
        ShortSpecies vsp = vspecies();
        m.check(vsp);
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset), m,
            a, offset, vsp,
            (arr, off, s, vm) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                return s.ldOp(wb, off, vm,
                        (wb_, o, i) -> wb_.getShort(o + i * 2));
            });
    }
3755 
    abstract
    ShortVector fromByteBuffer0(ByteBuffer bb, int offset);
    // Unchecked load from a ByteBuffer in native byte order.
    // Goes through ScopedMemoryAccess so the access is checked against
    // the buffer's backing-memory scope (relevant for direct buffers).
    @ForceInline
    final
    ShortVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
        ShortSpecies vsp = vspecies();
        return ScopedMemoryAccess.loadFromByteBuffer(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                bb, offset, vsp,
                // Scalar fallback: re-wrap in native order and read shorts.
                (buf, off, s) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    return s.ldOp(wb, off,
                            (wb_, o, i) -> wb_.getShort(o + i * 2));
                });
    }

    abstract
    ShortVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Short> m);
    // Masked variant of the unchecked ByteBuffer load.
    @ForceInline
    final
    <M extends VectorMask<Short>>
    ShortVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
        ShortSpecies vsp = vspecies();
        m.check(vsp);
        return ScopedMemoryAccess.loadFromByteBufferMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                bb, offset, m, vsp,
                (buf, off, s, vm) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    return s.ldOp(wb, off, vm,
                            (wb_, o, i) -> wb_.getShort(o + i * 2));
                });
    }
3789 
3790     // Unchecked storing operations in native byte order.
3791     // Caller is responsible for applying index checks, masking, and
3792     // byte swapping.
3793 
    abstract
    void intoArray0(short[] a, int offset);
    // Unchecked store into a short[] in native lane order.
    // Caller is responsible for all index checks.
    @ForceInline
    final
    void intoArray0Template(short[] a, int offset) {
        ShortSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, a, offset,
            // Scalar fallback, used only when the intrinsic is not applied.
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_+i] = e));
    }

    abstract
    void intoArray0(short[] a, int offset, VectorMask<Short> m);
    // Masked variant: only lanes with the mask set are written.
    @ForceInline
    final
    <M extends VectorMask<Short>>
    void intoArray0Template(Class<M> maskClass, short[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm)
            -> v.stOp(arr, off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3825 
3826 
3827 
    abstract
    void intoByteArray0(byte[] a, int offset);
    // Unchecked store into a byte[] in native byte order; each lane
    // occupies 2 consecutive bytes via a ByteBuffer view.
    @ForceInline
    final
    void intoByteArray0Template(byte[] a, int offset) {
        ShortSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            this, a, offset,
            // Scalar fallback: wrap the array and write native-order shorts.
            (arr, off, v) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                v.stOp(wb, off,
                        (tb_, o, i, e) -> tb_.putShort(o + i * 2, e));
            });
    }

    abstract
    void intoByteArray0(byte[] a, int offset, VectorMask<Short> m);
    // Masked variant: only lanes with the mask set are written.
    @ForceInline
    final
    <M extends VectorMask<Short>>
    void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
        ShortSpecies vsp = vspecies();
        m.check(vsp);
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                v.stOp(wb, off, vm,
                        (tb_, o, i, e) -> tb_.putShort(o + i * 2, e));
            });
    }
3863 
    // Unchecked store into a ByteBuffer in native byte order, routed
    // through ScopedMemoryAccess (scope-checked for direct buffers).
    @ForceInline
    final
    void intoByteBuffer0(ByteBuffer bb, int offset) {
        ShortSpecies vsp = vspecies();
        ScopedMemoryAccess.storeIntoByteBuffer(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                this, bb, offset,
                // Scalar fallback: re-wrap in native order and write shorts.
                (buf, off, v) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    v.stOp(wb, off,
                            (wb_, o, i, e) -> wb_.putShort(o + i * 2, e));
                });
    }

    abstract
    void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Short> m);
    // Masked variant: only lanes with the mask set are written.
    @ForceInline
    final
    <M extends VectorMask<Short>>
    void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
        ShortSpecies vsp = vspecies();
        m.check(vsp);
        ScopedMemoryAccess.storeIntoByteBufferMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                this, m, bb, offset,
                (buf, off, v, vm) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    v.stOp(wb, off, vm,
                            (wb_, o, i, e) -> wb_.putShort(o + i * 2, e));
                });
    }
3895 
    /*package-private*/
    abstract
    void intoCharArray0(char[] a, int offset, VectorMask<Short> m);
    // Unchecked masked store into a char[]; each short lane is
    // reinterpreted as a 16-bit char (same bit pattern).
    @ForceInline
    final
    <M extends VectorMask<Short>>
    void intoCharArray0Template(Class<M> maskClass, char[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, charArrayAddress(a, offset),
            this, m, a, offset,
            // Scalar fallback, used only when the intrinsic is not applied.
            (arr, off, v, vm)
            -> v.stOp(arr, off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = (char) e));
    }
3913 
3914     // End of low-level memory operations.
3915 
    // Verifies that every set lane of the mask addresses an in-bounds
    // index: offset + laneIndex*scale must fall within [0, limit).
    // Delegates the per-lane check to AbstractMask.checkIndexByLane.
    private static
    void checkMaskFromIndexSize(int offset,
                                ShortSpecies vsp,
                                VectorMask<Short> m,
                                int scale,
                                int limit) {
        ((AbstractMask<Short>)m)
            .checkIndexByLane(offset, limit, vsp.iota(), scale);
    }
3925 










    // Placeholder for a not-yet-implemented partially-out-of-bounds
    // masked store: if any lane of the full vector span would fall
    // outside [0, limit), fail loudly instead of storing garbage.
    @ForceInline
    private void conditionalStoreNYI(int offset,
                                     ShortSpecies vsp,
                                     VectorMask<Short> m,
                                     int scale,
                                     int limit) {
        if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
            String msg =
                String.format("unimplemented: store @%d in [0..%d), %s in %s",
                              offset, limit, m, vsp);
            throw new AssertionError(msg);
        }
    }
3939 
3940     /*package-private*/
3941     @Override
3942     @ForceInline
3943     final
3944     ShortVector maybeSwap(ByteOrder bo) {
3945         if (bo != NATIVE_ENDIAN) {

4233                 }
4234             }
4235             return dummyVector().vectorFactory(res);
4236         }
4237 
        // Species-level load helpers: delegate to a throwaway ("dummy")
        // vector of this species, which carries the scalar implementation.

        /*package-private*/
        @ForceInline
        <M> ShortVector ldOp(M memory, int offset,
                                      FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        <M> ShortVector ldOp(M memory, int offset,
                                      VectorMask<Short> m,
                                      FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, m, f);
        }
4252 















        // Species-level store helpers: delegate to a dummy vector of
        // this species, which carries the scalar implementation.

        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset, FStOp<M> f) {
            dummyVector().stOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset,
                      AbstractMask<Short> m,
                      FStOp<M> f) {
            dummyVector().stOp(memory, offset, m, f);
        }
4266 














4267         // N.B. Make sure these constant vectors and
4268         // masks load up correctly into registers.
4269         //
4270         // Also, see if we can avoid all that switching.
4271         // Could we cache both vectors and both masks in
4272         // this species object?
4273 
4274         // Zero and iota vector access
4275         @Override
4276         @ForceInline
4277         public final ShortVector zero() {
4278             if ((Class<?>) vectorType() == ShortMaxVector.class)
4279                 return ShortMaxVector.ZERO;
4280             switch (vectorBitSize()) {
4281                 case 64: return Short64Vector.ZERO;
4282                 case 128: return Short128Vector.ZERO;
4283                 case 256: return Short256Vector.ZERO;
4284                 case 512: return Short512Vector.ZERO;
4285             }
4286             throw new AssertionError();

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 

  27 import java.nio.ByteOrder;

  28 import java.util.Arrays;
  29 import java.util.Objects;
  30 import java.util.function.Function;

  31 
  32 import jdk.incubator.foreign.MemorySegment;
  33 import jdk.incubator.foreign.ValueLayout;
  34 import jdk.internal.access.foreign.MemorySegmentProxy;
  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code short} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class ShortVector extends AbstractVector<Short> {
  53 
    // Wraps the backing lane array; subclasses supply concrete shapes.
    ShortVector(short[] vec) {
        super(vec);
    }

    // Operator kinds rejected for short lanes (floating-point-only ops).
    static final int FORBID_OPCODE_KIND = VO_ONLYFP;

    // Byte-aligned (alignment 8 bits) short layout used for all
    // MemorySegment element accesses, so unaligned offsets are legal.
    static final ValueLayout.OfShort ELEMENT_LAYOUT = ValueLayout.JAVA_SHORT.withBitAlignment(8);
  61 
    // Maps an operator token to its VM opcode, rejecting operators that
    // are invalid for short lanes (see FORBID_OPCODE_KIND).
    @ForceInline
    static int opCode(Operator op) {
        return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
    }
    // As above, but additionally requires the given operator kind bits.
    @ForceInline
    static int opCode(Operator op, int requireKind) {
        requireKind |= VO_OPCODE_VALID;
        return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
    }
    // Tests whether the operator carries the given kind bit.
    @ForceInline
    static boolean opKind(Operator op, int bit) {
        return VectorOperators.opKind(op, bit);
    }
  75 
  76     // Virtualized factories and operators,
  77     // coded with portable definitions.
  78     // These are all @ForceInline in case
  79     // they need to be used performantly.
  80     // The various shape-specific subclasses
  81     // also specialize them by wrapping

 336         return vectorFactory(res);
 337     }
 338 
    /*package-private*/
    // Masked scalar load: applies f only at lanes whose mask bit is set;
    // unset lanes keep the default value (zero) in the result.
    @ForceInline
    final
    <M> ShortVector ldOp(M memory, int offset,
                                  VectorMask<Short> m,
                                  FLdOp<M> f) {
        //short[] vec = vec();
        short[] res = new short[length()];
        boolean[] mbits = ((AbstractMask<Short>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(memory, offset, i);
            }
        }
        return vectorFactory(res);
    }
 355 
    /*package-private*/
    // Per-lane loader over a MemorySegment addressed by a long offset.
    interface FLdLongOp {
        short apply(MemorySegment memory, long offset, int i);
    }

    /*package-private*/
    // Scalar load from a memory segment: builds a new lane array by
    // applying f once per lane.
    @ForceInline
    final
    ShortVector ldLongOp(MemorySegment memory, long offset,
                                  FLdLongOp f) {
        //dummy; no vec = vec();
        short[] res = new short[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(memory, offset, i);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    // Masked variant: unset lanes keep the default value (zero).
    @ForceInline
    final
    ShortVector ldLongOp(MemorySegment memory, long offset,
                                  VectorMask<Short> m,
                                  FLdLongOp f) {
        //short[] vec = vec();
        short[] res = new short[length()];
        boolean[] mbits = ((AbstractMask<Short>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(memory, offset, i);
            }
        }
        return vectorFactory(res);
    }
 390 
    // Reads lane i from a memory segment: 2 bytes per lane, starting at
    // byte offset o, using the byte-aligned short layout.
    static short memorySegmentGet(MemorySegment ms, long o, int i) {
        return ms.get(ELEMENT_LAYOUT, o + i * 2L);
    }
 394 
    // Per-lane storer into an arbitrary memory abstraction M.
    interface FStOp<M> {
        void apply(M memory, int offset, int i, short a);
    }

    /*package-private*/
    // Scalar store: applies f once per lane with that lane's value.
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  FStOp<M> f) {
        short[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            f.apply(memory, offset, i, vec[i]);
        }
    }

    /*package-private*/
    // Masked variant: stores only lanes whose mask bit is set.
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  VectorMask<Short> m,
                  FStOp<M> f) {
        short[] vec = vec();
        boolean[] mbits = ((AbstractMask<Short>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            if (mbits[i]) {
                f.apply(memory, offset, i, vec[i]);
            }
        }
    }
 424 
    // Per-lane storer into a MemorySegment addressed by a long offset.
    interface FStLongOp {
        void apply(MemorySegment memory, long offset, int i, short a);
    }

    /*package-private*/
    // Scalar store into a memory segment, one lane at a time.
    @ForceInline
    final
    void stLongOp(MemorySegment memory, long offset,
                  FStLongOp f) {
        short[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            f.apply(memory, offset, i, vec[i]);
        }
    }

    /*package-private*/
    // Masked variant: stores only lanes whose mask bit is set.
    @ForceInline
    final
    void stLongOp(MemorySegment memory, long offset,
                  VectorMask<Short> m,
                  FStLongOp f) {
        short[] vec = vec();
        boolean[] mbits = ((AbstractMask<Short>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            if (mbits[i]) {
                f.apply(memory, offset, i, vec[i]);
            }
        }
    }
 454 
    // Writes lane i into a memory segment: 2 bytes per lane, starting at
    // byte offset o, using the byte-aligned short layout.
    static void memorySegmentSet(MemorySegment ms, long o, int i, short e) {
        ms.set(ELEMENT_LAYOUT, o + i * 2L, e);
    }

    // Binary test

    /*package-private*/
    // Per-lane comparison kernel; cond selects the comparison operation.
    interface FBinTest {
        boolean apply(int cond, int i, short a, short b);
    }
 465 
 466     /*package-private*/
 467     @ForceInline
 468     final
 469     AbstractMask<Short> bTest(int cond,
 470                                   Vector<Short> o,
 471                                   FBinTest f) {
 472         short[] vec1 = vec();
 473         short[] vec2 = ((ShortVector)o).vec();
 474         boolean[] bits = new boolean[length()];
 475         for (int i = 0; i < length(); i++){
 476             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 477         }
 478         return maskFactory(bits);

 489     static short rotateRight(short a, int n) {
 490         return (short)(((((short)a) & Short.toUnsignedInt((short)-1)) >>> (n & Short.SIZE-1)) | ((((short)a) & Short.toUnsignedInt((short)-1)) << (Short.SIZE - (n & Short.SIZE-1))));
 491     }
 492 
    /*package-private*/
    // Covariant species accessor, narrowed to ShortSpecies.
    @Override
    abstract ShortSpecies vspecies();
 496 
 497     /*package-private*/
 498     @ForceInline
 499     static long toBits(short e) {
 500         return  e;
 501     }
 502 
 503     /*package-private*/
 504     @ForceInline
 505     static short fromBits(long bits) {
 506         return ((short)bits);
 507     }
 508 
 509     static ShortVector expandHelper(Vector<Short> v, VectorMask<Short> m) {
 510         VectorSpecies<Short> vsp = m.vectorSpecies();
 511         ShortVector r  = (ShortVector) vsp.zero();
 512         ShortVector vi = (ShortVector) v;
 513         if (m.allTrue()) {
 514             return vi;
 515         }
 516         for (int i = 0, j = 0; i < vsp.length(); i++) {
 517             if (m.laneIsSet(i)) {
 518                 r = r.withLane(i, vi.lane(j++));
 519             }
 520         }
 521         return r;
 522     }
 523 
 524     static ShortVector compressHelper(Vector<Short> v, VectorMask<Short> m) {
 525         VectorSpecies<Short> vsp = m.vectorSpecies();
 526         ShortVector r  = (ShortVector) vsp.zero();
 527         ShortVector vi = (ShortVector) v;
 528         if (m.allTrue()) {
 529             return vi;
 530         }
 531         for (int i = 0, j = 0; i < vsp.length(); i++) {
 532             if (m.laneIsSet(i)) {
 533                 r = r.withLane(j++, vi.lane(i));
 534             }
 535         }
 536         return r;
 537     }
 538 
 539     // Static factories (other than memory operations)
 540 
 541     // Note: A surprising behavior in javadoc
 542     // sometimes makes a lone /** {@inheritDoc} */
 543     // comment drop the method altogether,
 544      // apparently if the method mentions a
 545      // parameter or return type of Vector<Short>
 546     // instead of Vector<E> as originally specified.
 547     // Adding an empty HTML fragment appears to
 548     // nudge javadoc into providing the desired
 549     // inherited documentation.  We use the HTML
 550     // comment <!--workaround--> for this.
 551 
 552     /**
 553      * Returns a vector of the given species
 554      * where all lane elements are set to
 555      * zero, the default primitive value.
 556      *
 557      * @param species species of the desired zero vector
 558      * @return a zero vector

 708                 return lanewise(XOR, broadcast(-1), m);
 709             }
 710         }
 711         int opc = opCode(op);
 712         return VectorSupport.unaryOp(
 713             opc, getClass(), maskClass, short.class, length(),
 714             this, m,
 715             UN_IMPL.find(op, opc, ShortVector::unaryOperations));
 716     }
 717 
    // Cache of scalar fallback implementations for unary operators,
    // keyed by opcode; populated lazily via find(...).
    private static final
    ImplCache<Unary, UnaryOperation<ShortVector, VectorMask<Short>>>
        UN_IMPL = new ImplCache<>(Unary.class, ShortVector.class);

    // Maps a unary opcode to its per-lane scalar fallback, or null if
    // the opcode has no fallback here.
    private static UnaryOperation<ShortVector, VectorMask<Short>> unaryOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_NEG: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) -a);
            case VECTOR_OP_ABS: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) Math.abs(a));
            case VECTOR_OP_BIT_COUNT: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) bitCount(a));
            case VECTOR_OP_TZ_COUNT: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) numberOfTrailingZeros(a));
            case VECTOR_OP_LZ_COUNT: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) numberOfLeadingZeros(a));
            case VECTOR_OP_REVERSE: return (v0, m) ->
                    v0.uOp(m, (i, a) -> reverse(a));
            case VECTOR_OP_REVERSE_BYTES: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (short) Short.reverseBytes(a));
            default: return null;
        }
    }
 741 
 742     // Binary lanewise support
 743 
 744     /**
 745      * {@inheritDoc} <!--workaround-->
 746      * @see #lanewise(VectorOperators.Binary,short)
 747      * @see #lanewise(VectorOperators.Binary,short,VectorMask)
 748      */
 749     @Override
 750     public abstract
 751     ShortVector lanewise(VectorOperators.Binary op,
 752                                   Vector<Short> v);
 753     @ForceInline
 754     final
 755     ShortVector lanewiseTemplate(VectorOperators.Binary op,
 756                                           Vector<Short> v) {
 757         ShortVector that = (ShortVector) v;

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    ShortVector neg() {
        // Convenience alias for the lanewise NEG operator.
        return lanewise(NEG);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    ShortVector abs() {
        // Convenience alias for the lanewise ABS operator.
        return lanewise(ABS);
    }
1863 
1864     static int bitCount(short a) {
1865         return Integer.bitCount((int)a & 0xFFFF);
1866     }
1867     static int numberOfTrailingZeros(short a) {
1868         return a != 0 ? Integer.numberOfTrailingZeros(a) : 16;
1869     }
1870     static int numberOfLeadingZeros(short a) {
1871         return a >= 0 ? Integer.numberOfLeadingZeros(a) - 16 : 0;
1872     }
1873 
1874     static short reverse(short a) {
1875         if (a == 0 || a == -1) return a;
1876 
1877         short b = rotateLeft(a, 8);
1878         b = (short) (((b & 0x5555) << 1) | ((b & 0xAAAA) >>> 1));
1879         b = (short) (((b & 0x3333) << 2) | ((b & 0xCCCC) >>> 2));
1880         b = (short) (((b & 0x0F0F) << 4) | ((b & 0xF0F0) >>> 4));
1881         return b;
1882     }
1883 
1884     // not (~)
1885     /**
1886      * Computes the bitwise logical complement ({@code ~})
1887      * of this vector.
1888      *
 1889      * This is a lane-wise unary operation which applies
 1890      * the primitive bitwise "not" operation ({@code ~})
1891      * to each lane value.
1892      *
1893      * This method is also equivalent to the expression
1894      * {@link #lanewise(VectorOperators.Unary)
1895      *    lanewise}{@code (}{@link VectorOperators#NOT
1896      *    NOT}{@code )}.
1897      *
1898      * <p>
1899      * This is not a full-service named operation like
1900      * {@link #add(Vector) add}.  A masked version of
1901      * this operation is not directly available
1902      * but may be obtained via the masked version of
1903      * {@code lanewise}.

2490         short[] a = toArray();
2491         int[] sa = new int[a.length];
2492         for (int i = 0; i < a.length; i++) {
2493             sa[i] = (int) a[i];
2494         }
2495         return VectorShuffle.fromArray(dsp, sa, 0);
2496     }
2497 
    /*package-private*/
    // Converts this vector's short lanes into a shuffle of the given
    // type by a lanewise cast to byte-sized shuffle indexes.
    @ForceInline
    final
    VectorShuffle<Short> toShuffleTemplate(Class<?> shuffleType) {
        ShortSpecies vsp = vspecies();
        return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                                     getClass(), short.class, length(),
                                     shuffleType, byte.class, length(),
                                     this, vsp,
                                     ShortVector::toShuffle0);
    }
2509 
2510     /**
2511      * {@inheritDoc} <!--workaround-->
2512      * @since 19
2513      */
2514     @Override
2515     public abstract
2516     ShortVector compress(VectorMask<Short> m);
2517 
    /*package-private*/
    // Intrinsified compress; falls back to the scalar compressHelper
    // when the VM does not apply the intrinsic.
    @ForceInline
    final
    <M extends AbstractMask<Short>>
    ShortVector compressTemplate(Class<M> masktype, M m) {
      m.check(masktype, this);
      return (ShortVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
                                                   short.class, length(), this, m,
                                                   (v1, m1) -> compressHelper(v1, m1));
    }
2528 
2529     /**
2530      * {@inheritDoc} <!--workaround-->
2531      * @since 19
2532      */
2533     @Override
2534     public abstract
2535     ShortVector expand(VectorMask<Short> m);
2536 
    /*package-private*/
    // Intrinsified expand; falls back to the scalar expandHelper
    // when the VM does not apply the intrinsic.
    @ForceInline
    final
    <M extends AbstractMask<Short>>
    ShortVector expandTemplate(Class<M> masktype, M m) {
      m.check(masktype, this);
      return (ShortVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
                                                   short.class, length(), this, m,
                                                   (v1, m1) -> expandHelper(v1, m1));
    }
2547 
2548 
2549     /**
2550      * {@inheritDoc} <!--workaround-->
2551      */
2552     @Override
2553     public abstract
2554     ShortVector selectFrom(Vector<Short> v);
2555 
    /*package-private*/
    // selectFrom(v) is rearrange with the roles reversed: this vector's
    // lanes act as the indexes into v.
    @ForceInline
    final ShortVector selectFromTemplate(ShortVector v) {
        return v.rearrange(this.toShuffle());
    }
2561 
2562     /**
2563      * {@inheritDoc} <!--workaround-->
2564      */
2565     @Override
2566     public abstract
2567     ShortVector selectFrom(Vector<Short> s, VectorMask<Short> m);
2568 

2941         return res;
2942     }
2943 
2944     /** {@inheritDoc} <!--workaround-->
2945      * @implNote
 2946      * When this method is used on vectors
2947      * of type {@code ShortVector},
2948      * there will be no loss of precision.
2949      */
2950     @ForceInline
2951     @Override
2952     public final double[] toDoubleArray() {
2953         short[] a = toArray();
2954         double[] res = new double[a.length];
2955         for (int i = 0; i < a.length; i++) {
2956             res[i] = (double) a[i];
2957         }
2958         return res;
2959     }
2960 




















































































2961     /**
2962      * Loads a vector from an array of type {@code short[]}
2963      * starting at an offset.
2964      * For each vector lane, where {@code N} is the vector lane index, the
2965      * array element at index {@code offset + N} is placed into the
2966      * resulting vector at lane index {@code N}.
2967      *
2968      * @param species species of desired vector
2969      * @param a the array
2970      * @param offset the offset into the array
2971      * @return the vector loaded from an array
2972      * @throws IndexOutOfBoundsException
2973      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2974      *         for any lane {@code N} in the vector
2975      */
2976     @ForceInline
2977     public static
2978     ShortVector fromArray(VectorSpecies<Short> species,
2979                                    short[] a, int offset) {
2980         offset = checkFromIndexSize(offset, species.length(), a.length);

3240      *         or if {@code mapOffset+N >= indexMap.length},
3241      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3242      *         is an invalid index into {@code a},
3243      *         for any lane {@code N} in the vector
3244      *         where the mask is set
3245      * @see ShortVector#toIntArray()
3246      */
    @ForceInline
    public static
    ShortVector fromCharArray(VectorSpecies<Short> species,
                                       char[] a, int offset,
                                       int[] indexMap, int mapOffset,
                                       VectorMask<Short> m) {
        // FIXME: optimize
        // Scalar gather: lane N reads a[offset + indexMap[mapOffset + N]],
        // reinterpreting the char bits as a short.
        // NOTE(review): unset lanes presumably take the species default
        // (zero) — confirm against vOp's masked semantics.
        ShortSpecies vsp = (ShortSpecies) species;
        return vsp.vOp(m, n -> (short) a[offset + indexMap[mapOffset + n]]);
    }
3257 
3258 
3259     /**
3260      * Loads a vector from a {@linkplain MemorySegment memory segment}
3261      * starting at an offset into the memory segment.
3262      * Bytes are composed into primitive lane elements according
3263      * to the specified byte order.
3264      * The vector is arranged into lanes according to
3265      * <a href="Vector.html#lane-order">memory ordering</a>.
3266      * <p>
3267      * This method behaves as if it returns the result of calling
3268      * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
3269      * fromMemorySegment()} as follows:
3270      * <pre>{@code
3271      * var m = species.maskAll(true);
3272      * return fromMemorySegment(species, ms, offset, bo, m);
3273      * }</pre>
3274      *
3275      * @param species species of desired vector
3276      * @param ms the memory segment
3277      * @param offset the offset into the memory segment
3278      * @param bo the intended byte order
3279      * @return a vector loaded from the memory segment
3280      * @throws IndexOutOfBoundsException
3281      *         if {@code offset+N*2 < 0}
3282      *         or {@code offset+N*2 >= ms.byteSize()}
3283      *         for any lane {@code N} in the vector
3284      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3285      *         not backed by a {@code byte[]} array.
3286      * @throws IllegalStateException if the memory segment's session is not alive,
3287      *         or if access occurs from a thread other than the thread owning the session.
3288      * @since 19
3289      */
3290     @ForceInline
3291     public static
3292     ShortVector fromMemorySegment(VectorSpecies<Short> species,
3293                                            MemorySegment ms, long offset,
3294                                            ByteOrder bo) {
3295         offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
3296         ShortSpecies vsp = (ShortSpecies) species;
3297         return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
3298     }
3299 
3300     /**
3301      * Loads a vector from a {@linkplain MemorySegment memory segment}
3302      * starting at an offset into the memory segment
3303      * and using a mask.
3304      * Lanes where the mask is unset are filled with the default
3305      * value of {@code short} (zero).
3306      * Bytes are composed into primitive lane elements according
3307      * to the specified byte order.
3308      * The vector is arranged into lanes according to
3309      * <a href="Vector.html#lane-order">memory ordering</a>.
3310      * <p>
3311      * The following pseudocode illustrates the behavior:
3312      * <pre>{@code
3313      * var slice = ms.asSlice(offset);


3314      * short[] ar = new short[species.length()];
3315      * for (int n = 0; n < ar.length; n++) {
3316      *     if (m.laneIsSet(n)) {
     *         ar[n] = slice.getAtIndex(ValueLayout.JAVA_SHORT.withBitAlignment(8), n);
3318      *     }
3319      * }
3320      * ShortVector r = ShortVector.fromArray(species, ar, 0);
3321      * }</pre>
3322      * @implNote
3323      * This operation is likely to be more efficient if
3324      * the specified byte order is the same as
3325      * {@linkplain ByteOrder#nativeOrder()
3326      * the platform native order},
3327      * since this method will not need to reorder
3328      * the bytes of lane values.
3329      *
3330      * @param species species of desired vector
3331      * @param ms the memory segment
3332      * @param offset the offset into the memory segment
3333      * @param bo the intended byte order
3334      * @param m the mask controlling lane selection
3335      * @return a vector loaded from the memory segment
3336      * @throws IndexOutOfBoundsException
3337      *         if {@code offset+N*2 < 0}
3338      *         or {@code offset+N*2 >= ms.byteSize()}
3339      *         for any lane {@code N} in the vector
3340      *         where the mask is set
3341      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3342      *         not backed by a {@code byte[]} array.
3343      * @throws IllegalStateException if the memory segment's session is not alive,
3344      *         or if access occurs from a thread other than the thread owning the session.
3345      * @since 19
3346      */
    @ForceInline
    public static
    ShortVector fromMemorySegment(VectorSpecies<Short> species,
                                           MemorySegment ms, long offset,
                                           ByteOrder bo,
                                           VectorMask<Short> m) {
        ShortSpecies vsp = (ShortSpecies) species;
        // Fast path: the full vector-sized window is in bounds, so the masked
        // intrinsic load can run without per-lane index checks.
        if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
            return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
        }

        // Slow path: the window may straddle the segment end.  Check each set
        // lane's index (scale 2 = bytes per short), then load lane by lane.
        // NOTE(review): unlike the fast path, no maybeSwap(bo) is applied here;
        // presumably memorySegmentGet already honors the needed order — confirm.
        // FIXME: optimize
        checkMaskFromIndexSize(offset, vsp, m, 2, ms.byteSize());
        return vsp.ldLongOp(ms, offset, m, ShortVector::memorySegmentGet);
    }
3362 
3363     // Memory store operations
3364 
3365     /**
3366      * Stores this vector into an array of type {@code short[]}
3367      * starting at an offset.
3368      * <p>
3369      * For each vector lane, where {@code N} is the vector lane index,
3370      * the lane element at index {@code N} is stored into the array
3371      * element {@code a[offset+N]}.
3372      *
3373      * @param a the array, of type {@code short[]}
3374      * @param offset the offset into the array
3375      * @throws IndexOutOfBoundsException
3376      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3377      *         for any lane {@code N} in the vector
3378      */
    @ForceInline
    public final
    void intoArray(short[] a, int offset) {
        // Range-check the whole lane span once, up front.
        offset = checkFromIndexSize(offset, length(), a.length);
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback
        // used when the intrinsic does not apply.
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this,
            a, offset,
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3393 
3394     /**
3395      * Stores this vector into an array of type {@code short[]}
3396      * starting at offset and using a mask.
3397      * <p>
3398      * For each vector lane, where {@code N} is the vector lane index,
3399      * the lane element at index {@code N} is stored into the array
3400      * element {@code a[offset+N]}.
3401      * If the mask lane at {@code N} is unset then the corresponding
3402      * array element {@code a[offset+N]} is left unchanged.
3403      * <p>
3404      * Array range checking is done for lanes where the mask is set.
3405      * Lanes where the mask is unset are not stored and do not need
3406      * to correspond to legitimate elements of {@code a}.
3407      * That is, unset lanes may correspond to array indexes less than
3408      * zero or beyond the end of the array.
3409      *
3410      * @param a the array, of type {@code short[]}

3516      * is first cast to a {@code char} value and then
3517      * stored into the array element {@code a[offset+N]}.
3518      *
3519      * @param a the array, of type {@code char[]}
3520      * @param offset the offset into the array
3521      * @throws IndexOutOfBoundsException
3522      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3523      *         for any lane {@code N} in the vector
3524      */
    @ForceInline
    public final
    void intoCharArray(char[] a, int offset) {
        // Range-check the whole lane span once, up front.
        offset = checkFromIndexSize(offset, length(), a.length);
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the scalar fallback narrows each short lane
        // to char before storing.
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, charArrayAddress(a, offset),
            this,
            a, offset,
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = (char) e));
    }
3539 
3540     /**
3541      * Stores this vector into an array of type {@code char[]}
3542      * starting at offset and using a mask.
3543      * <p>
3544      * For each vector lane, where {@code N} is the vector lane index,
3545      * the lane element at index {@code N}
3546      * is first cast to a {@code char} value and then
3547      * stored into the array element {@code a[offset+N]}.
3548      * If the mask lane at {@code N} is unset then the corresponding
3549      * array element {@code a[offset+N]} is left unchanged.
3550      * <p>
3551      * Array range checking is done for lanes where the mask is set.
3552      * Lanes where the mask is unset are not stored and do not need
3553      * to correspond to legitimate elements of {@code a}.
3554      * That is, unset lanes may correspond to array indexes less than
3555      * zero or beyond the end of the array.
3556      *

3646      *         for any lane {@code N} in the vector
3647      *         where the mask is set
3648      * @see ShortVector#toIntArray()
3649      */
3650     @ForceInline
3651     public final
3652     void intoCharArray(char[] a, int offset,
3653                        int[] indexMap, int mapOffset,
3654                        VectorMask<Short> m) {
3655         // FIXME: optimize
3656         stOp(a, offset, m,
3657              (arr, off, i, e) -> {
3658                  int j = indexMap[mapOffset + i];
3659                  arr[off + j] = (char) e;
3660              });
3661     }
3662 
3663 
3664     /**
3665      * {@inheritDoc} <!--workaround-->
3666      * @since 19
3667      */
3668     @Override
3669     @ForceInline
3670     public final
3671     void intoMemorySegment(MemorySegment ms, long offset,
3672                            ByteOrder bo) {
3673         if (ms.isReadOnly()) {
3674             throw new UnsupportedOperationException("Attempt to write a read-only segment");

















3675         }

3676 
3677         offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
3678         maybeSwap(bo).intoMemorySegment0(ms, offset);











3679     }
3680 
3681     /**
3682      * {@inheritDoc} <!--workaround-->
3683      * @since 19
3684      */
3685     @Override
3686     @ForceInline
3687     public final
3688     void intoMemorySegment(MemorySegment ms, long offset,
3689                            ByteOrder bo,
3690                            VectorMask<Short> m) {
3691         if (m.allTrue()) {
3692             intoMemorySegment(ms, offset, bo);
3693         } else {
3694             if (ms.isReadOnly()) {
3695                 throw new UnsupportedOperationException("Attempt to write a read-only segment");
3696             }
3697             ShortSpecies vsp = vspecies();
3698             checkMaskFromIndexSize(offset, vsp, m, 2, ms.byteSize());
3699             maybeSwap(bo).intoMemorySegment0(ms, offset, m);
3700         }
3701     }
3702 
3703     // ================================================
3704 
3705     // Low-level memory operations.
3706     //
3707     // Note that all of these operations *must* inline into a context
3708     // where the exact species of the involved vector is a
3709     // compile-time constant.  Otherwise, the intrinsic generation
3710     // will fail and performance will suffer.
3711     //
3712     // In many cases this is achieved by re-deriving a version of the
3713     // method in each concrete subclass (per species).  The re-derived
3714     // method simply calls one of these generic methods, with exact
3715     // parameters for the controlling metadata, which is either a
3716     // typed vector or constant species instance.
3717 
3718     // Unchecked loading operations in native byte order.
3719     // Caller is responsible for applying index checks, masking, and
3720     // byte swapping.
3721 
    // Unchecked load from short[] in native order; caller has already done
    // index checks, masking, and byte swapping.
    /*package-private*/
    abstract
    ShortVector fromArray0(short[] a, int offset);
    @ForceInline
    final
    ShortVector fromArray0Template(short[] a, int offset) {
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> s.ldOp(arr, (int) off,
                                    (arr_, off_, i) -> arr_[off_ + i]));
    }
3736 
    // Masked unchecked load from short[]; caller has already done index
    // checks and byte swapping.
    /*package-private*/
    abstract
    ShortVector fromArray0(short[] a, int offset, VectorMask<Short> m);
    @ForceInline
    final
    <M extends VectorMask<Short>>
    ShortVector fromArray0Template(Class<M> maskClass, short[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset), m,
            a, offset, vsp,
            (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
                                        (arr_, off_, i) -> arr_[off_ + i]));
    }
3753 
3754 
    // Unchecked load from char[], narrowing each element to short; caller
    // has already done index checks and masking.
    /*package-private*/
    abstract
    ShortVector fromCharArray0(char[] a, int offset);
    @ForceInline
    final
    ShortVector fromCharArray0Template(char[] a, int offset) {
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, charArrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> s.ldOp(arr, (int) off,
                                    (arr_, off_, i) -> (short) arr_[off_ + i]));
    }
3769 
    // Masked unchecked load from char[], narrowing each element to short.
    /*package-private*/
    abstract
    ShortVector fromCharArray0(char[] a, int offset, VectorMask<Short> m);
    @ForceInline
    final
    <M extends VectorMask<Short>>
    ShortVector fromCharArray0Template(Class<M> maskClass, char[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        return VectorSupport.loadMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                a, charArrayAddress(a, offset), m,
                a, offset, vsp,
                (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
                                            (arr_, off_, i) -> (short) arr_[off_ + i]));
    }
3786 
3787 

    // Unchecked segment load in native byte order; caller is responsible
    // for index checks, masking, and byte swapping.
    abstract
    ShortVector fromMemorySegment0(MemorySegment bb, long offset);
    @ForceInline
    final
    ShortVector fromMemorySegment0Template(MemorySegment ms, long offset) {
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        return ScopedMemoryAccess.loadFromMemorySegment(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                (MemorySegmentProxy) ms, offset, vsp,
                (msp, off, s) -> {
                    return s.ldLongOp((MemorySegment) msp, off, ShortVector::memorySegmentGet);
                });
    }
3801 
    // Masked unchecked segment load in native byte order.
    abstract
    ShortVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Short> m);
    @ForceInline
    final
    <M extends VectorMask<Short>>
    ShortVector fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
        ShortSpecies vsp = vspecies();
        m.check(vsp);
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        return ScopedMemoryAccess.loadFromMemorySegmentMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                (MemorySegmentProxy) ms, offset, m, vsp,
                (msp, off, s, vm) -> {
                    return s.ldLongOp((MemorySegment) msp, off, vm, ShortVector::memorySegmentGet);
                });
    }
3817 
3818     // Unchecked storing operations in native byte order.
3819     // Caller is responsible for applying index checks, masking, and
3820     // byte swapping.
3821 
    // Unchecked store into short[] in native order; caller has already done
    // index checks, masking, and byte swapping.
    abstract
    void intoArray0(short[] a, int offset);
    @ForceInline
    final
    void intoArray0Template(short[] a, int offset) {
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, a, offset,
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_+i] = e));
    }
3836 
    // Masked unchecked store into short[]; caller has already done index
    // checks and byte swapping.
    abstract
    void intoArray0(short[] a, int offset, VectorMask<Short> m);
    @ForceInline
    final
    <M extends VectorMask<Short>>
    void intoArray0Template(Class<M> maskClass, short[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm)
            -> v.stOp(arr, (int) off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3853 
3854 
3855 




































    @ForceInline
    final
    void intoMemorySegment0(MemorySegment ms, long offset) {
        ShortSpecies vsp = vspecies();
        // Unchecked segment store in native byte order; intrinsic candidate
        // with the trailing lambda as the scalar fallback.
        ScopedMemoryAccess.storeIntoMemorySegment(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                this,
                (MemorySegmentProxy) ms, offset,
                (msp, off, v) -> {
                    v.stLongOp((MemorySegment) msp, off, ShortVector::memorySegmentSet);
                });
    }
3868 
    // Masked unchecked segment store in native byte order.
    abstract
    void intoMemorySegment0(MemorySegment bb, long offset, VectorMask<Short> m);
    @ForceInline
    final
    <M extends VectorMask<Short>>
    void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
        ShortSpecies vsp = vspecies();
        m.check(vsp);
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        ScopedMemoryAccess.storeIntoMemorySegmentMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                this, m,
                (MemorySegmentProxy) ms, offset,
                (msp, off, v, vm) -> {
                    v.stLongOp((MemorySegment) msp, off, vm, ShortVector::memorySegmentSet);
                });
    }
3885 
    // Masked unchecked store into char[], narrowing each short lane to char.
    /*package-private*/
    abstract
    void intoCharArray0(char[] a, int offset, VectorMask<Short> m);
    @ForceInline
    final
    <M extends VectorMask<Short>>
    void intoCharArray0Template(Class<M> maskClass, char[] a, int offset, M m) {
        m.check(species());
        ShortSpecies vsp = vspecies();
        // Intrinsic candidate; the trailing lambda is the scalar fallback.
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, charArrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm)
            -> v.stOp(arr, (int) off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = (char) e));
    }
3903 
3904     // End of low-level memory operations.
3905 
3906     private static
3907     void checkMaskFromIndexSize(int offset,
3908                                 ShortSpecies vsp,
3909                                 VectorMask<Short> m,
3910                                 int scale,
3911                                 int limit) {
3912         ((AbstractMask<Short>)m)
3913             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3914     }
3915 
3916     private static
3917     void checkMaskFromIndexSize(long offset,
3918                                 ShortSpecies vsp,
3919                                 VectorMask<Short> m,
3920                                 int scale,
3921                                 long limit) {
3922         ((AbstractMask<Short>)m)
3923             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3924     }
3925 
3926     @ForceInline
3927     private void conditionalStoreNYI(int offset,
3928                                      ShortSpecies vsp,
3929                                      VectorMask<Short> m,
3930                                      int scale,
3931                                      int limit) {
3932         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3933             String msg =
3934                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3935                               offset, limit, m, vsp);
3936             throw new AssertionError(msg);
3937         }
3938     }
3939 
3940     /*package-private*/
3941     @Override
3942     @ForceInline
3943     final
3944     ShortVector maybeSwap(ByteOrder bo) {
3945         if (bo != NATIVE_ENDIAN) {

4233                 }
4234             }
4235             return dummyVector().vectorFactory(res);
4236         }
4237 
        // Species-level convenience forwards: each method below delegates to
        // the corresponding per-lane helper on a dummy vector of this species.

        /*package-private*/
        @ForceInline
        <M> ShortVector ldOp(M memory, int offset,
                                      FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        <M> ShortVector ldOp(M memory, int offset,
                                      VectorMask<Short> m,
                                      FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, m, f);
        }

        /*package-private*/
        @ForceInline
        ShortVector ldLongOp(MemorySegment memory, long offset,
                                      FLdLongOp f) {
            return dummyVector().ldLongOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        ShortVector ldLongOp(MemorySegment memory, long offset,
                                      VectorMask<Short> m,
                                      FLdLongOp f) {
            return dummyVector().ldLongOp(memory, offset, m, f);
        }

        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset, FStOp<M> f) {
            dummyVector().stOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset,
                      AbstractMask<Short> m,
                      FStOp<M> f) {
            dummyVector().stOp(memory, offset, m, f);
        }

        /*package-private*/
        @ForceInline
        void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
            dummyVector().stLongOp(memory, offset, f);
        }

        /*package-private*/
        @ForceInline
        void stLongOp(MemorySegment memory, long offset,
                      AbstractMask<Short> m,
                      FStLongOp f) {
            dummyVector().stLongOp(memory, offset, m, f);
        }
4295 
4296         // N.B. Make sure these constant vectors and
4297         // masks load up correctly into registers.
4298         //
4299         // Also, see if we can avoid all that switching.
4300         // Could we cache both vectors and both masks in
4301         // this species object?
4302 
4303         // Zero and iota vector access
4304         @Override
4305         @ForceInline
4306         public final ShortVector zero() {
4307             if ((Class<?>) vectorType() == ShortMaxVector.class)
4308                 return ShortMaxVector.ZERO;
4309             switch (vectorBitSize()) {
4310                 case 64: return Short64Vector.ZERO;
4311                 case 128: return Short128Vector.ZERO;
4312                 case 256: return Short256Vector.ZERO;
4313                 case 512: return Short512Vector.ZERO;
4314             }
4315             throw new AssertionError();
< prev index next >