< prev index next >

src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java

Print this page

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 
  27 import java.nio.ByteBuffer;
  28 import java.nio.ByteOrder;
  29 import java.nio.ReadOnlyBufferException;
  30 import java.util.Arrays;
  31 import java.util.Objects;
  32 import java.util.function.Function;
  33 import java.util.function.UnaryOperator;
  34 



  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code float} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class FloatVector extends AbstractVector<Float> {
  53 
    // Package-private constructor: wraps the backing lane array without
    // copying; immutability is maintained by never exposing the array.
    FloatVector(float[] vec) {
        super(vec);
    }
  57 
    // Operator-kind bit that this float-typed class rejects: operators
    // tagged VO_NOFP (integer-only ops such as bitwise AND/OR/XOR).
    static final int FORBID_OPCODE_KIND = VO_NOFP;
  59 


  60     @ForceInline
  61     static int opCode(Operator op) {
  62         return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
  63     }
  64     @ForceInline
  65     static int opCode(Operator op, int requireKind) {
  66         requireKind |= VO_OPCODE_VALID;
  67         return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
  68     }
    // Tests whether the operator carries the given kind bit(s);
    // pure delegation to VectorOperators.
    @ForceInline
    static boolean opKind(Operator op, int bit) {
        return VectorOperators.opKind(op, bit);
    }
  73 
  74     // Virtualized factories and operators,
  75     // coded with portable definitions.
  76     // These are all @ForceInline in case
  77     // they need to be used performantly.
  78     // The various shape-specific subclasses
  79     // also specialize them by wrapping

 334         return vectorFactory(res);
 335     }
 336 
 337     /*package-private*/
 338     @ForceInline
 339     final
 340     <M> FloatVector ldOp(M memory, int offset,
 341                                   VectorMask<Float> m,
 342                                   FLdOp<M> f) {
 343         //float[] vec = vec();
 344         float[] res = new float[length()];
 345         boolean[] mbits = ((AbstractMask<Float>)m).getBits();
 346         for (int i = 0; i < res.length; i++) {
 347             if (mbits[i]) {
 348                 res[i] = f.apply(memory, offset, i);
 349             }
 350         }
 351         return vectorFactory(res);
 352     }
 353 







































    // Scalar store kernel: writes lane i's value a into `memory`
    // at logical position `offset`.
    interface FStOp<M> {
        void apply(M memory, int offset, int i, float a);
    }
 357 
 358     /*package-private*/
 359     @ForceInline
 360     final
 361     <M> void stOp(M memory, int offset,
 362                   FStOp<M> f) {
 363         float[] vec = vec();
 364         for (int i = 0; i < vec.length; i++) {
 365             f.apply(memory, offset, i, vec[i]);
 366         }
 367     }
 368 
 369     /*package-private*/
 370     @ForceInline
 371     final
 372     <M> void stOp(M memory, int offset,
 373                   VectorMask<Float> m,
 374                   FStOp<M> f) {
 375         float[] vec = vec();
 376         boolean[] mbits = ((AbstractMask<Float>)m).getBits();
 377         for (int i = 0; i < vec.length; i++) {
 378             if (mbits[i]) {
 379                 f.apply(memory, offset, i, vec[i]);
 380             }
 381         }
 382     }
 383 


































 384     // Binary test
 385 
    /*package-private*/
    // Scalar comparison kernel: evaluates test `cond` on lane i of
    // operands a and b.
    interface FBinTest {
        boolean apply(int cond, int i, float a, float b);
    }
 390 
 391     /*package-private*/
 392     @ForceInline
 393     final
 394     AbstractMask<Float> bTest(int cond,
 395                                   Vector<Float> o,
 396                                   FBinTest f) {
 397         float[] vec1 = vec();
 398         float[] vec2 = ((FloatVector)o).vec();
 399         boolean[] bits = new boolean[length()];
 400         for (int i = 0; i < length(); i++){
 401             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 402         }
 403         return maskFactory(bits);
 404     }
 405 
 406 
    /*package-private*/
    // Covariant override: narrows the species type to FloatSpecies.
    @Override
    abstract FloatSpecies vspecies();
 410 
 411     /*package-private*/
 412     @ForceInline
 413     static long toBits(float e) {
 414         return  Float.floatToRawIntBits(e);
 415     }
 416 
 417     /*package-private*/
 418     @ForceInline
 419     static float fromBits(long bits) {
 420         return Float.intBitsToFloat((int)bits);
 421     }
 422 






























 423     // Static factories (other than memory operations)
 424 
 425     // Note: A surprising behavior in javadoc
 426     // sometimes makes a lone /** {@inheritDoc} */
 427     // comment drop the method altogether,
  428      // apparently if the method mentions a
 429     // parameter or return type of Vector<Float>
 430     // instead of Vector<E> as originally specified.
 431     // Adding an empty HTML fragment appears to
 432     // nudge javadoc into providing the desired
 433     // inherited documentation.  We use the HTML
 434     // comment <!--workaround--> for this.
 435 
 436     /**
 437      * Returns a vector of the given species
 438      * where all lane elements are set to
 439      * zero, the default primitive value.
 440      *
 441      * @param species species of the desired zero vector
 442      * @return a zero vector

1585      * {@inheritDoc} <!--workaround-->
1586      */
    @Override
    @ForceInline
    public final
    FloatVector neg() {
        return lanewise(NEG);  // lanewise unary negation (-a)
    }
1593 
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    FloatVector abs() {
        return lanewise(ABS);  // lanewise absolute value |a|
    }
1603 
1604 

1605     // sqrt
1606     /**
1607      * Computes the square root of this vector.
1608      *
1609      * This is a lane-wise unary operation which applies an operation
1610      * conforming to the specification of
1611      * {@link Math#sqrt Math.sqrt(a)}
1612      * to each lane value.
1613      * The operation is adapted to cast the operand and the result,
1614      * specifically widening the {@code float} operand to a {@code double}
1615      * operand and narrowing the {@code double} result to a {@code float}
1616      * result.
1617      *
1618      * This method is also equivalent to the expression
1619      * {@link #lanewise(VectorOperators.Unary)
1620      *    lanewise}{@code (}{@link VectorOperators#SQRT
1621      *    SQRT}{@code )}.
1622      *
1623      * @return the square root of this vector
1624      * @see VectorOperators#SQRT

2236         float[] a = toArray();
2237         int[] sa = new int[a.length];
2238         for (int i = 0; i < a.length; i++) {
2239             sa[i] = (int) a[i];
2240         }
2241         return VectorShuffle.fromArray(dsp, sa, 0);
2242     }
2243 
    /*package-private*/
    // Reinterprets lane values as shuffle indexes via a vector cast.
    // NOTE: the exact argument shape of this VectorSupport.convert call is
    // what the JIT pattern-matches for intrinsification — do not reorder.
    @ForceInline
    final
    VectorShuffle<Float> toShuffleTemplate(Class<?> shuffleType) {
        FloatSpecies vsp = vspecies();
        return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                                     getClass(), float.class, length(),
                                     shuffleType, byte.class, length(),
                                     this, vsp,
                                     FloatVector::toShuffle0);
    }
2255 







































2256     /**
2257      * {@inheritDoc} <!--workaround-->
2258      */
2259     @Override
2260     public abstract
2261     FloatVector selectFrom(Vector<Float> v);
2262 
2263     /*package-private*/
2264     @ForceInline
2265     final FloatVector selectFromTemplate(FloatVector v) {
2266         return v.rearrange(this.toShuffle());
2267     }
2268 
2269     /**
2270      * {@inheritDoc} <!--workaround-->
2271      */
2272     @Override
2273     public abstract
2274     FloatVector selectFrom(Vector<Float> s, VectorMask<Float> m);
2275 

2616         return res;
2617     }
2618 
    /** {@inheritDoc} <!--workaround-->
     * @implNote
     * When this method is used on vectors
     * of type {@code FloatVector},
     * there will be no loss of precision.
     */
    @ForceInline
    @Override
    public final double[] toDoubleArray() {
        float[] a = toArray();
        double[] res = new double[a.length];
        for (int i = 0; i < a.length; i++) {
            // Widening float -> double is always exact.
            res[i] = (double) a[i];
        }
        return res;
    }
2635 
2636     /**
2637      * Loads a vector from a byte array starting at an offset.
2638      * Bytes are composed into primitive lane elements according
2639      * to the specified byte order.
2640      * The vector is arranged into lanes according to
2641      * <a href="Vector.html#lane-order">memory ordering</a>.
2642      * <p>
2643      * This method behaves as if it returns the result of calling
2644      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2645      * fromByteBuffer()} as follows:
2646      * <pre>{@code
2647      * var bb = ByteBuffer.wrap(a);
2648      * var m = species.maskAll(true);
2649      * return fromByteBuffer(species, bb, offset, bo, m);
2650      * }</pre>
2651      *
2652      * @param species species of desired vector
2653      * @param a the byte array
2654      * @param offset the offset into the array
2655      * @param bo the intended byte order
2656      * @return a vector loaded from a byte array
2657      * @throws IndexOutOfBoundsException
2658      *         if {@code offset+N*ESIZE < 0}
2659      *         or {@code offset+(N+1)*ESIZE > a.length}
2660      *         for any lane {@code N} in the vector
2661      */
2662     @ForceInline
2663     public static
2664     FloatVector fromByteArray(VectorSpecies<Float> species,
2665                                        byte[] a, int offset,
2666                                        ByteOrder bo) {
2667         offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
2668         FloatSpecies vsp = (FloatSpecies) species;
2669         return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
2670     }
2671 
2672     /**
2673      * Loads a vector from a byte array starting at an offset
2674      * and using a mask.
2675      * Lanes where the mask is unset are filled with the default
2676      * value of {@code float} (positive zero).
2677      * Bytes are composed into primitive lane elements according
2678      * to the specified byte order.
2679      * The vector is arranged into lanes according to
2680      * <a href="Vector.html#lane-order">memory ordering</a>.
2681      * <p>
2682      * This method behaves as if it returns the result of calling
2683      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2684      * fromByteBuffer()} as follows:
2685      * <pre>{@code
2686      * var bb = ByteBuffer.wrap(a);
2687      * return fromByteBuffer(species, bb, offset, bo, m);
2688      * }</pre>
2689      *
2690      * @param species species of desired vector
2691      * @param a the byte array
2692      * @param offset the offset into the array
2693      * @param bo the intended byte order
2694      * @param m the mask controlling lane selection
2695      * @return a vector loaded from a byte array
2696      * @throws IndexOutOfBoundsException
2697      *         if {@code offset+N*ESIZE < 0}
2698      *         or {@code offset+(N+1)*ESIZE > a.length}
2699      *         for any lane {@code N} in the vector
2700      *         where the mask is set
2701      */
2702     @ForceInline
2703     public static
2704     FloatVector fromByteArray(VectorSpecies<Float> species,
2705                                        byte[] a, int offset,
2706                                        ByteOrder bo,
2707                                        VectorMask<Float> m) {
2708         FloatSpecies vsp = (FloatSpecies) species;
2709         if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
2710             return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
2711         }
2712 
2713         // FIXME: optimize
2714         checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
2715         ByteBuffer wb = wrapper(a, bo);
2716         return vsp.ldOp(wb, offset, (AbstractMask<Float>)m,
2717                    (wb_, o, i)  -> wb_.getFloat(o + i * 4));
2718     }
2719 
2720     /**
2721      * Loads a vector from an array of type {@code float[]}
2722      * starting at an offset.
2723      * For each vector lane, where {@code N} is the vector lane index, the
2724      * array element at index {@code offset + N} is placed into the
2725      * resulting vector at lane index {@code N}.
2726      *
2727      * @param species species of desired vector
2728      * @param a the array
2729      * @param offset the offset into the array
2730      * @return the vector loaded from an array
2731      * @throws IndexOutOfBoundsException
2732      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2733      *         for any lane {@code N} in the vector
2734      */
2735     @ForceInline
2736     public static
2737     FloatVector fromArray(VectorSpecies<Float> species,
2738                                    float[] a, int offset) {
2739         offset = checkFromIndexSize(offset, species.length(), a.length);

2872      * @see FloatVector#toIntArray()
2873      */
2874     @ForceInline
2875     public static
2876     FloatVector fromArray(VectorSpecies<Float> species,
2877                                    float[] a, int offset,
2878                                    int[] indexMap, int mapOffset,
2879                                    VectorMask<Float> m) {
2880         if (m.allTrue()) {
2881             return fromArray(species, a, offset, indexMap, mapOffset);
2882         }
2883         else {
2884             FloatSpecies vsp = (FloatSpecies) species;
2885             return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
2886         }
2887     }
2888 
2889 
2890 
2891     /**
2892      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
2893      * starting at an offset into the byte buffer.
2894      * Bytes are composed into primitive lane elements according
2895      * to the specified byte order.
2896      * The vector is arranged into lanes according to
2897      * <a href="Vector.html#lane-order">memory ordering</a>.
2898      * <p>
2899      * This method behaves as if it returns the result of calling
2900      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2901      * fromByteBuffer()} as follows:
2902      * <pre>{@code
2903      * var m = species.maskAll(true);
2904      * return fromByteBuffer(species, bb, offset, bo, m);
2905      * }</pre>
2906      *
2907      * @param species species of desired vector
2908      * @param bb the byte buffer
2909      * @param offset the offset into the byte buffer
2910      * @param bo the intended byte order
2911      * @return a vector loaded from a byte buffer
2912      * @throws IndexOutOfBoundsException
2913      *         if {@code offset+N*4 < 0}
2914      *         or {@code offset+N*4 >= bb.limit()}
2915      *         for any lane {@code N} in the vector





2916      */
2917     @ForceInline
2918     public static
2919     FloatVector fromByteBuffer(VectorSpecies<Float> species,
2920                                         ByteBuffer bb, int offset,
2921                                         ByteOrder bo) {
2922         offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
2923         FloatSpecies vsp = (FloatSpecies) species;
2924         return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
2925     }
2926 
2927     /**
2928      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
2929      * starting at an offset into the byte buffer
2930      * and using a mask.
2931      * Lanes where the mask is unset are filled with the default
2932      * value of {@code float} (positive zero).
2933      * Bytes are composed into primitive lane elements according
2934      * to the specified byte order.
2935      * The vector is arranged into lanes according to
2936      * <a href="Vector.html#lane-order">memory ordering</a>.
2937      * <p>
2938      * The following pseudocode illustrates the behavior:
2939      * <pre>{@code
2940      * FloatBuffer eb = bb.duplicate()
2941      *     .position(offset)
2942      *     .order(bo).asFloatBuffer();
2943      * float[] ar = new float[species.length()];
2944      * for (int n = 0; n < ar.length; n++) {
2945      *     if (m.laneIsSet(n)) {
2946      *         ar[n] = eb.get(n);
2947      *     }
2948      * }
2949      * FloatVector r = FloatVector.fromArray(species, ar, 0);
2950      * }</pre>
2951      * @implNote
2952      * This operation is likely to be more efficient if
2953      * the specified byte order is the same as
2954      * {@linkplain ByteOrder#nativeOrder()
2955      * the platform native order},
2956      * since this method will not need to reorder
2957      * the bytes of lane values.
2958      *
2959      * @param species species of desired vector
2960      * @param bb the byte buffer
2961      * @param offset the offset into the byte buffer
2962      * @param bo the intended byte order
2963      * @param m the mask controlling lane selection
2964      * @return a vector loaded from a byte buffer
2965      * @throws IndexOutOfBoundsException
2966      *         if {@code offset+N*4 < 0}
2967      *         or {@code offset+N*4 >= bb.limit()}
2968      *         for any lane {@code N} in the vector
2969      *         where the mask is set





2970      */
2971     @ForceInline
2972     public static
2973     FloatVector fromByteBuffer(VectorSpecies<Float> species,
2974                                         ByteBuffer bb, int offset,
2975                                         ByteOrder bo,
2976                                         VectorMask<Float> m) {
2977         FloatSpecies vsp = (FloatSpecies) species;
2978         if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
2979             return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
2980         }
2981 
2982         // FIXME: optimize
2983         checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
2984         ByteBuffer wb = wrapper(bb, bo);
2985         return vsp.ldOp(wb, offset, (AbstractMask<Float>)m,
2986                    (wb_, o, i)  -> wb_.getFloat(o + i * 4));
2987     }
2988 
2989     // Memory store operations
2990 
    /**
     * Stores this vector into an array of type {@code float[]}
     * starting at an offset.
     * <p>
     * For each vector lane, where {@code N} is the vector lane index,
     * the lane element at index {@code N} is stored into the array
     * element {@code a[offset+N]}.
     *
     * @param a the array, of type {@code float[]}
     * @param offset the offset into the array
     * @throws IndexOutOfBoundsException
     *         if {@code offset+N < 0} or {@code offset+N >= a.length}
     *         for any lane {@code N} in the vector
     */
    @ForceInline
    public final
    void intoArray(float[] a, int offset) {
        offset = checkFromIndexSize(offset, length(), a.length);
        FloatSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this,
            a, offset,
            // Scalar fallback used when the store intrinsic is unavailable.
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3019 
3020     /**
3021      * Stores this vector into an array of type {@code float[]}
3022      * starting at offset and using a mask.
3023      * <p>
3024      * For each vector lane, where {@code N} is the vector lane index,
3025      * the lane element at index {@code N} is stored into the array
3026      * element {@code a[offset+N]}.
3027      * If the mask lane at {@code N} is unset then the corresponding
3028      * array element {@code a[offset+N]} is left unchanged.
3029      * <p>
3030      * Array range checking is done for lanes where the mask is set.
3031      * Lanes where the mask is unset are not stored and do not need
3032      * to correspond to legitimate elements of {@code a}.
3033      * That is, unset lanes may correspond to array indexes less than
3034      * zero or beyond the end of the array.
3035      *
3036      * @param a the array, of type {@code float[]}

3137      *         where the mask is set
3138      * @see FloatVector#toIntArray()
3139      */
3140     @ForceInline
3141     public final
3142     void intoArray(float[] a, int offset,
3143                    int[] indexMap, int mapOffset,
3144                    VectorMask<Float> m) {
3145         if (m.allTrue()) {
3146             intoArray(a, offset, indexMap, mapOffset);
3147         }
3148         else {
3149             intoArray0(a, offset, indexMap, mapOffset, m);
3150         }
3151     }
3152 
3153 
3154 
3155     /**
3156      * {@inheritDoc} <!--workaround-->

3157      */
3158     @Override
3159     @ForceInline
3160     public final
3161     void intoByteArray(byte[] a, int offset,
3162                        ByteOrder bo) {
3163         offset = checkFromIndexSize(offset, byteSize(), a.length);
3164         maybeSwap(bo).intoByteArray0(a, offset);
3165     }
3166 
3167     /**
3168      * {@inheritDoc} <!--workaround-->
3169      */
3170     @Override
3171     @ForceInline
3172     public final
3173     void intoByteArray(byte[] a, int offset,
3174                        ByteOrder bo,
3175                        VectorMask<Float> m) {
3176         if (m.allTrue()) {
3177             intoByteArray(a, offset, bo);
3178         } else {
3179             FloatSpecies vsp = vspecies();
3180             checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
3181             maybeSwap(bo).intoByteArray0(a, offset, m);
3182         }
3183     }
3184 
3185     /**
3186      * {@inheritDoc} <!--workaround-->
3187      */
3188     @Override
3189     @ForceInline
3190     public final
3191     void intoByteBuffer(ByteBuffer bb, int offset,
3192                         ByteOrder bo) {
3193         if (ScopedMemoryAccess.isReadOnly(bb)) {
3194             throw new ReadOnlyBufferException();
3195         }
3196         offset = checkFromIndexSize(offset, byteSize(), bb.limit());
3197         maybeSwap(bo).intoByteBuffer0(bb, offset);
3198     }
3199 
3200     /**
3201      * {@inheritDoc} <!--workaround-->

3202      */
3203     @Override
3204     @ForceInline
3205     public final
3206     void intoByteBuffer(ByteBuffer bb, int offset,
3207                         ByteOrder bo,
3208                         VectorMask<Float> m) {
3209         if (m.allTrue()) {
3210             intoByteBuffer(bb, offset, bo);
3211         } else {
3212             if (bb.isReadOnly()) {
3213                 throw new ReadOnlyBufferException();
3214             }
3215             FloatSpecies vsp = vspecies();
3216             checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
3217             maybeSwap(bo).intoByteBuffer0(bb, offset, m);
3218         }
3219     }
3220 
3221     // ================================================
3222 
3223     // Low-level memory operations.
3224     //
3225     // Note that all of these operations *must* inline into a context
3226     // where the exact species of the involved vector is a
3227     // compile-time constant.  Otherwise, the intrinsic generation
3228     // will fail and performance will suffer.
3229     //
3230     // In many cases this is achieved by re-deriving a version of the
3231     // method in each concrete subclass (per species).  The re-derived
3232     // method simply calls one of these generic methods, with exact
3233     // parameters for the controlling metadata, which is either a
3234     // typed vector or constant species instance.
3235 
3236     // Unchecked loading operations in native byte order.
3237     // Caller is responsible for applying index checks, masking, and
3238     // byte swapping.
3239 
    /*package-private*/
    abstract
    FloatVector fromArray0(float[] a, int offset);
    // Unchecked native-order load; caller has already range-checked.
    // Must inline so the species is a compile-time constant (intrinsic).
    @ForceInline
    final
    FloatVector fromArray0Template(float[] a, int offset) {
        FloatSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            a, offset, vsp,
            // Scalar fallback used when the load intrinsic is unavailable.
            (arr, off, s) -> s.ldOp(arr, off,
                                    (arr_, off_, i) -> arr_[off_ + i]));
    }
3254 
    /*package-private*/
    abstract
    FloatVector fromArray0(float[] a, int offset, VectorMask<Float> m);
    // Unchecked masked native-order load; caller has already range-checked.
    @ForceInline
    final
    <M extends VectorMask<Float>>
    FloatVector fromArray0Template(Class<M> maskClass, float[] a, int offset, M m) {
        m.check(species());  // mask species must match this vector's species
        FloatSpecies vsp = vspecies();
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset), m,
            a, offset, vsp,
            // Scalar fallback: load only the lanes selected by the mask.
            (arr, off, s, vm) -> s.ldOp(arr, off, vm,
                                        (arr_, off_, i) -> arr_[off_ + i]));
    }
3271 
    /*package-private*/
    abstract
    FloatVector fromArray0(float[] a, int offset,
                                    int[] indexMap, int mapOffset,
                                    VectorMask<Float> m);
    // Masked gather: lane k loads a[offset + indexMap[mapOffset + k]].
    @ForceInline
    final
    <M extends VectorMask<Float>>
    FloatVector fromArray0Template(Class<M> maskClass, float[] a, int offset,
                                            int[] indexMap, int mapOffset, M m) {
        FloatSpecies vsp = vspecies();
        IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
        Objects.requireNonNull(a);
        Objects.requireNonNull(indexMap);
        m.check(vsp);
        Class<? extends FloatVector> vectorType = vsp.vectorType();

        // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
        IntVector vix = IntVector
            .fromArray(isp, indexMap, mapOffset)
            .add(offset);

        // FIXME: Check index under mask controlling.
        vix = VectorIntrinsics.checkIndex(vix, a.length);

        return VectorSupport.loadWithMap(
            vectorType, maskClass, float.class, vsp.laneCount(),
            isp.vectorType(),
            a, ARRAY_BASE, vix, m,
            a, offset, indexMap, mapOffset, vsp,
            // Scalar fallback: gather each selected lane via the index map.
            (c, idx, iMap, idy, s, vm) ->
            s.vOp(vm, n -> c[idx + iMap[idy+n]]));
    }
3305 
3306 
3307 
    @Override
    abstract
    FloatVector fromByteArray0(byte[] a, int offset);
    // Unchecked native-order load from bytes; caller range-checks and
    // byte-swaps afterwards if a non-native order was requested.
    @ForceInline
    final
    FloatVector fromByteArray0Template(byte[] a, int offset) {
        FloatSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            a, offset, vsp,
            // Scalar fallback: read lanes through a ByteBuffer view.
            (arr, off, s) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                return s.ldOp(wb, off,
                        (wb_, o, i) -> wb_.getFloat(o + i * 4));
            });
    }
3325 
    abstract
    FloatVector fromByteArray0(byte[] a, int offset, VectorMask<Float> m);
    // Masked variant of the unchecked native-order byte-array load.
    @ForceInline
    final
    <M extends VectorMask<Float>>
    FloatVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
        FloatSpecies vsp = vspecies();
        m.check(vsp);  // mask species must match this vector's species
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset), m,
            a, offset, vsp,
            // Scalar fallback: read only mask-selected lanes via ByteBuffer.
            (arr, off, s, vm) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                return s.ldOp(wb, off, vm,
                        (wb_, o, i) -> wb_.getFloat(o + i * 4));
            });
    }
3344 
    abstract
    FloatVector fromByteBuffer0(ByteBuffer bb, int offset);
    // Unchecked native-order load from a byte buffer; goes through
    // ScopedMemoryAccess so direct-buffer sessions stay alive for the load.
    @ForceInline
    final
    FloatVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
        FloatSpecies vsp = vspecies();
        return ScopedMemoryAccess.loadFromByteBuffer(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                bb, offset, vsp,
                // Scalar fallback: read lanes through a ByteBuffer view.
                (buf, off, s) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    return s.ldOp(wb, off,
                            (wb_, o, i) -> wb_.getFloat(o + i * 4));
                });
    }
3360 
    abstract
    FloatVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Float> m);
    // Masked variant of the unchecked native-order byte-buffer load.
    @ForceInline
    final
    <M extends VectorMask<Float>>
    FloatVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
        FloatSpecies vsp = vspecies();
        m.check(vsp);  // mask species must match this vector's species
        return ScopedMemoryAccess.loadFromByteBufferMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                bb, offset, m, vsp,
                // Scalar fallback: read only mask-selected lanes.
                (buf, off, s, vm) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    return s.ldOp(wb, off, vm,
                            (wb_, o, i) -> wb_.getFloat(o + i * 4));
                });
    }
3378 
3379     // Unchecked storing operations in native byte order.
3380     // Caller is responsible for applying index checks, masking, and
3381     // byte swapping.
3382 
    abstract
    void intoArray0(float[] a, int offset);
    // Unchecked native-order store; caller has already range-checked.
    @ForceInline
    final
    void intoArray0Template(float[] a, int offset) {
        FloatSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, a, offset,
            // Scalar fallback used when the store intrinsic is unavailable.
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_+i] = e));
    }
3397 
    abstract
    void intoArray0(float[] a, int offset, VectorMask<Float> m);
    @ForceInline
    final
    <M extends VectorMask<Float>>
    void intoArray0Template(Class<M> maskClass, float[] a, int offset, M m) {
        // Masked unchecked store: lanes with an unset mask bit are skipped,
        // leaving the corresponding array elements untouched.
        m.check(species());  // mask species must match this vector's species
        FloatSpecies vsp = vspecies();
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm)
            -> v.stOp(arr, off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3414 
3415     abstract
3416     void intoArray0(float[] a, int offset,
3417                     int[] indexMap, int mapOffset,
3418                     VectorMask<Float> m);
3419     @ForceInline
3420     final
3421     <M extends VectorMask<Float>>
3422     void intoArray0Template(Class<M> maskClass, float[] a, int offset,
3423                             int[] indexMap, int mapOffset, M m) {
3424         m.check(species());
3425         FloatSpecies vsp = vspecies();
3426         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3427         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3428         IntVector vix = IntVector
3429             .fromArray(isp, indexMap, mapOffset)
3430             .add(offset);
3431 
3432         // FIXME: Check index under mask controlling.
3433         vix = VectorIntrinsics.checkIndex(vix, a.length);
3434 
3435         VectorSupport.storeWithMap(
3436             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3437             isp.vectorType(),
3438             a, arrayAddress(a, 0), vix,
3439             this, m,
3440             a, offset, indexMap, mapOffset,
3441             (arr, off, v, map, mo, vm)
3442             -> v.stOp(arr, off, vm,
3443                       (arr_, off_, i, e) -> {
3444                           int j = map[mo + i];
3445                           arr[off + j] = e;
3446                       }));
3447     }
3448 
3449 
    abstract
    void intoByteArray0(byte[] a, int offset);
    @ForceInline
    final
    void intoByteArray0Template(byte[] a, int offset) {
        // Unchecked store into a byte[] in native byte order; the caller
        // handles index checks and byte swapping.
        FloatSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            this, a, offset,
            // Scalar fallback: write each lane as 4 bytes via a buffer view.
            (arr, off, v) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                v.stOp(wb, off,
                        (tb_, o, i, e) -> tb_.putFloat(o + i * 4, e));
            });
    }
3466 
    abstract
    void intoByteArray0(byte[] a, int offset, VectorMask<Float> m);
    @ForceInline
    final
    <M extends VectorMask<Float>>
    void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
        // Masked unchecked store into a byte[]; unset lanes are not written.
        FloatSpecies vsp = vspecies();
        m.check(vsp);  // mask species must match this vector's species
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                v.stOp(wb, off, vm,
                        (tb_, o, i, e) -> tb_.putFloat(o + i * 4, e));
            });
    }
3485 
    @ForceInline
    final
    void intoByteBuffer0(ByteBuffer bb, int offset) {
        // Unchecked store into a ByteBuffer in native byte order,
        // delegated to ScopedMemoryAccess for scope-safe buffer access.
        FloatSpecies vsp = vspecies();
        ScopedMemoryAccess.storeIntoByteBuffer(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                this, bb, offset,
                // Scalar fallback: put each lane as a 4-byte float.
                (buf, off, v) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    v.stOp(wb, off,
                            (wb_, o, i, e) -> wb_.putFloat(o + i * 4, e));
                });
    }
3499 
    abstract
    void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Float> m);
    @ForceInline
    final
    <M extends VectorMask<Float>>
    void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
        // Masked unchecked store into a ByteBuffer; unset lanes are skipped.
        FloatSpecies vsp = vspecies();
        m.check(vsp);  // mask species must match this vector's species
        ScopedMemoryAccess.storeIntoByteBufferMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                this, m, bb, offset,
                (buf, off, v, vm) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    v.stOp(wb, off, vm,
                            (wb_, o, i, e) -> wb_.putFloat(o + i * 4, e));
                });
    }
3517 
3518 
3519     // End of low-level memory operations.
3520 
    private static
    void checkMaskFromIndexSize(int offset,
                                FloatSpecies vsp,
                                VectorMask<Float> m,
                                int scale,
                                int limit) {
        // Bounds check applied only to the lanes set in m: each set lane i
        // must satisfy 0 <= offset + iota[i]*scale < limit
        // (see AbstractMask.checkIndexByLane).
        ((AbstractMask<Float>)m)
            .checkIndexByLane(offset, limit, vsp.iota(), scale);
    }
3530 










3531     @ForceInline
3532     private void conditionalStoreNYI(int offset,
3533                                      FloatSpecies vsp,
3534                                      VectorMask<Float> m,
3535                                      int scale,
3536                                      int limit) {
3537         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3538             String msg =
3539                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3540                               offset, limit, m, vsp);
3541             throw new AssertionError(msg);
3542         }
3543     }
3544 
3545     /*package-private*/
3546     @Override
3547     @ForceInline
3548     final
3549     FloatVector maybeSwap(ByteOrder bo) {
3550         if (bo != NATIVE_ENDIAN) {

3821                 }
3822             }
3823             return dummyVector().vectorFactory(res);
3824         }
3825 
        /*package-private*/
        @ForceInline
        <M> FloatVector ldOp(M memory, int offset,
                                      FLdOp<M> f) {
            // Species-level load: delegate to the dummy vector, which
            // supplies the lane count and vector factory for this species.
            return dummyVector().ldOp(memory, offset, f);
        }
3832 
        /*package-private*/
        @ForceInline
        <M> FloatVector ldOp(M memory, int offset,
                                      VectorMask<Float> m,
                                      FLdOp<M> f) {
            // Masked species-level load; delegated like the unmasked form.
            return dummyVector().ldOp(memory, offset, m, f);
        }
3840 















        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset, FStOp<M> f) {
            // Species-level store: delegate to this species' dummy vector.
            dummyVector().stOp(memory, offset, f);
        }
3846 
        /*package-private*/
        @ForceInline
        <M> void stOp(M memory, int offset,
                      AbstractMask<Float> m,
                      FStOp<M> f) {
            // Masked species-level store; delegated like the unmasked form.
            dummyVector().stOp(memory, offset, m, f);
        }
3854 














3855         // N.B. Make sure these constant vectors and
3856         // masks load up correctly into registers.
3857         //
3858         // Also, see if we can avoid all that switching.
3859         // Could we cache both vectors and both masks in
3860         // this species object?
3861 
3862         // Zero and iota vector access
3863         @Override
3864         @ForceInline
3865         public final FloatVector zero() {
3866             if ((Class<?>) vectorType() == FloatMaxVector.class)
3867                 return FloatMaxVector.ZERO;
3868             switch (vectorBitSize()) {
3869                 case 64: return Float64Vector.ZERO;
3870                 case 128: return Float128Vector.ZERO;
3871                 case 256: return Float256Vector.ZERO;
3872                 case 512: return Float512Vector.ZERO;
3873             }
3874             throw new AssertionError();

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 

  27 import java.nio.ByteOrder;

  28 import java.util.Arrays;
  29 import java.util.Objects;
  30 import java.util.function.Function;

  31 
  32 import jdk.incubator.foreign.MemorySegment;
  33 import jdk.incubator.foreign.ValueLayout;
  34 import jdk.internal.access.foreign.MemorySegmentProxy;
  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code float} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class FloatVector extends AbstractVector<Float> {
  53 
    // Package-private: lane storage is handed to the superclass.
    FloatVector(float[] vec) {
        super(vec);
    }
  57 
    // NOTE(review): VO_NOFP presumably marks ops invalid on FP vectors
    // (e.g. bitwise ops) — see VectorOperators for the kind bits.
    static final int FORBID_OPCODE_KIND = VO_NOFP;

    // Layout for MemorySegment lane access; bit alignment relaxed to 8
    // so lanes can be read/written at any byte offset.
    static final ValueLayout.OfFloat ELEMENT_LAYOUT = ValueLayout.JAVA_FLOAT.withBitAlignment(8);

    @ForceInline
    static int opCode(Operator op) {
        // Map an operator token to its intrinsic opcode, rejecting
        // kinds forbidden for float lanes.
        return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
    }
    @ForceInline
    static int opCode(Operator op, int requireKind) {
        // Like opCode(op), but the caller may require extra kind bits.
        requireKind |= VO_OPCODE_VALID;
        return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
    }
    @ForceInline
    static boolean opKind(Operator op, int bit) {
        // True if the operator carries the given kind bit.
        return VectorOperators.opKind(op, bit);
    }
  75 
  76     // Virtualized factories and operators,
  77     // coded with portable definitions.
  78     // These are all @ForceInline in case
  79     // they need to be used performantly.
  80     // The various shape-specific subclasses
  81     // also specialize them by wrapping

 336         return vectorFactory(res);
 337     }
 338 
    /*package-private*/
    @ForceInline
    final
    <M> FloatVector ldOp(M memory, int offset,
                                  VectorMask<Float> m,
                                  FLdOp<M> f) {
        // Masked scalar load loop: unset lanes keep the float default (0.0f).
        //float[] vec = vec();
        float[] res = new float[length()];
        boolean[] mbits = ((AbstractMask<Float>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(memory, offset, i);
            }
        }
        return vectorFactory(res);
    }
 355 
    /*package-private*/
    // Lane loader for MemorySegment sources addressed by a long byte offset.
    interface FLdLongOp {
        float apply(MemorySegment memory, long offset, int i);
    }
 360 
    /*package-private*/
    @ForceInline
    final
    FloatVector ldLongOp(MemorySegment memory, long offset,
                                  FLdLongOp f) {
        // Scalar load loop over all lanes of a memory segment.
        //dummy; no vec = vec();
        float[] res = new float[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(memory, offset, i);
        }
        return vectorFactory(res);
    }
 373 
    /*package-private*/
    @ForceInline
    final
    FloatVector ldLongOp(MemorySegment memory, long offset,
                                  VectorMask<Float> m,
                                  FLdLongOp f) {
        // Masked scalar load loop; unset lanes keep the default 0.0f.
        //float[] vec = vec();
        float[] res = new float[length()];
        boolean[] mbits = ((AbstractMask<Float>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(memory, offset, i);
            }
        }
        return vectorFactory(res);
    }
 390 
    // Reads lane i from a segment: 4 bytes per float at byte offset o + i*4.
    static float memorySegmentGet(MemorySegment ms, long o, int i) {
        return ms.get(ELEMENT_LAYOUT, o + i * 4L);
    }
 394 
    // Lane storer for array/buffer destinations addressed by an int offset.
    interface FStOp<M> {
        void apply(M memory, int offset, int i, float a);
    }
 398 
    /*package-private*/
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  FStOp<M> f) {
        // Scalar store loop: write every lane through f.
        float[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            f.apply(memory, offset, i, vec[i]);
        }
    }
 409 
    /*package-private*/
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  VectorMask<Float> m,
                  FStOp<M> f) {
        // Masked scalar store loop: unset lanes are not written.
        float[] vec = vec();
        boolean[] mbits = ((AbstractMask<Float>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            if (mbits[i]) {
                f.apply(memory, offset, i, vec[i]);
            }
        }
    }
 424 
    // Lane storer for MemorySegment destinations addressed by a long offset.
    interface FStLongOp {
        void apply(MemorySegment memory, long offset, int i, float a);
    }
 428 
    /*package-private*/
    @ForceInline
    final
    void stLongOp(MemorySegment memory, long offset,
                  FStLongOp f) {
        // Scalar store loop into a memory segment.
        float[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            f.apply(memory, offset, i, vec[i]);
        }
    }
 439 
    /*package-private*/
    @ForceInline
    final
    void stLongOp(MemorySegment memory, long offset,
                  VectorMask<Float> m,
                  FStLongOp f) {
        // Masked scalar store loop into a memory segment.
        float[] vec = vec();
        boolean[] mbits = ((AbstractMask<Float>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            if (mbits[i]) {
                f.apply(memory, offset, i, vec[i]);
            }
        }
    }
 454 
    // Writes lane value e into a segment at byte offset o + i*4.
    static void memorySegmentSet(MemorySegment ms, long o, int i, float e) {
        ms.set(ELEMENT_LAYOUT, o + i * 4L, e);
    }
 458 
 459     // Binary test
 460 
    /*package-private*/
    // Per-lane comparison predicate; cond selects the comparison operator.
    interface FBinTest {
        boolean apply(int cond, int i, float a, float b);
    }
 465 
    /*package-private*/
    @ForceInline
    final
    AbstractMask<Float> bTest(int cond,
                                  Vector<Float> o,
                                  FBinTest f) {
        // Scalar comparison loop producing one mask bit per lane pair.
        float[] vec1 = vec();
        float[] vec2 = ((FloatVector)o).vec();
        boolean[] bits = new boolean[length()];
        for (int i = 0; i < length(); i++){
            bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
        }
        return maskFactory(bits);
    }
 480 
 481 
 482     /*package-private*/
 483     @Override
 484     abstract FloatSpecies vspecies();
 485 
 486     /*package-private*/
 487     @ForceInline
 488     static long toBits(float e) {
 489         return  Float.floatToRawIntBits(e);
 490     }
 491 
 492     /*package-private*/
 493     @ForceInline
 494     static float fromBits(long bits) {
 495         return Float.intBitsToFloat((int)bits);
 496     }
 497 
    // Scalar fallback for expand: distributes the low lanes of v into the
    // positions selected by m (the inverse of compress); unselected result
    // lanes are zero.
    static FloatVector expandHelper(Vector<Float> v, VectorMask<Float> m) {
        VectorSpecies<Float> vsp = m.vectorSpecies();
        FloatVector r  = (FloatVector) vsp.zero();
        FloatVector vi = (FloatVector) v;
        if (m.allTrue()) {
            return vi;  // identity when every lane is selected
        }
        for (int i = 0, j = 0; i < vsp.length(); i++) {
            if (m.laneIsSet(i)) {
                r = r.withLane(i, vi.lane(j++));
            }
        }
        return r;
    }
 512 
    // Scalar fallback for compress: gathers the lanes of v selected by m
    // into the low lanes of the result; remaining lanes are zero.
    static FloatVector compressHelper(Vector<Float> v, VectorMask<Float> m) {
        VectorSpecies<Float> vsp = m.vectorSpecies();
        FloatVector r  = (FloatVector) vsp.zero();
        FloatVector vi = (FloatVector) v;
        if (m.allTrue()) {
            return vi;  // identity when every lane is selected
        }
        for (int i = 0, j = 0; i < vsp.length(); i++) {
            if (m.laneIsSet(i)) {
                r = r.withLane(j++, vi.lane(i));
            }
        }
        return r;
    }
 527 
 528     // Static factories (other than memory operations)
 529 
 530     // Note: A surprising behavior in javadoc
 531     // sometimes makes a lone /** {@inheritDoc} */
 532     // comment drop the method altogether,
    // apparently if the method mentions a
 534     // parameter or return type of Vector<Float>
 535     // instead of Vector<E> as originally specified.
 536     // Adding an empty HTML fragment appears to
 537     // nudge javadoc into providing the desired
 538     // inherited documentation.  We use the HTML
 539     // comment <!--workaround--> for this.
 540 
 541     /**
 542      * Returns a vector of the given species
 543      * where all lane elements are set to
 544      * zero, the default primitive value.
 545      *
 546      * @param species species of the desired zero vector
 547      * @return a zero vector

1690      * {@inheritDoc} <!--workaround-->
1691      */
    @Override
    @ForceInline
    public final
    FloatVector neg() {
        // Shorthand for lanewise(NEG): per-lane negation.
        return lanewise(NEG);
    }
1698 
1699     /**
1700      * {@inheritDoc} <!--workaround-->
1701      */
    @Override
    @ForceInline
    public final
    FloatVector abs() {
        // Shorthand for lanewise(ABS): per-lane absolute value.
        return lanewise(ABS);
    }
1708 
1709 
1710 
1711     // sqrt
1712     /**
1713      * Computes the square root of this vector.
1714      *
1715      * This is a lane-wise unary operation which applies an operation
1716      * conforming to the specification of
1717      * {@link Math#sqrt Math.sqrt(a)}
1718      * to each lane value.
1719      * The operation is adapted to cast the operand and the result,
1720      * specifically widening the {@code float} operand to a {@code double}
1721      * operand and narrowing the {@code double} result to a {@code float}
1722      * result.
1723      *
1724      * This method is also equivalent to the expression
1725      * {@link #lanewise(VectorOperators.Unary)
1726      *    lanewise}{@code (}{@link VectorOperators#SQRT
1727      *    SQRT}{@code )}.
1728      *
1729      * @return the square root of this vector
1730      * @see VectorOperators#SQRT

2342         float[] a = toArray();
2343         int[] sa = new int[a.length];
2344         for (int i = 0; i < a.length; i++) {
2345             sa[i] = (int) a[i];
2346         }
2347         return VectorShuffle.fromArray(dsp, sa, 0);
2348     }
2349 
    /*package-private*/
    @ForceInline
    final
    VectorShuffle<Float> toShuffleTemplate(Class<?> shuffleType) {
        // Convert lane values to shuffle indexes via an intrinsified cast
        // (float lanes -> byte shuffle lanes); toShuffle0 is the fallback.
        FloatSpecies vsp = vspecies();
        return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                                     getClass(), float.class, length(),
                                     shuffleType, byte.class, length(),
                                     this, vsp,
                                     FloatVector::toShuffle0);
    }
2361 
2362     /**
2363      * {@inheritDoc} <!--workaround-->
2364      * @since 19
2365      */
2366     @Override
2367     public abstract
2368     FloatVector compress(VectorMask<Float> m);
2369 
    /*package-private*/
    @ForceInline
    final
    <M extends AbstractMask<Float>>
    FloatVector compressTemplate(Class<M> masktype, M m) {
      // Intrinsified compress; compressHelper is the scalar fallback.
      m.check(masktype, this);
      return (FloatVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
                                                   float.class, length(), this, m,
                                                   (v1, m1) -> compressHelper(v1, m1));
    }
2380 
2381     /**
2382      * {@inheritDoc} <!--workaround-->
2383      * @since 19
2384      */
2385     @Override
2386     public abstract
2387     FloatVector expand(VectorMask<Float> m);
2388 
    /*package-private*/
    @ForceInline
    final
    <M extends AbstractMask<Float>>
    FloatVector expandTemplate(Class<M> masktype, M m) {
      // Intrinsified expand; expandHelper is the scalar fallback.
      m.check(masktype, this);
      return (FloatVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
                                                   float.class, length(), this, m,
                                                   (v1, m1) -> expandHelper(v1, m1));
    }
2399 
2400 
2401     /**
2402      * {@inheritDoc} <!--workaround-->
2403      */
2404     @Override
2405     public abstract
2406     FloatVector selectFrom(Vector<Float> v);
2407 
    /*package-private*/
    @ForceInline
    final FloatVector selectFromTemplate(FloatVector v) {
        // selectFrom is rearrange with the roles swapped:
        // this vector's lanes act as indexes into v.
        return v.rearrange(this.toShuffle());
    }
2413 
2414     /**
2415      * {@inheritDoc} <!--workaround-->
2416      */
2417     @Override
2418     public abstract
2419     FloatVector selectFrom(Vector<Float> s, VectorMask<Float> m);
2420 

2761         return res;
2762     }
2763 
2764     /** {@inheritDoc} <!--workaround-->
2765      * @implNote
     * When this method is used on vectors
2767      * of type {@code FloatVector},
2768      * there will be no loss of precision.
2769      */
2770     @ForceInline
2771     @Override
2772     public final double[] toDoubleArray() {
2773         float[] a = toArray();
2774         double[] res = new double[a.length];
2775         for (int i = 0; i < a.length; i++) {
2776             res[i] = (double) a[i];
2777         }
2778         return res;
2779     }
2780 




















































































2781     /**
2782      * Loads a vector from an array of type {@code float[]}
2783      * starting at an offset.
2784      * For each vector lane, where {@code N} is the vector lane index, the
2785      * array element at index {@code offset + N} is placed into the
2786      * resulting vector at lane index {@code N}.
2787      *
2788      * @param species species of desired vector
2789      * @param a the array
2790      * @param offset the offset into the array
2791      * @return the vector loaded from an array
2792      * @throws IndexOutOfBoundsException
2793      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2794      *         for any lane {@code N} in the vector
2795      */
2796     @ForceInline
2797     public static
2798     FloatVector fromArray(VectorSpecies<Float> species,
2799                                    float[] a, int offset) {
2800         offset = checkFromIndexSize(offset, species.length(), a.length);

2933      * @see FloatVector#toIntArray()
2934      */
    @ForceInline
    public static
    FloatVector fromArray(VectorSpecies<Float> species,
                                   float[] a, int offset,
                                   int[] indexMap, int mapOffset,
                                   VectorMask<Float> m) {
        // An all-true mask takes the cheaper unmasked gather path.
        if (m.allTrue()) {
            return fromArray(species, a, offset, indexMap, mapOffset);
        }
        else {
            FloatSpecies vsp = (FloatSpecies) species;
            return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
        }
    }
2949 
2950 
2951 
2952     /**
2953      * Loads a vector from a {@linkplain MemorySegment memory segment}
2954      * starting at an offset into the memory segment.
2955      * Bytes are composed into primitive lane elements according
2956      * to the specified byte order.
2957      * The vector is arranged into lanes according to
2958      * <a href="Vector.html#lane-order">memory ordering</a>.
2959      * <p>
2960      * This method behaves as if it returns the result of calling
2961      * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
2962      * fromMemorySegment()} as follows:
2963      * <pre>{@code
2964      * var m = species.maskAll(true);
2965      * return fromMemorySegment(species, ms, offset, bo, m);
2966      * }</pre>
2967      *
2968      * @param species species of desired vector
2969      * @param ms the memory segment
2970      * @param offset the offset into the memory segment
2971      * @param bo the intended byte order
2972      * @return a vector loaded from the memory segment
2973      * @throws IndexOutOfBoundsException
2974      *         if {@code offset+N*4 < 0}
2975      *         or {@code offset+N*4 >= ms.byteSize()}
2976      *         for any lane {@code N} in the vector
2977      * @throws IllegalArgumentException if the memory segment is a heap segment that is
2978      *         not backed by a {@code byte[]} array.
2979      * @throws IllegalStateException if the memory segment's session is not alive,
2980      *         or if access occurs from a thread other than the thread owning the session.
2981      * @since 19
2982      */
    @ForceInline
    public static
    FloatVector fromMemorySegment(VectorSpecies<Float> species,
                                           MemorySegment ms, long offset,
                                           ByteOrder bo) {
        // Check [offset, offset + vectorByteSize) against the segment,
        // load in native order, then byte-swap only if bo differs.
        offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
        FloatSpecies vsp = (FloatSpecies) species;
        return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
    }
2992 
2993     /**
2994      * Loads a vector from a {@linkplain MemorySegment memory segment}
2995      * starting at an offset into the memory segment
2996      * and using a mask.
2997      * Lanes where the mask is unset are filled with the default
2998      * value of {@code float} (positive zero).
2999      * Bytes are composed into primitive lane elements according
3000      * to the specified byte order.
3001      * The vector is arranged into lanes according to
3002      * <a href="Vector.html#lane-order">memory ordering</a>.
3003      * <p>
3004      * The following pseudocode illustrates the behavior:
3005      * <pre>{@code
3006      * var slice = ms.asSlice(offset);


3007      * float[] ar = new float[species.length()];
3008      * for (int n = 0; n < ar.length; n++) {
3009      *     if (m.laneIsSet(n)) {
     *         ar[n] = slice.getAtIndex(ValueLayout.JAVA_FLOAT.withBitAlignment(8), n);
3011      *     }
3012      * }
3013      * FloatVector r = FloatVector.fromArray(species, ar, 0);
3014      * }</pre>
3015      * @implNote
3016      * This operation is likely to be more efficient if
3017      * the specified byte order is the same as
3018      * {@linkplain ByteOrder#nativeOrder()
3019      * the platform native order},
3020      * since this method will not need to reorder
3021      * the bytes of lane values.
3022      *
3023      * @param species species of desired vector
3024      * @param ms the memory segment
3025      * @param offset the offset into the memory segment
3026      * @param bo the intended byte order
3027      * @param m the mask controlling lane selection
3028      * @return a vector loaded from the memory segment
3029      * @throws IndexOutOfBoundsException
3030      *         if {@code offset+N*4 < 0}
3031      *         or {@code offset+N*4 >= ms.byteSize()}
3032      *         for any lane {@code N} in the vector
3033      *         where the mask is set
3034      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3035      *         not backed by a {@code byte[]} array.
3036      * @throws IllegalStateException if the memory segment's session is not alive,
3037      *         or if access occurs from a thread other than the thread owning the session.
3038      * @since 19
3039      */
    @ForceInline
    public static
    FloatVector fromMemorySegment(VectorSpecies<Float> species,
                                           MemorySegment ms, long offset,
                                           ByteOrder bo,
                                           VectorMask<Float> m) {
        FloatSpecies vsp = (FloatSpecies) species;
        // Fast path: the whole vector fits in the segment, so the masked
        // intrinsic load can run without per-lane bounds checks.
        if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
            return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
        }

        // FIXME: optimize
        // Slow path near the segment end: check only the set lanes
        // (scale 4 = bytes per float lane), then load lane-by-lane.
        // NOTE(review): this path reads via ELEMENT_LAYOUT and applies no
        // maybeSwap(bo) — confirm behavior for non-native byte orders.
        checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
        return vsp.ldLongOp(ms, offset, m, FloatVector::memorySegmentGet);
    }
3055 
3056     // Memory store operations
3057 
3058     /**
3059      * Stores this vector into an array of type {@code float[]}
3060      * starting at an offset.
3061      * <p>
3062      * For each vector lane, where {@code N} is the vector lane index,
3063      * the lane element at index {@code N} is stored into the array
3064      * element {@code a[offset+N]}.
3065      *
3066      * @param a the array, of type {@code float[]}
3067      * @param offset the offset into the array
3068      * @throws IndexOutOfBoundsException
3069      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3070      *         for any lane {@code N} in the vector
3071      */
    @ForceInline
    public final
    void intoArray(float[] a, int offset) {
        // Range check covers all lanes: [offset, offset + length()).
        offset = checkFromIndexSize(offset, length(), a.length);
        FloatSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this,
            a, offset,
            // Scalar fallback; the narrowing cast suggests off arrives as a
            // long here, while an array offset always fits in an int.
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3086 
3087     /**
3088      * Stores this vector into an array of type {@code float[]}
3089      * starting at offset and using a mask.
3090      * <p>
3091      * For each vector lane, where {@code N} is the vector lane index,
3092      * the lane element at index {@code N} is stored into the array
3093      * element {@code a[offset+N]}.
3094      * If the mask lane at {@code N} is unset then the corresponding
3095      * array element {@code a[offset+N]} is left unchanged.
3096      * <p>
3097      * Array range checking is done for lanes where the mask is set.
3098      * Lanes where the mask is unset are not stored and do not need
3099      * to correspond to legitimate elements of {@code a}.
3100      * That is, unset lanes may correspond to array indexes less than
3101      * zero or beyond the end of the array.
3102      *
3103      * @param a the array, of type {@code float[]}

3204      *         where the mask is set
3205      * @see FloatVector#toIntArray()
3206      */
3207     @ForceInline
3208     public final
3209     void intoArray(float[] a, int offset,
3210                    int[] indexMap, int mapOffset,
3211                    VectorMask<Float> m) {
3212         if (m.allTrue()) {
3213             intoArray(a, offset, indexMap, mapOffset);
3214         }
3215         else {
3216             intoArray0(a, offset, indexMap, mapOffset, m);
3217         }
3218     }
3219 
3220 
3221 
3222     /**
3223      * {@inheritDoc} <!--workaround-->
3224      * @since 19
3225      */
3226     @Override
3227     @ForceInline
3228     public final
3229     void intoMemorySegment(MemorySegment ms, long offset,
3230                            ByteOrder bo) {
3231         if (ms.isReadOnly()) {
3232             throw new UnsupportedOperationException("Attempt to write a read-only segment");

















3233         }

3234 
3235         offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
3236         maybeSwap(bo).intoMemorySegment0(ms, offset);











3237     }
3238 
3239     /**
3240      * {@inheritDoc} <!--workaround-->
3241      * @since 19
3242      */
3243     @Override
3244     @ForceInline
3245     public final
3246     void intoMemorySegment(MemorySegment ms, long offset,
3247                            ByteOrder bo,
3248                            VectorMask<Float> m) {
3249         if (m.allTrue()) {
3250             intoMemorySegment(ms, offset, bo);
3251         } else {
3252             if (ms.isReadOnly()) {
3253                 throw new UnsupportedOperationException("Attempt to write a read-only segment");
3254             }
3255             FloatSpecies vsp = vspecies();
3256             checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
3257             maybeSwap(bo).intoMemorySegment0(ms, offset, m);
3258         }
3259     }
3260 
3261     // ================================================
3262 
3263     // Low-level memory operations.
3264     //
3265     // Note that all of these operations *must* inline into a context
3266     // where the exact species of the involved vector is a
3267     // compile-time constant.  Otherwise, the intrinsic generation
3268     // will fail and performance will suffer.
3269     //
3270     // In many cases this is achieved by re-deriving a version of the
3271     // method in each concrete subclass (per species).  The re-derived
3272     // method simply calls one of these generic methods, with exact
3273     // parameters for the controlling metadata, which is either a
3274     // typed vector or constant species instance.
3275 
3276     // Unchecked loading operations in native byte order.
3277     // Caller is responsible for applying index checks, masking, and
3278     // byte swapping.
3279 
    /*package-private*/
    abstract
    FloatVector fromArray0(float[] a, int offset);
    // Unchecked contiguous load in native lane order; the caller is
    // responsible for all index checking.
    @ForceInline
    final
    FloatVector fromArray0Template(float[] a, int offset) {
        FloatSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            a, offset, vsp,
            // Fallback (non-intrinsic) path: scalar load of each lane.
            (arr, off, s) -> s.ldOp(arr, (int) off,
                                    (arr_, off_, i) -> arr_[off_ + i]));
    }
3294 
    /*package-private*/
    abstract
    FloatVector fromArray0(float[] a, int offset, VectorMask<Float> m);
    // Unchecked masked load; the caller has already range-checked the
    // lanes where the mask is set.
    @ForceInline
    final
    <M extends VectorMask<Float>>
    FloatVector fromArray0Template(Class<M> maskClass, float[] a, int offset, M m) {
        m.check(species());
        FloatSpecies vsp = vspecies();
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset), m,
            a, offset, vsp,
            // Fallback (non-intrinsic) path: scalar load of each set lane.
            (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
                                        (arr_, off_, i) -> arr_[off_ + i]));
    }
3311 
    /*package-private*/
    abstract
    FloatVector fromArray0(float[] a, int offset,
                                    int[] indexMap, int mapOffset,
                                    VectorMask<Float> m);
    // Masked gather: lane N loads a[offset + indexMap[mapOffset + N]].
    @ForceInline
    final
    <M extends VectorMask<Float>>
    FloatVector fromArray0Template(Class<M> maskClass, float[] a, int offset,
                                            int[] indexMap, int mapOffset, M m) {
        FloatSpecies vsp = vspecies();
        IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
        Objects.requireNonNull(a);
        Objects.requireNonNull(indexMap);
        m.check(vsp);
        Class<? extends FloatVector> vectorType = vsp.vectorType();

        // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
        IntVector vix = IntVector
            .fromArray(isp, indexMap, mapOffset)
            .add(offset);

        // FIXME: the index check should be performed only under control
        // of the mask; currently every lane's index is checked.
        vix = VectorIntrinsics.checkIndex(vix, a.length);

        return VectorSupport.loadWithMap(
            vectorType, maskClass, float.class, vsp.laneCount(),
            isp.vectorType(),
            a, ARRAY_BASE, vix, m,
            a, offset, indexMap, mapOffset, vsp,
            // Fallback (non-intrinsic) path: scalar gather under the mask.
            (c, idx, iMap, idy, s, vm) ->
            s.vOp(vm, n -> c[idx + iMap[idy+n]]));
    }
3345 
3346 
3347 

    abstract
    FloatVector fromMemorySegment0(MemorySegment bb, long offset);
    // Unchecked segment load in native byte order; the caller handles
    // index checks and any byte swapping.
    @ForceInline
    final
    FloatVector fromMemorySegment0Template(MemorySegment ms, long offset) {
        FloatSpecies vsp = vspecies();
        return ScopedMemoryAccess.loadFromMemorySegment(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                (MemorySegmentProxy) ms, offset, vsp,
                // Fallback (non-intrinsic) path: scalar load of each lane.
                (msp, off, s) -> {
                    return s.ldLongOp((MemorySegment) msp, off, FloatVector::memorySegmentGet);
                });
    }
3361 
    abstract
    FloatVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Float> m);
    // Unchecked masked segment load in native byte order; the caller
    // handles index checks and any byte swapping.
    @ForceInline
    final
    <M extends VectorMask<Float>>
    FloatVector fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
        FloatSpecies vsp = vspecies();
        m.check(vsp);
        return ScopedMemoryAccess.loadFromMemorySegmentMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                (MemorySegmentProxy) ms, offset, m, vsp,
                // Fallback (non-intrinsic) path: scalar load of each set lane.
                (msp, off, s, vm) -> {
                    return s.ldLongOp((MemorySegment) msp, off, vm, FloatVector::memorySegmentGet);
                });
    }
3377 
3378     // Unchecked storing operations in native byte order.
3379     // Caller is responsible for applying index checks, masking, and
3380     // byte swapping.
3381 
    abstract
    void intoArray0(float[] a, int offset);
    // Unchecked contiguous store in native lane order; the caller is
    // responsible for all index checking.
    @ForceInline
    final
    void intoArray0Template(float[] a, int offset) {
        FloatSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, a, offset,
            // Fallback (non-intrinsic) path: scalar store of each lane.
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_+i] = e));
    }
3396 
    abstract
    void intoArray0(float[] a, int offset, VectorMask<Float> m);
    // Unchecked masked store; the caller has already range-checked the
    // lanes where the mask is set.
    @ForceInline
    final
    <M extends VectorMask<Float>>
    void intoArray0Template(Class<M> maskClass, float[] a, int offset, M m) {
        m.check(species());
        FloatSpecies vsp = vspecies();
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, m, a, offset,
            // Fallback (non-intrinsic) path: scalar store of each set lane.
            (arr, off, v, vm)
            -> v.stOp(arr, (int) off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3413 
    abstract
    void intoArray0(float[] a, int offset,
                    int[] indexMap, int mapOffset,
                    VectorMask<Float> m);
    // Masked scatter: lane N stores into a[offset + indexMap[mapOffset + N]].
    @ForceInline
    final
    <M extends VectorMask<Float>>
    void intoArray0Template(Class<M> maskClass, float[] a, int offset,
                            int[] indexMap, int mapOffset, M m) {
        m.check(species());
        FloatSpecies vsp = vspecies();
        IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
        // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
        IntVector vix = IntVector
            .fromArray(isp, indexMap, mapOffset)
            .add(offset);

        // FIXME: the index check should be performed only under control
        // of the mask; currently every lane's index is checked.
        vix = VectorIntrinsics.checkIndex(vix, a.length);

        VectorSupport.storeWithMap(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            isp.vectorType(),
            a, arrayAddress(a, 0), vix,
            this, m,
            a, offset, indexMap, mapOffset,
            // Fallback (non-intrinsic) path: scalar scatter under the mask.
            (arr, off, v, map, mo, vm)
            -> v.stOp(arr, off, vm,
                      (arr_, off_, i, e) -> {
                          int j = map[mo + i];
                          arr[off + j] = e;
                      }));
    }
3447 
3448 



















    // Unchecked segment store in native byte order; the caller handles
    // index checks and any byte swapping.
    @ForceInline
    final
    void intoMemorySegment0(MemorySegment ms, long offset) {
        FloatSpecies vsp = vspecies();
        ScopedMemoryAccess.storeIntoMemorySegment(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                this,
                (MemorySegmentProxy) ms, offset,
                // Fallback (non-intrinsic) path: scalar store of each lane.
                (msp, off, v) -> {
                    v.stLongOp((MemorySegment) msp, off, FloatVector::memorySegmentSet);
                });
    }
3461 
    abstract
    void intoMemorySegment0(MemorySegment bb, long offset, VectorMask<Float> m);
    // Unchecked masked segment store in native byte order; the caller
    // handles index checks and any byte swapping.
    @ForceInline
    final
    <M extends VectorMask<Float>>
    void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
        FloatSpecies vsp = vspecies();
        m.check(vsp);
        ScopedMemoryAccess.storeIntoMemorySegmentMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                this, m,
                (MemorySegmentProxy) ms, offset,
                // Fallback (non-intrinsic) path: scalar store of each set lane.
                (msp, off, v, vm) -> {
                    v.stLongOp((MemorySegment) msp, off, vm, FloatVector::memorySegmentSet);
                });
    }
3478 
3479 
3480     // End of low-level memory operations.
3481 
3482     private static
3483     void checkMaskFromIndexSize(int offset,
3484                                 FloatSpecies vsp,
3485                                 VectorMask<Float> m,
3486                                 int scale,
3487                                 int limit) {
3488         ((AbstractMask<Float>)m)
3489             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3490     }
3491 
3492     private static
3493     void checkMaskFromIndexSize(long offset,
3494                                 FloatSpecies vsp,
3495                                 VectorMask<Float> m,
3496                                 int scale,
3497                                 long limit) {
3498         ((AbstractMask<Float>)m)
3499             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3500     }
3501 
3502     @ForceInline
3503     private void conditionalStoreNYI(int offset,
3504                                      FloatSpecies vsp,
3505                                      VectorMask<Float> m,
3506                                      int scale,
3507                                      int limit) {
3508         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3509             String msg =
3510                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3511                               offset, limit, m, vsp);
3512             throw new AssertionError(msg);
3513         }
3514     }
3515 
3516     /*package-private*/
3517     @Override
3518     @ForceInline
3519     final
3520     FloatVector maybeSwap(ByteOrder bo) {
3521         if (bo != NATIVE_ENDIAN) {

3792                 }
3793             }
3794             return dummyVector().vectorFactory(res);
3795         }
3796 
3797         /*package-private*/
3798         @ForceInline
3799         <M> FloatVector ldOp(M memory, int offset,
3800                                       FLdOp<M> f) {
3801             return dummyVector().ldOp(memory, offset, f);
3802         }
3803 
3804         /*package-private*/
3805         @ForceInline
3806         <M> FloatVector ldOp(M memory, int offset,
3807                                       VectorMask<Float> m,
3808                                       FLdOp<M> f) {
3809             return dummyVector().ldOp(memory, offset, m, f);
3810         }
3811 
3812         /*package-private*/
3813         @ForceInline
3814         FloatVector ldLongOp(MemorySegment memory, long offset,
3815                                       FLdLongOp f) {
3816             return dummyVector().ldLongOp(memory, offset, f);
3817         }
3818 
3819         /*package-private*/
3820         @ForceInline
3821         FloatVector ldLongOp(MemorySegment memory, long offset,
3822                                       VectorMask<Float> m,
3823                                       FLdLongOp f) {
3824             return dummyVector().ldLongOp(memory, offset, m, f);
3825         }
3826 
3827         /*package-private*/
3828         @ForceInline
3829         <M> void stOp(M memory, int offset, FStOp<M> f) {
3830             dummyVector().stOp(memory, offset, f);
3831         }
3832 
3833         /*package-private*/
3834         @ForceInline
3835         <M> void stOp(M memory, int offset,
3836                       AbstractMask<Float> m,
3837                       FStOp<M> f) {
3838             dummyVector().stOp(memory, offset, m, f);
3839         }
3840 
3841         /*package-private*/
3842         @ForceInline
3843         void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
3844             dummyVector().stLongOp(memory, offset, f);
3845         }
3846 
3847         /*package-private*/
3848         @ForceInline
3849         void stLongOp(MemorySegment memory, long offset,
3850                       AbstractMask<Float> m,
3851                       FStLongOp f) {
3852             dummyVector().stLongOp(memory, offset, m, f);
3853         }
3854 
3855         // N.B. Make sure these constant vectors and
3856         // masks load up correctly into registers.
3857         //
3858         // Also, see if we can avoid all that switching.
3859         // Could we cache both vectors and both masks in
3860         // this species object?
3861 
3862         // Zero and iota vector access
3863         @Override
3864         @ForceInline
3865         public final FloatVector zero() {
3866             if ((Class<?>) vectorType() == FloatMaxVector.class)
3867                 return FloatMaxVector.ZERO;
3868             switch (vectorBitSize()) {
3869                 case 64: return Float64Vector.ZERO;
3870                 case 128: return Float128Vector.ZERO;
3871                 case 256: return Float256Vector.ZERO;
3872                 case 512: return Float512Vector.ZERO;
3873             }
3874             throw new AssertionError();
< prev index next >