< prev index next >

src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java

Print this page

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 
  27 import java.nio.ByteBuffer;
  28 import java.nio.ByteOrder;
  29 import java.nio.ReadOnlyBufferException;
  30 import java.util.Arrays;
  31 import java.util.Objects;
  32 import java.util.function.Function;
  33 import java.util.function.UnaryOperator;
  34 



  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code double} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class DoubleVector extends AbstractVector<Double> {
  53 
    /**
     * Builds a vector over the given lane array.
     * Package-private: only the shape-specific subclasses in this
     * package construct instances.
     *
     * @param vec the lane values, handed through to {@code AbstractVector}
     */
    DoubleVector(double[] vec) {
        super(vec);
    }
  57 
    // Operator kinds rejected by opCode() for this floating-point lane type.
    static final int FORBID_OPCODE_KIND = VO_NOFP;
  59 


  60     @ForceInline
  61     static int opCode(Operator op) {
  62         return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
  63     }
  64     @ForceInline
  65     static int opCode(Operator op, int requireKind) {
  66         requireKind |= VO_OPCODE_VALID;
  67         return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
  68     }
  69     @ForceInline
  70     static boolean opKind(Operator op, int bit) {
  71         return VectorOperators.opKind(op, bit);
  72     }
  73 
  74     // Virtualized factories and operators,
  75     // coded with portable definitions.
  76     // These are all @ForceInline in case
  77     // they need to be used performantly.
  78     // The various shape-specific subclasses
  79     // also specialize them by wrapping

 334         return vectorFactory(res);
 335     }
 336 
 337     /*package-private*/
 338     @ForceInline
 339     final
 340     <M> DoubleVector ldOp(M memory, int offset,
 341                                   VectorMask<Double> m,
 342                                   FLdOp<M> f) {
 343         //double[] vec = vec();
 344         double[] res = new double[length()];
 345         boolean[] mbits = ((AbstractMask<Double>)m).getBits();
 346         for (int i = 0; i < res.length; i++) {
 347             if (mbits[i]) {
 348                 res[i] = f.apply(memory, offset, i);
 349             }
 350         }
 351         return vectorFactory(res);
 352     }
 353 







































    /**
     * Store kernel for scalar fallback loops: receives lane index {@code i}
     * and lane value {@code a} to be placed into {@code memory} relative to
     * {@code offset}.
     */
    interface FStOp<M> {
        void apply(M memory, int offset, int i, double a);
    }
 357 
 358     /*package-private*/
 359     @ForceInline
 360     final
 361     <M> void stOp(M memory, int offset,
 362                   FStOp<M> f) {
 363         double[] vec = vec();
 364         for (int i = 0; i < vec.length; i++) {
 365             f.apply(memory, offset, i, vec[i]);
 366         }
 367     }
 368 
 369     /*package-private*/
 370     @ForceInline
 371     final
 372     <M> void stOp(M memory, int offset,
 373                   VectorMask<Double> m,
 374                   FStOp<M> f) {
 375         double[] vec = vec();
 376         boolean[] mbits = ((AbstractMask<Double>)m).getBits();
 377         for (int i = 0; i < vec.length; i++) {
 378             if (mbits[i]) {
 379                 f.apply(memory, offset, i, vec[i]);
 380             }
 381         }
 382     }
 383 


































    // Binary test

    /**
     * Comparison kernel for scalar fallback loops: evaluates condition
     * {@code cond} at lane {@code i} over operands {@code a} and {@code b}.
     */
    /*package-private*/
    interface FBinTest {
        boolean apply(int cond, int i, double a, double b);
    }
 390 
 391     /*package-private*/
 392     @ForceInline
 393     final
 394     AbstractMask<Double> bTest(int cond,
 395                                   Vector<Double> o,
 396                                   FBinTest f) {
 397         double[] vec1 = vec();
 398         double[] vec2 = ((DoubleVector)o).vec();
 399         boolean[] bits = new boolean[length()];
 400         for (int i = 0; i < length(); i++){
 401             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 402         }
 403         return maskFactory(bits);
 404     }
 405 
 406 
    /*package-private*/
    // Covariant narrowing: concrete shapes report their DoubleSpecies.
    @Override
    abstract DoubleSpecies vspecies();
 410 
    /*package-private*/
    // Raw bit pattern of a lane value; uses doubleToRawLongBits (not
    // doubleToLongBits) so NaN payloads are preserved.
    @ForceInline
    static long toBits(double e) {
        return  Double.doubleToRawLongBits(e);
    }
 416 
 417     /*package-private*/
 418     @ForceInline
 419     static double fromBits(long bits) {
 420         return Double.longBitsToDouble((long)bits);
 421     }
 422 






























 423     // Static factories (other than memory operations)
 424 
 425     // Note: A surprising behavior in javadoc
 426     // sometimes makes a lone /** {@inheritDoc} */
 427     // comment drop the method altogether,
    // apparently if the method mentions a
 429     // parameter or return type of Vector<Double>
 430     // instead of Vector<E> as originally specified.
 431     // Adding an empty HTML fragment appears to
 432     // nudge javadoc into providing the desired
 433     // inherited documentation.  We use the HTML
 434     // comment <!--workaround--> for this.
 435 
 436     /**
 437      * Returns a vector of the given species
 438      * where all lane elements are set to
 439      * zero, the default primitive value.
 440      *
 441      * @param species species of the desired zero vector
 442      * @return a zero vector

1577      * {@inheritDoc} <!--workaround-->
1578      */
    @Override
    @ForceInline
    public final
    DoubleVector neg() {
        // Convenience alias for lanewise(NEG).
        return lanewise(NEG);
    }
1585 
1586     /**
1587      * {@inheritDoc} <!--workaround-->
1588      */
    @Override
    @ForceInline
    public final
    DoubleVector abs() {
        // Convenience alias for lanewise(ABS).
        return lanewise(ABS);
    }
1595 
1596 

1597     // sqrt
1598     /**
1599      * Computes the square root of this vector.
1600      *
1601      * This is a lane-wise unary operation which applies an operation
1602      * conforming to the specification of
1603      * {@link Math#sqrt Math.sqrt(a)}
1604      * to each lane value.
1605      *
1606      * This method is also equivalent to the expression
1607      * {@link #lanewise(VectorOperators.Unary)
1608      *    lanewise}{@code (}{@link VectorOperators#SQRT
1609      *    SQRT}{@code )}.
1610      *
1611      * @return the square root of this vector
1612      * @see VectorOperators#SQRT
1613      * @see #lanewise(VectorOperators.Unary,VectorMask)
1614      */
1615     @ForceInline
1616     public final DoubleVector sqrt() {

2224         double[] a = toArray();
2225         int[] sa = new int[a.length];
2226         for (int i = 0; i < a.length; i++) {
2227             sa[i] = (int) a[i];
2228         }
2229         return VectorShuffle.fromArray(dsp, sa, 0);
2230     }
2231 
    /*package-private*/
    // Intrinsic cast from double lanes to the byte-backed shuffle
    // representation; DoubleVector::toShuffle0 is the scalar fallback.
    // NOTE(review): argument order/shape must stay exact for the
    // intrinsic to match.
    @ForceInline
    final
    VectorShuffle<Double> toShuffleTemplate(Class<?> shuffleType) {
        DoubleSpecies vsp = vspecies();
        return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                                     getClass(), double.class, length(),
                                     shuffleType, byte.class, length(),
                                     this, vsp,
                                     DoubleVector::toShuffle0);
    }
2243 







































2244     /**
2245      * {@inheritDoc} <!--workaround-->
2246      */
    // Per-shape implementations presumably delegate to
    // selectFromTemplate below — confirm in the concrete subclasses.
    @Override
    public abstract
    DoubleVector selectFrom(Vector<Double> v);
2250 
2251     /*package-private*/
2252     @ForceInline
2253     final DoubleVector selectFromTemplate(DoubleVector v) {
2254         return v.rearrange(this.toShuffle());
2255     }
2256 
2257     /**
2258      * {@inheritDoc} <!--workaround-->
2259      */
    // Masked variant of selectFrom; implemented per concrete shape.
    @Override
    public abstract
    DoubleVector selectFrom(Vector<Double> s, VectorMask<Double> m);
2263 

2592         for (int i = 0; i < a.length; i++) {
2593             double e = a[i];
2594             res[i] = DoubleSpecies.toIntegralChecked(e, false);
2595         }
2596         return res;
2597     }
2598 
    /** {@inheritDoc} <!--workaround-->
     * @implNote
     * This is an alias for {@link #toArray()}.
     * When this method is used on vectors
     * of type {@code DoubleVector},
     * there will be no loss of precision.
     */
    @ForceInline
    @Override
    public final double[] toDoubleArray() {
        return toArray();
    }
2611 
2612     /**
2613      * Loads a vector from a byte array starting at an offset.
2614      * Bytes are composed into primitive lane elements according
2615      * to the specified byte order.
2616      * The vector is arranged into lanes according to
2617      * <a href="Vector.html#lane-order">memory ordering</a>.
2618      * <p>
2619      * This method behaves as if it returns the result of calling
2620      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2621      * fromByteBuffer()} as follows:
2622      * <pre>{@code
2623      * var bb = ByteBuffer.wrap(a);
2624      * var m = species.maskAll(true);
2625      * return fromByteBuffer(species, bb, offset, bo, m);
2626      * }</pre>
2627      *
2628      * @param species species of desired vector
2629      * @param a the byte array
2630      * @param offset the offset into the array
2631      * @param bo the intended byte order
2632      * @return a vector loaded from a byte array
2633      * @throws IndexOutOfBoundsException
2634      *         if {@code offset+N*ESIZE < 0}
2635      *         or {@code offset+(N+1)*ESIZE > a.length}
2636      *         for any lane {@code N} in the vector
2637      */
    @ForceInline
    public static
    DoubleVector fromByteArray(VectorSpecies<Double> species,
                                       byte[] a, int offset,
                                       ByteOrder bo) {
        // Bounds-check the whole vector span up front, then do an unmasked
        // raw load; maybeSwap reorders bytes only if bo is non-native.
        offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
        DoubleSpecies vsp = (DoubleSpecies) species;
        return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
    }
2647 
2648     /**
2649      * Loads a vector from a byte array starting at an offset
2650      * and using a mask.
2651      * Lanes where the mask is unset are filled with the default
2652      * value of {@code double} (positive zero).
2653      * Bytes are composed into primitive lane elements according
2654      * to the specified byte order.
2655      * The vector is arranged into lanes according to
2656      * <a href="Vector.html#lane-order">memory ordering</a>.
2657      * <p>
2658      * This method behaves as if it returns the result of calling
2659      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2660      * fromByteBuffer()} as follows:
2661      * <pre>{@code
2662      * var bb = ByteBuffer.wrap(a);
2663      * return fromByteBuffer(species, bb, offset, bo, m);
2664      * }</pre>
2665      *
2666      * @param species species of desired vector
2667      * @param a the byte array
2668      * @param offset the offset into the array
2669      * @param bo the intended byte order
2670      * @param m the mask controlling lane selection
2671      * @return a vector loaded from a byte array
2672      * @throws IndexOutOfBoundsException
2673      *         if {@code offset+N*ESIZE < 0}
2674      *         or {@code offset+(N+1)*ESIZE > a.length}
2675      *         for any lane {@code N} in the vector
2676      *         where the mask is set
2677      */
    @ForceInline
    public static
    DoubleVector fromByteArray(VectorSpecies<Double> species,
                                       byte[] a, int offset,
                                       ByteOrder bo,
                                       VectorMask<Double> m) {
        DoubleSpecies vsp = (DoubleSpecies) species;
        if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
            // Fast path: the full vector span is in bounds, so the masked
            // intrinsic load can run without per-lane index checks.
            return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
        }

        // FIXME: optimize
        // Slow path: validate only the set lanes, then load them one at a
        // time through a ByteBuffer view (8 == Double.BYTES).
        checkMaskFromIndexSize(offset, vsp, m, 8, a.length);
        ByteBuffer wb = wrapper(a, bo);
        return vsp.ldOp(wb, offset, (AbstractMask<Double>)m,
                   (wb_, o, i)  -> wb_.getDouble(o + i * 8));
    }
2695 
2696     /**
2697      * Loads a vector from an array of type {@code double[]}
2698      * starting at an offset.
2699      * For each vector lane, where {@code N} is the vector lane index, the
2700      * array element at index {@code offset + N} is placed into the
2701      * resulting vector at lane index {@code N}.
2702      *
2703      * @param species species of desired vector
2704      * @param a the array
2705      * @param offset the offset into the array
2706      * @return the vector loaded from an array
2707      * @throws IndexOutOfBoundsException
2708      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2709      *         for any lane {@code N} in the vector
2710      */
2711     @ForceInline
2712     public static
2713     DoubleVector fromArray(VectorSpecies<Double> species,
2714                                    double[] a, int offset) {
2715         offset = checkFromIndexSize(offset, species.length(), a.length);

2866      * @see DoubleVector#toIntArray()
2867      */
    @ForceInline
    public static
    DoubleVector fromArray(VectorSpecies<Double> species,
                                   double[] a, int offset,
                                   int[] indexMap, int mapOffset,
                                   VectorMask<Double> m) {
        if (m.allTrue()) {
            // All lanes set: the unmasked gather is equivalent and cheaper.
            return fromArray(species, a, offset, indexMap, mapOffset);
        }
        else {
            // Masked gather delegated to the per-shape template.
            DoubleSpecies vsp = (DoubleSpecies) species;
            return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
        }
    }
2882 
2883 
2884 
2885     /**
2886      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
2887      * starting at an offset into the byte buffer.
2888      * Bytes are composed into primitive lane elements according
2889      * to the specified byte order.
2890      * The vector is arranged into lanes according to
2891      * <a href="Vector.html#lane-order">memory ordering</a>.
2892      * <p>
2893      * This method behaves as if it returns the result of calling
2894      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2895      * fromByteBuffer()} as follows:
2896      * <pre>{@code
2897      * var m = species.maskAll(true);
2898      * return fromByteBuffer(species, bb, offset, bo, m);
2899      * }</pre>
2900      *
2901      * @param species species of desired vector
2902      * @param bb the byte buffer
2903      * @param offset the offset into the byte buffer
2904      * @param bo the intended byte order
2905      * @return a vector loaded from a byte buffer
2906      * @throws IndexOutOfBoundsException
2907      *         if {@code offset+N*8 < 0}
2908      *         or {@code offset+N*8 >= bb.limit()}
2909      *         for any lane {@code N} in the vector





2910      */
    @ForceInline
    public static
    DoubleVector fromByteBuffer(VectorSpecies<Double> species,
                                        ByteBuffer bb, int offset,
                                        ByteOrder bo) {
        // Bounds-check against the buffer limit, raw-load, then swap bytes
        // only when bo differs from the native order.
        offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
        DoubleSpecies vsp = (DoubleSpecies) species;
        return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
    }
2920 
2921     /**
2922      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
2923      * starting at an offset into the byte buffer
2924      * and using a mask.
2925      * Lanes where the mask is unset are filled with the default
2926      * value of {@code double} (positive zero).
2927      * Bytes are composed into primitive lane elements according
2928      * to the specified byte order.
2929      * The vector is arranged into lanes according to
2930      * <a href="Vector.html#lane-order">memory ordering</a>.
2931      * <p>
2932      * The following pseudocode illustrates the behavior:
2933      * <pre>{@code
2934      * DoubleBuffer eb = bb.duplicate()
2935      *     .position(offset)
2936      *     .order(bo).asDoubleBuffer();
2937      * double[] ar = new double[species.length()];
2938      * for (int n = 0; n < ar.length; n++) {
2939      *     if (m.laneIsSet(n)) {
2940      *         ar[n] = eb.get(n);
2941      *     }
2942      * }
2943      * DoubleVector r = DoubleVector.fromArray(species, ar, 0);
2944      * }</pre>
2945      * @implNote
2946      * This operation is likely to be more efficient if
2947      * the specified byte order is the same as
2948      * {@linkplain ByteOrder#nativeOrder()
2949      * the platform native order},
2950      * since this method will not need to reorder
2951      * the bytes of lane values.
2952      *
2953      * @param species species of desired vector
2954      * @param bb the byte buffer
2955      * @param offset the offset into the byte buffer
2956      * @param bo the intended byte order
2957      * @param m the mask controlling lane selection
2958      * @return a vector loaded from a byte buffer
2959      * @throws IndexOutOfBoundsException
2960      *         if {@code offset+N*8 < 0}
2961      *         or {@code offset+N*8 >= bb.limit()}
2962      *         for any lane {@code N} in the vector
2963      *         where the mask is set





2964      */
    @ForceInline
    public static
    DoubleVector fromByteBuffer(VectorSpecies<Double> species,
                                        ByteBuffer bb, int offset,
                                        ByteOrder bo,
                                        VectorMask<Double> m) {
        DoubleSpecies vsp = (DoubleSpecies) species;
        if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
            // Fast path: full vector span in bounds — masked intrinsic load.
            return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
        }

        // FIXME: optimize
        // Slow path: check only the set lanes, then load them scalar-wise
        // through a duplicated buffer view (8 == Double.BYTES).
        checkMaskFromIndexSize(offset, vsp, m, 8, bb.limit());
        ByteBuffer wb = wrapper(bb, bo);
        return vsp.ldOp(wb, offset, (AbstractMask<Double>)m,
                   (wb_, o, i)  -> wb_.getDouble(o + i * 8));
    }
2982 
2983     // Memory store operations
2984 
2985     /**
2986      * Stores this vector into an array of type {@code double[]}
2987      * starting at an offset.
2988      * <p>
2989      * For each vector lane, where {@code N} is the vector lane index,
2990      * the lane element at index {@code N} is stored into the array
2991      * element {@code a[offset+N]}.
2992      *
2993      * @param a the array, of type {@code double[]}
2994      * @param offset the offset into the array
2995      * @throws IndexOutOfBoundsException
2996      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2997      *         for any lane {@code N} in the vector
2998      */
    @ForceInline
    public final
    void intoArray(double[] a, int offset) {
        // Range-check the whole span, then hand off to the store intrinsic;
        // the trailing lambda is the scalar fallback.
        offset = checkFromIndexSize(offset, length(), a.length);
        DoubleSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this,
            a, offset,
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3013 
3014     /**
3015      * Stores this vector into an array of type {@code double[]}
3016      * starting at offset and using a mask.
3017      * <p>
3018      * For each vector lane, where {@code N} is the vector lane index,
3019      * the lane element at index {@code N} is stored into the array
3020      * element {@code a[offset+N]}.
3021      * If the mask lane at {@code N} is unset then the corresponding
3022      * array element {@code a[offset+N]} is left unchanged.
3023      * <p>
3024      * Array range checking is done for lanes where the mask is set.
3025      * Lanes where the mask is unset are not stored and do not need
3026      * to correspond to legitimate elements of {@code a}.
3027      * That is, unset lanes may correspond to array indexes less than
3028      * zero or beyond the end of the array.
3029      *
3030      * @param a the array, of type {@code double[]}

3150      *         where the mask is set
3151      * @see DoubleVector#toIntArray()
3152      */
    @ForceInline
    public final
    void intoArray(double[] a, int offset,
                   int[] indexMap, int mapOffset,
                   VectorMask<Double> m) {
        if (m.allTrue()) {
            // All lanes set: the cheaper unmasked scatter is equivalent.
            intoArray(a, offset, indexMap, mapOffset);
        }
        else {
            intoArray0(a, offset, indexMap, mapOffset, m);
        }
    }
3165 
3166 
3167 
3168     /**
3169      * {@inheritDoc} <!--workaround-->

3170      */
    @Override
    @ForceInline
    public final
    void intoByteArray(byte[] a, int offset,
                       ByteOrder bo) {
        // Check the full byte span, swap to the requested order if needed,
        // then do the raw unmasked store.
        offset = checkFromIndexSize(offset, byteSize(), a.length);
        maybeSwap(bo).intoByteArray0(a, offset);
    }
3179 
3180     /**
3181      * {@inheritDoc} <!--workaround-->
3182      */
    @Override
    @ForceInline
    public final
    void intoByteArray(byte[] a, int offset,
                       ByteOrder bo,
                       VectorMask<Double> m) {
        if (m.allTrue()) {
            // Degenerate mask: reuse the unmasked path and its full check.
            intoByteArray(a, offset, bo);
        } else {
            // Check only the set lanes (8 == Double.BYTES), then store
            // through the masked template.
            DoubleSpecies vsp = vspecies();
            checkMaskFromIndexSize(offset, vsp, m, 8, a.length);
            maybeSwap(bo).intoByteArray0(a, offset, m);
        }
    }
3197 
3198     /**
3199      * {@inheritDoc} <!--workaround-->
3200      */
    @Override
    @ForceInline
    public final
    void intoByteBuffer(ByteBuffer bb, int offset,
                        ByteOrder bo) {
        // Reject read-only buffers before any bounds work.
        // NOTE(review): the masked overload tests bb.isReadOnly() directly;
        // presumably equivalent to this ScopedMemoryAccess check — confirm.
        if (ScopedMemoryAccess.isReadOnly(bb)) {
            throw new ReadOnlyBufferException();
        }
        offset = checkFromIndexSize(offset, byteSize(), bb.limit());
        maybeSwap(bo).intoByteBuffer0(bb, offset);
    }
3212 
3213     /**
3214      * {@inheritDoc} <!--workaround-->

3215      */
    @Override
    @ForceInline
    public final
    void intoByteBuffer(ByteBuffer bb, int offset,
                        ByteOrder bo,
                        VectorMask<Double> m) {
        if (m.allTrue()) {
            // Degenerate mask: reuse the unmasked path and its full check.
            intoByteBuffer(bb, offset, bo);
        } else {
            if (bb.isReadOnly()) {
                throw new ReadOnlyBufferException();
            }
            // Check only the set lanes (8 == Double.BYTES).
            DoubleSpecies vsp = vspecies();
            checkMaskFromIndexSize(offset, vsp, m, 8, bb.limit());
            maybeSwap(bo).intoByteBuffer0(bb, offset, m);
        }
    }
3233 
3234     // ================================================
3235 
3236     // Low-level memory operations.
3237     //
3238     // Note that all of these operations *must* inline into a context
3239     // where the exact species of the involved vector is a
3240     // compile-time constant.  Otherwise, the intrinsic generation
3241     // will fail and performance will suffer.
3242     //
3243     // In many cases this is achieved by re-deriving a version of the
3244     // method in each concrete subclass (per species).  The re-derived
3245     // method simply calls one of these generic methods, with exact
3246     // parameters for the controlling metadata, which is either a
3247     // typed vector or constant species instance.
3248 
3249     // Unchecked loading operations in native byte order.
3250     // Caller is responsible for applying index checks, masking, and
3251     // byte swapping.
3252 
    /*package-private*/
    // Unchecked load in native order; caller handles bounds and swapping.
    abstract
    DoubleVector fromArray0(double[] a, int offset);
    @ForceInline
    final
    DoubleVector fromArray0Template(double[] a, int offset) {
        DoubleSpecies vsp = vspecies();
        // Intrinsic load; the lambda is the scalar fallback when the
        // intrinsic does not apply.
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> s.ldOp(arr, off,
                                    (arr_, off_, i) -> arr_[off_ + i]));
    }
3267 
    /*package-private*/
    // Masked variant of the unchecked native-order array load.
    abstract
    DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m);
    @ForceInline
    final
    <M extends VectorMask<Double>>
    DoubleVector fromArray0Template(Class<M> maskClass, double[] a, int offset, M m) {
        m.check(species());
        DoubleSpecies vsp = vspecies();
        // Masked intrinsic load; the lambda is the scalar fallback.
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset), m,
            a, offset, vsp,
            (arr, off, s, vm) -> s.ldOp(arr, off, vm,
                                        (arr_, off_, i) -> arr_[off_ + i]));
    }
3284 
3285     /*package-private*/
3286     abstract
3287     DoubleVector fromArray0(double[] a, int offset,
3288                                     int[] indexMap, int mapOffset,
3289                                     VectorMask<Double> m);
3290     @ForceInline
3291     final
3292     <M extends VectorMask<Double>>
3293     DoubleVector fromArray0Template(Class<M> maskClass, double[] a, int offset,
3294                                             int[] indexMap, int mapOffset, M m) {
3295         DoubleSpecies vsp = vspecies();
3296         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3297         Objects.requireNonNull(a);
3298         Objects.requireNonNull(indexMap);
3299         m.check(vsp);
3300         Class<? extends DoubleVector> vectorType = vsp.vectorType();
3301 

3319         } else {
3320             vix = IntVector
3321                 .fromArray(isp, indexMap, mapOffset)
3322                 .add(offset);
3323         }
3324 
3325         // FIXME: Check index under mask controlling.
3326         vix = VectorIntrinsics.checkIndex(vix, a.length);
3327 
3328         return VectorSupport.loadWithMap(
3329             vectorType, maskClass, double.class, vsp.laneCount(),
3330             isp.vectorType(),
3331             a, ARRAY_BASE, vix, m,
3332             a, offset, indexMap, mapOffset, vsp,
3333             (c, idx, iMap, idy, s, vm) ->
3334             s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3335     }
3336 
3337 
3338 
    @Override
    abstract
    DoubleVector fromByteArray0(byte[] a, int offset);
    @ForceInline
    final
    DoubleVector fromByteArray0Template(byte[] a, int offset) {
        DoubleSpecies vsp = vspecies();
        return VectorSupport.load(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            a, offset, vsp,
            (arr, off, s) -> {
                // Scalar fallback: view the byte[] in native order and read
                // doubles 8 bytes apart.
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                return s.ldOp(wb, off,
                        (wb_, o, i) -> wb_.getDouble(o + i * 8));
            });
    }
3356 
    // Masked variant of the unchecked native-order byte-array load.
    abstract
    DoubleVector fromByteArray0(byte[] a, int offset, VectorMask<Double> m);
    @ForceInline
    final
    <M extends VectorMask<Double>>
    DoubleVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
        DoubleSpecies vsp = vspecies();
        m.check(vsp);
        return VectorSupport.loadMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset), m,
            a, offset, vsp,
            (arr, off, s, vm) -> {
                // Scalar fallback: masked reads through a native-order view.
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                return s.ldOp(wb, off, vm,
                        (wb_, o, i) -> wb_.getDouble(o + i * 8));
            });
    }
3375 
    // Unchecked native-order buffer load; bounds/swap handled by callers.
    abstract
    DoubleVector fromByteBuffer0(ByteBuffer bb, int offset);
    @ForceInline
    final
    DoubleVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
        DoubleSpecies vsp = vspecies();
        // ScopedMemoryAccess keeps any backing memory session alive for
        // the duration of the load.
        return ScopedMemoryAccess.loadFromByteBuffer(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                bb, offset, vsp,
                (buf, off, s) -> {
                    // Scalar fallback through a native-order view.
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    return s.ldOp(wb, off,
                            (wb_, o, i) -> wb_.getDouble(o + i * 8));
                });
    }
3391 
    // Masked variant of the unchecked native-order buffer load.
    abstract
    DoubleVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m);
    @ForceInline
    final
    <M extends VectorMask<Double>>
    DoubleVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
        DoubleSpecies vsp = vspecies();
        m.check(vsp);
        return ScopedMemoryAccess.loadFromByteBufferMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                bb, offset, m, vsp,
                (buf, off, s, vm) -> {
                    // Scalar fallback: masked reads via a native-order view.
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    return s.ldOp(wb, off, vm,
                            (wb_, o, i) -> wb_.getDouble(o + i * 8));
                });
    }
3409 
3410     // Unchecked storing operations in native byte order.
3411     // Caller is responsible for applying index checks, masking, and
3412     // byte swapping.
3413 
    // Unchecked native-order array store; caller handles bounds/swapping.
    abstract
    void intoArray0(double[] a, int offset);
    @ForceInline
    final
    void intoArray0Template(double[] a, int offset) {
        DoubleSpecies vsp = vspecies();
        // Intrinsic store; the lambda is the scalar fallback.
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, a, offset,
            (arr, off, v)
            -> v.stOp(arr, off,
                      (arr_, off_, i, e) -> arr_[off_+i] = e));
    }
3428 
    // Unchecked masked store into a double[]; lanes with an unset mask
    // bit leave the corresponding array element untouched.
    abstract
    void intoArray0(double[] a, int offset, VectorMask<Double> m);
    @ForceInline
    final
    <M extends VectorMask<Double>>
    void intoArray0Template(Class<M> maskClass, double[] a, int offset, M m) {
        m.check(species());  // mask must belong to this species
        DoubleSpecies vsp = vspecies();
        // Intrinsic candidate; the lambda is the scalar fallback store.
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm)
            -> v.stOp(arr, off, vm,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3445 
3446     abstract
3447     void intoArray0(double[] a, int offset,
3448                     int[] indexMap, int mapOffset,
3449                     VectorMask<Double> m);
3450     @ForceInline
3451     final
3452     <M extends VectorMask<Double>>
3453     void intoArray0Template(Class<M> maskClass, double[] a, int offset,
3454                             int[] indexMap, int mapOffset, M m) {
3455         m.check(species());
3456         DoubleSpecies vsp = vspecies();
3457         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3458         if (vsp.laneCount() == 1) {
3459             intoArray(a, offset + indexMap[mapOffset], m);
3460             return;
3461         }
3462 

3480 
3481 
3482         // FIXME: Check index under mask controlling.
3483         vix = VectorIntrinsics.checkIndex(vix, a.length);
3484 
3485         VectorSupport.storeWithMap(
3486             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3487             isp.vectorType(),
3488             a, arrayAddress(a, 0), vix,
3489             this, m,
3490             a, offset, indexMap, mapOffset,
3491             (arr, off, v, map, mo, vm)
3492             -> v.stOp(arr, off, vm,
3493                       (arr_, off_, i, e) -> {
3494                           int j = map[mo + i];
3495                           arr[off + j] = e;
3496                       }));
3497     }
3498 
3499 
    // Unchecked store into a byte[] in native byte order; caller has
    // already applied index checks and byte swapping.
    abstract
    void intoByteArray0(byte[] a, int offset);
    @ForceInline
    final
    void intoByteArray0Template(byte[] a, int offset) {
        DoubleSpecies vsp = vspecies();
        // Intrinsic candidate; the lambda writes one 8-byte double per
        // lane through a ByteBuffer view of the array.
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            this, a, offset,
            (arr, off, v) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                v.stOp(wb, off,
                        (tb_, o, i, e) -> tb_.putDouble(o + i * 8, e));
            });
    }
3516 
    // Unchecked masked store into a byte[] in native byte order.
    abstract
    void intoByteArray0(byte[] a, int offset, VectorMask<Double> m);
    @ForceInline
    final
    <M extends VectorMask<Double>>
    void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
        DoubleSpecies vsp = vspecies();
        m.check(vsp);  // mask must belong to this species
        // Intrinsic candidate; the lambda is the scalar fallback store.
        VectorSupport.storeMasked(
            vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
            a, byteArrayAddress(a, offset),
            this, m, a, offset,
            (arr, off, v, vm) -> {
                ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
                v.stOp(wb, off, vm,
                        (tb_, o, i, e) -> tb_.putDouble(o + i * 8, e));
            });
    }
3535 
    // Unchecked store into a ByteBuffer in native byte order; the
    // ScopedMemoryAccess wrapper keeps the buffer's session alive for
    // the duration of the store.
    @ForceInline
    final
    void intoByteBuffer0(ByteBuffer bb, int offset) {
        DoubleSpecies vsp = vspecies();
        ScopedMemoryAccess.storeIntoByteBuffer(
                vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
                this, bb, offset,
                (buf, off, v) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    v.stOp(wb, off,
                            (wb_, o, i, e) -> wb_.putDouble(o + i * 8, e));
                });
    }
3549 
    // Unchecked masked store into a ByteBuffer in native byte order.
    abstract
    void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m);
    @ForceInline
    final
    <M extends VectorMask<Double>>
    void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
        DoubleSpecies vsp = vspecies();
        m.check(vsp);  // mask must belong to this species
        // Intrinsic candidate; the lambda is the scalar fallback store.
        ScopedMemoryAccess.storeIntoByteBufferMasked(
                vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
                this, m, bb, offset,
                (buf, off, v, vm) -> {
                    ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
                    v.stOp(wb, off, vm,
                            (wb_, o, i, e) -> wb_.putDouble(o + i * 8, e));
                });
    }
3567 
3568 
3569     // End of low-level memory operations.
3570 
3571     private static
3572     void checkMaskFromIndexSize(int offset,
3573                                 DoubleSpecies vsp,
3574                                 VectorMask<Double> m,
3575                                 int scale,
3576                                 int limit) {
3577         ((AbstractMask<Double>)m)
3578             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3579     }
3580 










3581     @ForceInline
3582     private void conditionalStoreNYI(int offset,
3583                                      DoubleSpecies vsp,
3584                                      VectorMask<Double> m,
3585                                      int scale,
3586                                      int limit) {
3587         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3588             String msg =
3589                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3590                               offset, limit, m, vsp);
3591             throw new AssertionError(msg);
3592         }
3593     }
3594 
3595     /*package-private*/
3596     @Override
3597     @ForceInline
3598     final
3599     DoubleVector maybeSwap(ByteOrder bo) {
3600         if (bo != NATIVE_ENDIAN) {

3871                 }
3872             }
3873             return dummyVector().vectorFactory(res);
3874         }
3875 
        /*package-private*/
        // Delegate an unmasked scalar load to a throwaway vector of this
        // species, which carries the per-shape ldOp implementation.
        @ForceInline
        <M> DoubleVector ldOp(M memory, int offset,
                                      FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, f);
        }

        /*package-private*/
        // Masked variant of the delegate above.
        @ForceInline
        <M> DoubleVector ldOp(M memory, int offset,
                                      VectorMask<Double> m,
                                      FLdOp<M> f) {
            return dummyVector().ldOp(memory, offset, m, f);
        }
3890 















        /*package-private*/
        // Delegate an unmasked scalar store to a throwaway vector of this
        // species, which carries the per-shape stOp implementation.
        @ForceInline
        <M> void stOp(M memory, int offset, FStOp<M> f) {
            dummyVector().stOp(memory, offset, f);
        }

        /*package-private*/
        // Masked variant of the delegate above.
        @ForceInline
        <M> void stOp(M memory, int offset,
                      AbstractMask<Double> m,
                      FStOp<M> f) {
            dummyVector().stOp(memory, offset, m, f);
        }
3904 














3905         // N.B. Make sure these constant vectors and
3906         // masks load up correctly into registers.
3907         //
3908         // Also, see if we can avoid all that switching.
3909         // Could we cache both vectors and both masks in
3910         // this species object?
3911 
3912         // Zero and iota vector access
3913         @Override
3914         @ForceInline
3915         public final DoubleVector zero() {
3916             if ((Class<?>) vectorType() == DoubleMaxVector.class)
3917                 return DoubleMaxVector.ZERO;
3918             switch (vectorBitSize()) {
3919                 case 64: return Double64Vector.ZERO;
3920                 case 128: return Double128Vector.ZERO;
3921                 case 256: return Double256Vector.ZERO;
3922                 case 512: return Double512Vector.ZERO;
3923             }
3924             throw new AssertionError();

   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 

  27 import java.nio.ByteOrder;

  28 import java.util.Arrays;
  29 import java.util.Objects;
  30 import java.util.function.Function;

  31 
  32 import jdk.incubator.foreign.MemorySegment;
  33 import jdk.incubator.foreign.ValueLayout;
  34 import jdk.internal.access.foreign.MemorySegmentProxy;
  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code double} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class DoubleVector extends AbstractVector<Double> {
  53 
    // Package-private constructor: instances are created only through the
    // shape-specific subclasses; the lane array is held by the superclass.
    DoubleVector(double[] vec) {
        super(vec);
    }
  57 
  58     static final int FORBID_OPCODE_KIND = VO_NOFP;
  59 
  60     static final ValueLayout.OfDouble ELEMENT_LAYOUT = ValueLayout.JAVA_DOUBLE.withBitAlignment(8);
  61 
    // Map an operator token to its numeric opcode, rejecting operator
    // kinds forbidden for floating point (FORBID_OPCODE_KIND = VO_NOFP).
    @ForceInline
    static int opCode(Operator op) {
        return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
    }
    // As above, but additionally requiring the given operator kind bits.
    @ForceInline
    static int opCode(Operator op, int requireKind) {
        requireKind |= VO_OPCODE_VALID;
        return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
    }
    // Test whether the operator carries the given kind bit.
    @ForceInline
    static boolean opKind(Operator op, int bit) {
        return VectorOperators.opKind(op, bit);
    }
  75 
  76     // Virtualized factories and operators,
  77     // coded with portable definitions.
  78     // These are all @ForceInline in case
  79     // they need to be used performantly.
  80     // The various shape-specific subclasses
  81     // also specialize them by wrapping

 336         return vectorFactory(res);
 337     }
 338 
 339     /*package-private*/
 340     @ForceInline
 341     final
 342     <M> DoubleVector ldOp(M memory, int offset,
 343                                   VectorMask<Double> m,
 344                                   FLdOp<M> f) {
 345         //double[] vec = vec();
 346         double[] res = new double[length()];
 347         boolean[] mbits = ((AbstractMask<Double>)m).getBits();
 348         for (int i = 0; i < res.length; i++) {
 349             if (mbits[i]) {
 350                 res[i] = f.apply(memory, offset, i);
 351             }
 352         }
 353         return vectorFactory(res);
 354     }
 355 
    /*package-private*/
    // Per-lane load callback for memory addressed by a long offset
    // (MemorySegment); returns the value for lane i.
    interface FLdLongOp {
        double apply(MemorySegment memory, long offset, int i);
    }
 360 
 361     /*package-private*/
 362     @ForceInline
 363     final
 364     DoubleVector ldLongOp(MemorySegment memory, long offset,
 365                                   FLdLongOp f) {
 366         //dummy; no vec = vec();
 367         double[] res = new double[length()];
 368         for (int i = 0; i < res.length; i++) {
 369             res[i] = f.apply(memory, offset, i);
 370         }
 371         return vectorFactory(res);
 372     }
 373 
 374     /*package-private*/
 375     @ForceInline
 376     final
 377     DoubleVector ldLongOp(MemorySegment memory, long offset,
 378                                   VectorMask<Double> m,
 379                                   FLdLongOp f) {
 380         //double[] vec = vec();
 381         double[] res = new double[length()];
 382         boolean[] mbits = ((AbstractMask<Double>)m).getBits();
 383         for (int i = 0; i < res.length; i++) {
 384             if (mbits[i]) {
 385                 res[i] = f.apply(memory, offset, i);
 386             }
 387         }
 388         return vectorFactory(res);
 389     }
 390 
    // Lane accessor used as an FLdLongOp method reference: reads the
    // 8-byte double for lane i at byte offset o + i*8.
    static double memorySegmentGet(MemorySegment ms, long o, int i) {
        return ms.get(ELEMENT_LAYOUT, o + i * 8L);
    }
 394 
    // Per-lane store callback: writes lane value a for lane i into the
    // given memory object at the given offset.
    interface FStOp<M> {
        void apply(M memory, int offset, int i, double a);
    }
 398 
 399     /*package-private*/
 400     @ForceInline
 401     final
 402     <M> void stOp(M memory, int offset,
 403                   FStOp<M> f) {
 404         double[] vec = vec();
 405         for (int i = 0; i < vec.length; i++) {
 406             f.apply(memory, offset, i, vec[i]);
 407         }
 408     }
 409 
 410     /*package-private*/
 411     @ForceInline
 412     final
 413     <M> void stOp(M memory, int offset,
 414                   VectorMask<Double> m,
 415                   FStOp<M> f) {
 416         double[] vec = vec();
 417         boolean[] mbits = ((AbstractMask<Double>)m).getBits();
 418         for (int i = 0; i < vec.length; i++) {
 419             if (mbits[i]) {
 420                 f.apply(memory, offset, i, vec[i]);
 421             }
 422         }
 423     }
 424 
    // Per-lane store callback for memory addressed by a long offset
    // (MemorySegment); writes lane value a for lane i.
    interface FStLongOp {
        void apply(MemorySegment memory, long offset, int i, double a);
    }
 428 
 429     /*package-private*/
 430     @ForceInline
 431     final
 432     void stLongOp(MemorySegment memory, long offset,
 433                   FStLongOp f) {
 434         double[] vec = vec();
 435         for (int i = 0; i < vec.length; i++) {
 436             f.apply(memory, offset, i, vec[i]);
 437         }
 438     }
 439 
 440     /*package-private*/
 441     @ForceInline
 442     final
 443     void stLongOp(MemorySegment memory, long offset,
 444                   VectorMask<Double> m,
 445                   FStLongOp f) {
 446         double[] vec = vec();
 447         boolean[] mbits = ((AbstractMask<Double>)m).getBits();
 448         for (int i = 0; i < vec.length; i++) {
 449             if (mbits[i]) {
 450                 f.apply(memory, offset, i, vec[i]);
 451             }
 452         }
 453     }
 454 
    // Lane mutator used as an FStLongOp method reference: writes the
    // 8-byte double for lane i at byte offset o + i*8.
    static void memorySegmentSet(MemorySegment ms, long o, int i, double e) {
        ms.set(ELEMENT_LAYOUT, o + i * 8L, e);
    }
 458 
 459     // Binary test
 460 
    /*package-private*/
    // Per-lane comparison callback: yields the boolean mask bit for
    // lane i given the comparison code and the two lane operands.
    interface FBinTest {
        boolean apply(int cond, int i, double a, double b);
    }
 465 
 466     /*package-private*/
 467     @ForceInline
 468     final
 469     AbstractMask<Double> bTest(int cond,
 470                                   Vector<Double> o,
 471                                   FBinTest f) {
 472         double[] vec1 = vec();
 473         double[] vec2 = ((DoubleVector)o).vec();
 474         boolean[] bits = new boolean[length()];
 475         for (int i = 0; i < length(); i++){
 476             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 477         }
 478         return maskFactory(bits);
 479     }
 480 
 481 
 482     /*package-private*/
 483     @Override
 484     abstract DoubleSpecies vspecies();
 485 
    /*package-private*/
    // Raw (not canonicalizing) conversion: preserves NaN bit patterns.
    @ForceInline
    static long toBits(double e) {
        return  Double.doubleToRawLongBits(e);
    }

    /*package-private*/
    // Inverse of toBits; the cast is redundant here (class-level
    // @SuppressWarnings("cast") covers it in this generated file).
    @ForceInline
    static double fromBits(long bits) {
        return Double.longBitsToDouble((long)bits);
    }
 497 
 498     static DoubleVector expandHelper(Vector<Double> v, VectorMask<Double> m) {
 499         VectorSpecies<Double> vsp = m.vectorSpecies();
 500         DoubleVector r  = (DoubleVector) vsp.zero();
 501         DoubleVector vi = (DoubleVector) v;
 502         if (m.allTrue()) {
 503             return vi;
 504         }
 505         for (int i = 0, j = 0; i < vsp.length(); i++) {
 506             if (m.laneIsSet(i)) {
 507                 r = r.withLane(i, vi.lane(j++));
 508             }
 509         }
 510         return r;
 511     }
 512 
 513     static DoubleVector compressHelper(Vector<Double> v, VectorMask<Double> m) {
 514         VectorSpecies<Double> vsp = m.vectorSpecies();
 515         DoubleVector r  = (DoubleVector) vsp.zero();
 516         DoubleVector vi = (DoubleVector) v;
 517         if (m.allTrue()) {
 518             return vi;
 519         }
 520         for (int i = 0, j = 0; i < vsp.length(); i++) {
 521             if (m.laneIsSet(i)) {
 522                 r = r.withLane(j++, vi.lane(i));
 523             }
 524         }
 525         return r;
 526     }
 527 
 528     // Static factories (other than memory operations)
 529 
 530     // Note: A surprising behavior in javadoc
 531     // sometimes makes a lone /** {@inheritDoc} */
 532     // comment drop the method altogether,
 533     // apparently if the method mentions an
 534     // parameter or return type of Vector<Double>
 535     // instead of Vector<E> as originally specified.
 536     // Adding an empty HTML fragment appears to
 537     // nudge javadoc into providing the desired
 538     // inherited documentation.  We use the HTML
 539     // comment <!--workaround--> for this.
 540 
 541     /**
 542      * Returns a vector of the given species
 543      * where all lane elements are set to
 544      * zero, the default primitive value.
 545      *
 546      * @param species species of the desired zero vector
 547      * @return a zero vector

1682      * {@inheritDoc} <!--workaround-->
1683      */
    @Override
    @ForceInline
    public final
    DoubleVector neg() {
        // Convenience alias for the lane-wise NEG operator.
        return lanewise(NEG);
    }
1690 
    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    @ForceInline
    public final
    DoubleVector abs() {
        // Convenience alias for the lane-wise ABS operator.
        return lanewise(ABS);
    }
1700 
1701 
1702 
1703     // sqrt
1704     /**
1705      * Computes the square root of this vector.
1706      *
1707      * This is a lane-wise unary operation which applies an operation
1708      * conforming to the specification of
1709      * {@link Math#sqrt Math.sqrt(a)}
1710      * to each lane value.
1711      *
1712      * This method is also equivalent to the expression
1713      * {@link #lanewise(VectorOperators.Unary)
1714      *    lanewise}{@code (}{@link VectorOperators#SQRT
1715      *    SQRT}{@code )}.
1716      *
1717      * @return the square root of this vector
1718      * @see VectorOperators#SQRT
1719      * @see #lanewise(VectorOperators.Unary,VectorMask)
1720      */
1721     @ForceInline
1722     public final DoubleVector sqrt() {

2330         double[] a = toArray();
2331         int[] sa = new int[a.length];
2332         for (int i = 0; i < a.length; i++) {
2333             sa[i] = (int) a[i];
2334         }
2335         return VectorShuffle.fromArray(dsp, sa, 0);
2336     }
2337 
    /*package-private*/
    // Convert lane values to shuffle source indexes via an intrinsic
    // cast to the byte-based shuffle representation; the method
    // reference is the scalar fallback.
    @ForceInline
    final
    VectorShuffle<Double> toShuffleTemplate(Class<?> shuffleType) {
        DoubleSpecies vsp = vspecies();
        return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                                     getClass(), double.class, length(),
                                     shuffleType, byte.class, length(),
                                     this, vsp,
                                     DoubleVector::toShuffle0);
    }
2349 
2350     /**
2351      * {@inheritDoc} <!--workaround-->
2352      * @since 19
2353      */
2354     @Override
2355     public abstract
2356     DoubleVector compress(VectorMask<Double> m);
2357 
    /*package-private*/
    // Intrinsic entry point for compress(m); compressHelper is the
    // scalar fallback when the JIT does not intrinsify.
    @ForceInline
    final
    <M extends AbstractMask<Double>>
    DoubleVector compressTemplate(Class<M> masktype, M m) {
      m.check(masktype, this);
      return (DoubleVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
                                                   double.class, length(), this, m,
                                                   (v1, m1) -> compressHelper(v1, m1));
    }
2368 
2369     /**
2370      * {@inheritDoc} <!--workaround-->
2371      * @since 19
2372      */
2373     @Override
2374     public abstract
2375     DoubleVector expand(VectorMask<Double> m);
2376 
    /*package-private*/
    // Intrinsic entry point for expand(m); expandHelper is the scalar
    // fallback when the JIT does not intrinsify.
    @ForceInline
    final
    <M extends AbstractMask<Double>>
    DoubleVector expandTemplate(Class<M> masktype, M m) {
      m.check(masktype, this);
      return (DoubleVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
                                                   double.class, length(), this, m,
                                                   (v1, m1) -> expandHelper(v1, m1));
    }
2387 
2388 
2389     /**
2390      * {@inheritDoc} <!--workaround-->
2391      */
2392     @Override
2393     public abstract
2394     DoubleVector selectFrom(Vector<Double> v);
2395 
    /*package-private*/
    // selectFrom is rearrange with the roles reversed: this vector's
    // lane values act as source indexes into v.
    @ForceInline
    final DoubleVector selectFromTemplate(DoubleVector v) {
        return v.rearrange(this.toShuffle());
    }
2401 
2402     /**
2403      * {@inheritDoc} <!--workaround-->
2404      */
2405     @Override
2406     public abstract
2407     DoubleVector selectFrom(Vector<Double> s, VectorMask<Double> m);
2408 

2737         for (int i = 0; i < a.length; i++) {
2738             double e = a[i];
2739             res[i] = DoubleSpecies.toIntegralChecked(e, false);
2740         }
2741         return res;
2742     }
2743 
    /** {@inheritDoc} <!--workaround-->
     * @implNote
     * This is an alias for {@link #toArray()}.
     * When this method is used on vectors
     * of type {@code DoubleVector},
     * there will be no loss of precision.
     */
    @ForceInline
    @Override
    public final double[] toDoubleArray() {
        return toArray();
    }
2756 




















































































2757     /**
2758      * Loads a vector from an array of type {@code double[]}
2759      * starting at an offset.
2760      * For each vector lane, where {@code N} is the vector lane index, the
2761      * array element at index {@code offset + N} is placed into the
2762      * resulting vector at lane index {@code N}.
2763      *
2764      * @param species species of desired vector
2765      * @param a the array
2766      * @param offset the offset into the array
2767      * @return the vector loaded from an array
2768      * @throws IndexOutOfBoundsException
2769      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2770      *         for any lane {@code N} in the vector
2771      */
2772     @ForceInline
2773     public static
2774     DoubleVector fromArray(VectorSpecies<Double> species,
2775                                    double[] a, int offset) {
2776         offset = checkFromIndexSize(offset, species.length(), a.length);

2927      * @see DoubleVector#toIntArray()
2928      */
2929     @ForceInline
2930     public static
2931     DoubleVector fromArray(VectorSpecies<Double> species,
2932                                    double[] a, int offset,
2933                                    int[] indexMap, int mapOffset,
2934                                    VectorMask<Double> m) {
2935         if (m.allTrue()) {
2936             return fromArray(species, a, offset, indexMap, mapOffset);
2937         }
2938         else {
2939             DoubleSpecies vsp = (DoubleSpecies) species;
2940             return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
2941         }
2942     }
2943 
2944 
2945 
2946     /**
2947      * Loads a vector from a {@linkplain MemorySegment memory segment}
2948      * starting at an offset into the memory segment.
2949      * Bytes are composed into primitive lane elements according
2950      * to the specified byte order.
2951      * The vector is arranged into lanes according to
2952      * <a href="Vector.html#lane-order">memory ordering</a>.
2953      * <p>
2954      * This method behaves as if it returns the result of calling
2955      * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
2956      * fromMemorySegment()} as follows:
2957      * <pre>{@code
2958      * var m = species.maskAll(true);
2959      * return fromMemorySegment(species, ms, offset, bo, m);
2960      * }</pre>
2961      *
2962      * @param species species of desired vector
2963      * @param ms the memory segment
2964      * @param offset the offset into the memory segment
2965      * @param bo the intended byte order
2966      * @return a vector loaded from the memory segment
2967      * @throws IndexOutOfBoundsException
2968      *         if {@code offset+N*8 < 0}
2969      *         or {@code offset+N*8 >= ms.byteSize()}
2970      *         for any lane {@code N} in the vector
2971      * @throws IllegalArgumentException if the memory segment is a heap segment that is
2972      *         not backed by a {@code byte[]} array.
2973      * @throws IllegalStateException if the memory segment's session is not alive,
2974      *         or if access occurs from a thread other than the thread owning the session.
2975      * @since 19
2976      */
    @ForceInline
    public static
    DoubleVector fromMemorySegment(VectorSpecies<Double> species,
                                           MemorySegment ms, long offset,
                                           ByteOrder bo) {
        // Bounds-check the whole vector span, load in native order,
        // then swap lanes' bytes if the requested order differs.
        offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
        DoubleSpecies vsp = (DoubleSpecies) species;
        return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
    }
2986 
2987     /**
2988      * Loads a vector from a {@linkplain MemorySegment memory segment}
2989      * starting at an offset into the memory segment
2990      * and using a mask.
2991      * Lanes where the mask is unset are filled with the default
2992      * value of {@code double} (positive zero).
2993      * Bytes are composed into primitive lane elements according
2994      * to the specified byte order.
2995      * The vector is arranged into lanes according to
2996      * <a href="Vector.html#lane-order">memory ordering</a>.
2997      * <p>
2998      * The following pseudocode illustrates the behavior:
2999      * <pre>{@code
3000      * var slice = ms.asSlice(offset);


3001      * double[] ar = new double[species.length()];
3002      * for (int n = 0; n < ar.length; n++) {
3003      *     if (m.laneIsSet(n)) {
     *         ar[n] = slice.getAtIndex(ValueLayout.JAVA_DOUBLE.withBitAlignment(8), n);
3005      *     }
3006      * }
3007      * DoubleVector r = DoubleVector.fromArray(species, ar, 0);
3008      * }</pre>
3009      * @implNote
3010      * This operation is likely to be more efficient if
3011      * the specified byte order is the same as
3012      * {@linkplain ByteOrder#nativeOrder()
3013      * the platform native order},
3014      * since this method will not need to reorder
3015      * the bytes of lane values.
3016      *
3017      * @param species species of desired vector
3018      * @param ms the memory segment
3019      * @param offset the offset into the memory segment
3020      * @param bo the intended byte order
3021      * @param m the mask controlling lane selection
3022      * @return a vector loaded from the memory segment
3023      * @throws IndexOutOfBoundsException
3024      *         if {@code offset+N*8 < 0}
3025      *         or {@code offset+N*8 >= ms.byteSize()}
3026      *         for any lane {@code N} in the vector
3027      *         where the mask is set
3028      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3029      *         not backed by a {@code byte[]} array.
3030      * @throws IllegalStateException if the memory segment's session is not alive,
3031      *         or if access occurs from a thread other than the thread owning the session.
3032      * @since 19
3033      */
    @ForceInline
    public static
    DoubleVector fromMemorySegment(VectorSpecies<Double> species,
                                           MemorySegment ms, long offset,
                                           ByteOrder bo,
                                           VectorMask<Double> m) {
        DoubleSpecies vsp = (DoubleSpecies) species;
        // Fast path: the full vector span is in bounds, so the masked
        // intrinsic load needs no per-lane index checks.
        if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
            return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
        }

        // Slow path: bounds-check only the set lanes, then load lane-by-lane.
        // FIXME: optimize
        checkMaskFromIndexSize(offset, vsp, m, 8, ms.byteSize());
        return vsp.ldLongOp(ms, offset, m, DoubleVector::memorySegmentGet);
    }
3049 
3050     // Memory store operations
3051 
3052     /**
3053      * Stores this vector into an array of type {@code double[]}
3054      * starting at an offset.
3055      * <p>
3056      * For each vector lane, where {@code N} is the vector lane index,
3057      * the lane element at index {@code N} is stored into the array
3058      * element {@code a[offset+N]}.
3059      *
3060      * @param a the array, of type {@code double[]}
3061      * @param offset the offset into the array
3062      * @throws IndexOutOfBoundsException
3063      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3064      *         for any lane {@code N} in the vector
3065      */
    @ForceInline
    public final
    void intoArray(double[] a, int offset) {
        // Bounds-check the full lane span, then store via the intrinsic;
        // the lambda is the scalar fallback.
        offset = checkFromIndexSize(offset, length(), a.length);
        DoubleSpecies vsp = vspecies();
        VectorSupport.store(
            vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
            a, arrayAddress(a, offset),
            this,
            a, offset,
            (arr, off, v)
            -> v.stOp(arr, (int) off,
                      (arr_, off_, i, e) -> arr_[off_ + i] = e));
    }
3080 
3081     /**
3082      * Stores this vector into an array of type {@code double[]}
3083      * starting at offset and using a mask.
3084      * <p>
3085      * For each vector lane, where {@code N} is the vector lane index,
3086      * the lane element at index {@code N} is stored into the array
3087      * element {@code a[offset+N]}.
3088      * If the mask lane at {@code N} is unset then the corresponding
3089      * array element {@code a[offset+N]} is left unchanged.
3090      * <p>
3091      * Array range checking is done for lanes where the mask is set.
3092      * Lanes where the mask is unset are not stored and do not need
3093      * to correspond to legitimate elements of {@code a}.
3094      * That is, unset lanes may correspond to array indexes less than
3095      * zero or beyond the end of the array.
3096      *
3097      * @param a the array, of type {@code double[]}

3217      *         where the mask is set
3218      * @see DoubleVector#toIntArray()
3219      */
3220     @ForceInline
3221     public final
3222     void intoArray(double[] a, int offset,
3223                    int[] indexMap, int mapOffset,
3224                    VectorMask<Double> m) {
             // An all-true mask degenerates to the unmasked scatter, avoiding
             // the masked slow path entirely.
3225         if (m.allTrue()) {
3226             intoArray(a, offset, indexMap, mapOffset);
3227         }
3228         else {
3229             intoArray0(a, offset, indexMap, mapOffset, m);
3230         }
3231     }
3232 
3233 
3234 
3235     /**
3236      * {@inheritDoc} <!--workaround-->
3237      * @since 19
3238      */
3239     @Override
3240     @ForceInline
3241     public final
3242     void intoMemorySegment(MemorySegment ms, long offset,
3243                            ByteOrder bo) {
             // Reject writes to read-only segments before any bounds work.
3244         if (ms.isReadOnly()) {
3245             throw new UnsupportedOperationException("Attempt to write a read-only segment");
3246         }
3247 
             // Range-check the full vector width, swap lanes to the requested
             // byte order if it differs from native, then store intrinsically.
3248         offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
3249         maybeSwap(bo).intoMemorySegment0(ms, offset);
3250     }
3251 
3252     /**
3253      * {@inheritDoc} <!--workaround-->
3254      * @since 19
3255      */
3256     @Override
3257     @ForceInline
3258     public final
3259     void intoMemorySegment(MemorySegment ms, long offset,
3260                            ByteOrder bo,
3261                            VectorMask<Double> m) {
             // All-true mask: delegate to the cheaper unmasked store (which
             // performs its own read-only and bounds checks).
3262         if (m.allTrue()) {
3263             intoMemorySegment(ms, offset, bo);
3264         } else {
3265             if (ms.isReadOnly()) {
3266                 throw new UnsupportedOperationException("Attempt to write a read-only segment");
3267             }
3268             DoubleSpecies vsp = vspecies();
                 // Bounds are checked only for set lanes (8 bytes per lane).
3269             checkMaskFromIndexSize(offset, vsp, m, 8, ms.byteSize());
3270             maybeSwap(bo).intoMemorySegment0(ms, offset, m);
3271         }
3272     }
3273 
3274     // ================================================
3275 
3276     // Low-level memory operations.
3277     //
3278     // Note that all of these operations *must* inline into a context
3279     // where the exact species of the involved vector is a
3280     // compile-time constant.  Otherwise, the intrinsic generation
3281     // will fail and performance will suffer.
3282     //
3283     // In many cases this is achieved by re-deriving a version of the
3284     // method in each concrete subclass (per species).  The re-derived
3285     // method simply calls one of these generic methods, with exact
3286     // parameters for the controlling metadata, which is either a
3287     // typed vector or constant species instance.
3288 
3289     // Unchecked loading operations in native byte order.
3290     // Caller is responsible for applying index checks, masking, and
3291     // byte swapping.
3292 
         // Unchecked, unmasked array load in native byte order.  The caller
         // is responsible for index checks and byte swapping; must inline
         // with a compile-time-constant species (see note above).
3293     /*package-private*/
3294     abstract
3295     DoubleVector fromArray0(double[] a, int offset);
3296     @ForceInline
3297     final
3298     DoubleVector fromArray0Template(double[] a, int offset) {
3299         DoubleSpecies vsp = vspecies();
3300         return VectorSupport.load(
3301             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3302             a, arrayAddress(a, offset),
3303             a, offset, vsp,
             // Scalar fallback when the intrinsic is not applied.
3304             (arr, off, s) -> s.ldOp(arr, (int) off,
3305                                     (arr_, off_, i) -> arr_[off_ + i]));
3306     }
3307 
         // Masked variant of the unchecked array load; caller has already
         // applied any index checks for set lanes.
3308     /*package-private*/
3309     abstract
3310     DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m);
3311     @ForceInline
3312     final
3313     <M extends VectorMask<Double>>
3314     DoubleVector fromArray0Template(Class<M> maskClass, double[] a, int offset, M m) {
             // Reject a mask of a different species before touching memory.
3315         m.check(species());
3316         DoubleSpecies vsp = vspecies();
3317         return VectorSupport.loadMasked(
3318             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3319             a, arrayAddress(a, offset), m,
3320             a, offset, vsp,
             // Scalar fallback when the intrinsic is not applied.
3321             (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
3322                                         (arr_, off_, i) -> arr_[off_ + i]));
3323     }
3324 
3325     /*package-private*/
3326     abstract
3327     DoubleVector fromArray0(double[] a, int offset,
3328                                     int[] indexMap, int mapOffset,
3329                                     VectorMask<Double> m);
3330     @ForceInline
3331     final
3332     <M extends VectorMask<Double>>
3333     DoubleVector fromArray0Template(Class<M> maskClass, double[] a, int offset,
3334                                             int[] indexMap, int mapOffset, M m) {
3335         DoubleSpecies vsp = vspecies();
3336         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3337         Objects.requireNonNull(a);
3338         Objects.requireNonNull(indexMap);
3339         m.check(vsp);
3340         Class<? extends DoubleVector> vectorType = vsp.vectorType();
3341 

3359         } else {
3360             vix = IntVector
3361                 .fromArray(isp, indexMap, mapOffset)
3362                 .add(offset);
3363         }
3364 
3365         // FIXME: Check index under mask controlling.
3366         vix = VectorIntrinsics.checkIndex(vix, a.length);
3367 
3368         return VectorSupport.loadWithMap(
3369             vectorType, maskClass, double.class, vsp.laneCount(),
3370             isp.vectorType(),
3371             a, ARRAY_BASE, vix, m,
3372             a, offset, indexMap, mapOffset, vsp,
3373             (c, idx, iMap, idy, s, vm) ->
3374             s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3375     }
3376 
3377 
3378 

         // Unchecked segment load in native byte order; caller handles
         // bounds checks and byte swapping.
3379     abstract
3380     DoubleVector fromMemorySegment0(MemorySegment bb, long offset);
3381     @ForceInline
3382     final
3383     DoubleVector fromMemorySegment0Template(MemorySegment ms, long offset) {
3384         DoubleSpecies vsp = vspecies();
             // NOTE(review): routed through ScopedMemoryAccess — presumably
             // to coordinate with segment-scope closure; confirm against that
             // class before relying on lifetime guarantees here.
3385         return ScopedMemoryAccess.loadFromMemorySegment(
3386                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3387                 (MemorySegmentProxy) ms, offset, vsp,
3388                 (msp, off, s) -> {
                     // Scalar fallback: lane-by-lane load via memorySegmentGet.
3389                     return s.ldLongOp((MemorySegment) msp, off, DoubleVector::memorySegmentGet);
3390                 });
3391     }
3392 
         // Masked variant of the unchecked segment load.
3393     abstract
3394     DoubleVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Double> m);
3395     @ForceInline
3396     final
3397     <M extends VectorMask<Double>>
3398     DoubleVector fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
3399         DoubleSpecies vsp = vspecies();
             // Reject a mask of a different species before touching memory.
3400         m.check(vsp);
3401         return ScopedMemoryAccess.loadFromMemorySegmentMasked(
3402                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3403                 (MemorySegmentProxy) ms, offset, m, vsp,
3404                 (msp, off, s, vm) -> {
                     // Scalar fallback: masked lane-by-lane load.
3405                     return s.ldLongOp((MemorySegment) msp, off, vm, DoubleVector::memorySegmentGet);
3406                 });
3407     }
3408 
3409     // Unchecked storing operations in native byte order.
3410     // Caller is responsible for applying index checks, masking, and
3411     // byte swapping.
3412 
         // Unchecked, unmasked array store in native byte order; caller has
         // already applied index checks and byte swapping.
3413     abstract
3414     void intoArray0(double[] a, int offset);
3415     @ForceInline
3416     final
3417     void intoArray0Template(double[] a, int offset) {
3418         DoubleSpecies vsp = vspecies();
3419         VectorSupport.store(
3420             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3421             a, arrayAddress(a, offset),
3422             this, a, offset,
             // Scalar fallback when the intrinsic is not applied.
3423             (arr, off, v)
3424             -> v.stOp(arr, (int) off,
3425                       (arr_, off_, i, e) -> arr_[off_+i] = e));
3426     }
3427 
         // Masked variant of the unchecked array store.
3428     abstract
3429     void intoArray0(double[] a, int offset, VectorMask<Double> m);
3430     @ForceInline
3431     final
3432     <M extends VectorMask<Double>>
3433     void intoArray0Template(Class<M> maskClass, double[] a, int offset, M m) {
             // Reject a mask of a different species before touching memory.
3434         m.check(species());
3435         DoubleSpecies vsp = vspecies();
3436         VectorSupport.storeMasked(
3437             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3438             a, arrayAddress(a, offset),
3439             this, m, a, offset,
             // Scalar fallback when the intrinsic is not applied.
3440             (arr, off, v, vm)
3441             -> v.stOp(arr, (int) off, vm,
3442                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3443     }
3444 
3445     abstract
3446     void intoArray0(double[] a, int offset,
3447                     int[] indexMap, int mapOffset,
3448                     VectorMask<Double> m);
3449     @ForceInline
3450     final
3451     <M extends VectorMask<Double>>
3452     void intoArray0Template(Class<M> maskClass, double[] a, int offset,
3453                             int[] indexMap, int mapOffset, M m) {
3454         m.check(species());
3455         DoubleSpecies vsp = vspecies();
3456         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3457         if (vsp.laneCount() == 1) {
3458             intoArray(a, offset + indexMap[mapOffset], m);
3459             return;
3460         }
3461 

3479 
3480 
3481         // FIXME: Check index under mask controlling.
3482         vix = VectorIntrinsics.checkIndex(vix, a.length);
3483 
3484         VectorSupport.storeWithMap(
3485             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3486             isp.vectorType(),
3487             a, arrayAddress(a, 0), vix,
3488             this, m,
3489             a, offset, indexMap, mapOffset,
3490             (arr, off, v, map, mo, vm)
3491             -> v.stOp(arr, off, vm,
3492                       (arr_, off_, i, e) -> {
3493                           int j = map[mo + i];
3494                           arr[off + j] = e;
3495                       }));
3496     }
3497 
3498 



















         // Unchecked segment store in native byte order; caller handles
         // bounds checks, read-only rejection, and byte swapping.
3499     @ForceInline
3500     final
3501     void intoMemorySegment0(MemorySegment ms, long offset) {
3502         DoubleSpecies vsp = vspecies();
3503         ScopedMemoryAccess.storeIntoMemorySegment(
3504                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3505                 this,
3506                 (MemorySegmentProxy) ms, offset,
3507                 (msp, off, v) -> {
                     // Scalar fallback: lane-by-lane store via memorySegmentSet.
3508                     v.stLongOp((MemorySegment) msp, off, DoubleVector::memorySegmentSet);
3509                 });
3510     }
3511 
         // Masked variant of the unchecked segment store.
3512     abstract
3513     void intoMemorySegment0(MemorySegment bb, long offset, VectorMask<Double> m);
3514     @ForceInline
3515     final
3516     <M extends VectorMask<Double>>
3517     void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
3518         DoubleSpecies vsp = vspecies();
             // Reject a mask of a different species before touching memory.
3519         m.check(vsp);
3520         ScopedMemoryAccess.storeIntoMemorySegmentMasked(
3521                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3522                 this, m,
3523                 (MemorySegmentProxy) ms, offset,
3524                 (msp, off, v, vm) -> {
                     // Scalar fallback: masked lane-by-lane store.
3525                     v.stLongOp((MemorySegment) msp, off, vm, DoubleVector::memorySegmentSet);
3526                 });
3527     }
3528 
3529 
3530     // End of low-level memory operations.
3531 
         // Per-lane range check for masked int-offset accesses: validates
         // offset + N*scale against [0, limit) for each set lane N (see
         // AbstractMask.checkIndexByLane for the failure behavior).
3532     private static
3533     void checkMaskFromIndexSize(int offset,
3534                                 DoubleSpecies vsp,
3535                                 VectorMask<Double> m,
3536                                 int scale,
3537                                 int limit) {
3538         ((AbstractMask<Double>)m)
3539             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3540     }
3541 
         // Long-offset overload of the per-lane masked range check, used for
         // MemorySegment accesses whose offsets and sizes are longs.
3542     private static
3543     void checkMaskFromIndexSize(long offset,
3544                                 DoubleSpecies vsp,
3545                                 VectorMask<Double> m,
3546                                 int scale,
3547                                 long limit) {
3548         ((AbstractMask<Double>)m)
3549             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3550     }
3551 
         // Placeholder: a masked store whose window extends outside
         // [0, limit) is not yet implemented, so such requests fail with an
         // AssertionError carrying the offending offset, mask, and species.
3552     @ForceInline
3553     private void conditionalStoreNYI(int offset,
3554                                      DoubleSpecies vsp,
3555                                      VectorMask<Double> m,
3556                                      int scale,
3557                                      int limit) {
3558         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3559             String msg =
3560                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3561                               offset, limit, m, vsp);
3562             throw new AssertionError(msg);
3563         }
3564     }
3565 
3566     /*package-private*/
3567     @Override
3568     @ForceInline
3569     final
3570     DoubleVector maybeSwap(ByteOrder bo) {
3571         if (bo != NATIVE_ENDIAN) {

3842                 }
3843             }
3844             return dummyVector().vectorFactory(res);
3845         }
3846 
3847         /*package-private*/
3848         @ForceInline
             // Unmasked load-op: delegates to this species' dummy vector,
             // which supplies the concrete lane logic.
3849         <M> DoubleVector ldOp(M memory, int offset,
3850                                       FLdOp<M> f) {
3851             return dummyVector().ldOp(memory, offset, f);
3852         }
3853 
3854         /*package-private*/
3855         @ForceInline
             // Masked load-op: delegates to this species' dummy vector.
3856         <M> DoubleVector ldOp(M memory, int offset,
3857                                       VectorMask<Double> m,
3858                                       FLdOp<M> f) {
3859             return dummyVector().ldOp(memory, offset, m, f);
3860         }
3861 
3862         /*package-private*/
3863         @ForceInline
             // Long-offset segment load-op: delegates to the dummy vector.
3864         DoubleVector ldLongOp(MemorySegment memory, long offset,
3865                                       FLdLongOp f) {
3866             return dummyVector().ldLongOp(memory, offset, f);
3867         }
3868 
3869         /*package-private*/
3870         @ForceInline
             // Masked long-offset segment load-op: delegates to the dummy vector.
3871         DoubleVector ldLongOp(MemorySegment memory, long offset,
3872                                       VectorMask<Double> m,
3873                                       FLdLongOp f) {
3874             return dummyVector().ldLongOp(memory, offset, m, f);
3875         }
3876 
3877         /*package-private*/
3878         @ForceInline
             // Unmasked store-op: delegates to this species' dummy vector.
3879         <M> void stOp(M memory, int offset, FStOp<M> f) {
3880             dummyVector().stOp(memory, offset, f);
3881         }
3882 
3883         /*package-private*/
3884         @ForceInline
             // Masked store-op: delegates to this species' dummy vector.
3885         <M> void stOp(M memory, int offset,
3886                       AbstractMask<Double> m,
3887                       FStOp<M> f) {
3888             dummyVector().stOp(memory, offset, m, f);
3889         }
3890 
3891         /*package-private*/
3892         @ForceInline
             // Long-offset segment store-op: delegates to the dummy vector.
3893         void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
3894             dummyVector().stLongOp(memory, offset, f);
3895         }
3896 
3897         /*package-private*/
3898         @ForceInline
             // Masked long-offset segment store-op: delegates to the dummy vector.
3899         void stLongOp(MemorySegment memory, long offset,
3900                       AbstractMask<Double> m,
3901                       FStLongOp f) {
3902             dummyVector().stLongOp(memory, offset, m, f);
3903         }
3904 
3905         // N.B. Make sure these constant vectors and
3906         // masks load up correctly into registers.
3907         //
3908         // Also, see if we can avoid all that switching.
3909         // Could we cache both vectors and both masks in
3910         // this species object?
3911 
3912         // Zero and iota vector access
3913         @Override
3914         @ForceInline
3915         public final DoubleVector zero() {
3916             if ((Class<?>) vectorType() == DoubleMaxVector.class)
3917                 return DoubleMaxVector.ZERO;
3918             switch (vectorBitSize()) {
3919                 case 64: return Double64Vector.ZERO;
3920                 case 128: return Double128Vector.ZERO;
3921                 case 256: return Double256Vector.ZERO;
3922                 case 512: return Double512Vector.ZERO;
3923             }
3924             throw new AssertionError();
< prev index next >