/*
 * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.incubator.vector;

import java.nio.ByteOrder;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.Function;

import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ValueLayout;
import jdk.internal.access.foreign.MemorySegmentProxy;
import jdk.internal.misc.ScopedMemoryAccess;
import jdk.internal.misc.Unsafe;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;

import static jdk.internal.vm.vector.VectorSupport.*;
import static jdk.incubator.vector.VectorIntrinsics.*;

import static jdk.incubator.vector.VectorOperators.*;

// -- This file was mechanically generated: Do not edit! -- //

/**
 * A specialized {@link Vector} representing an ordered immutable sequence of
 * {@code int} values.
 */
@SuppressWarnings("cast")  // warning: redundant cast
public abstract class IntVector extends AbstractVector<Integer> {

    IntVector(int[] vec) {
        super(vec);
    }

    static final int FORBID_OPCODE_KIND = VO_ONLYFP;

    static final ValueLayout.OfInt ELEMENT_LAYOUT = ValueLayout.JAVA_INT.withBitAlignment(8);

    @ForceInline
    static int opCode(Operator op) {
        return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
    }
    @ForceInline
    static int opCode(Operator op, int requireKind) {
        requireKind |= VO_OPCODE_VALID;
        return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
    }
    @ForceInline
    static boolean opKind(Operator op, int bit) {
        return VectorOperators.opKind(op, bit);
    }

    // Virtualized factories and operators,
    // coded with portable definitions.
    // These are all @ForceInline in case
    // they need to be used performantly.
    // The various shape-specific subclasses
    // also specialize them by wrapping
    // them in a call like this:
    //    return (Byte128Vector)
    //       super.bOp((Byte128Vector) o);
    // The purpose of that is to forcibly inline
    // the generic definition from this file
    // into a sharply type- and size-specific
    // wrapper in the subclass file, so that
    // the JIT can specialize the code.
    // The code is only inlined and expanded
    // if it gets hot.  Think of it as a cheap
    // and lazy version of C++ templates.

    // Virtualized getter

    /*package-private*/
    abstract int[] vec();

    // Virtualized constructors

    /**
     * Build a vector directly using my own constructor.
     * It is an error if the array is aliased elsewhere.
     */
    /*package-private*/
    abstract IntVector vectorFactory(int[] vec);

    /**
     * Build a mask directly using my species.
     * It is an error if the array is aliased elsewhere.
     */
    /*package-private*/
    @ForceInline
    final
    AbstractMask<Integer> maskFactory(boolean[] bits) {
        return vspecies().maskFactory(bits);
    }

    // Constant loader (takes dummy as vector arg)
    interface FVOp {
        int apply(int i);
    }

    /*package-private*/
    @ForceInline
    final
    IntVector vOp(FVOp f) {
        int[] res = new int[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i);
        }
        return vectorFactory(res);
    }

    @ForceInline
    final
    IntVector vOp(VectorMask<Integer> m, FVOp f) {
        int[] res = new int[length()];
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(i);
            }
        }
        return vectorFactory(res);
    }

    // Unary operator

    /*package-private*/
    interface FUnOp {
        int apply(int i, int a);
    }

    /*package-private*/
    abstract
    IntVector uOp(FUnOp f);
    @ForceInline
    final
    IntVector uOpTemplate(FUnOp f) {
        int[] vec = vec();
        int[] res = new int[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i, vec[i]);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    abstract
    IntVector uOp(VectorMask<Integer> m,
                             FUnOp f);
    @ForceInline
    final
    IntVector uOpTemplate(VectorMask<Integer> m,
                                     FUnOp f) {
        if (m == null) {
            return uOpTemplate(f);
        }
        int[] vec = vec();
        int[] res = new int[length()];
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            res[i] = mbits[i] ? f.apply(i, vec[i]) : vec[i];
        }
        return vectorFactory(res);
    }

    // Binary operator

    /*package-private*/
    interface FBinOp {
        int apply(int i, int a, int b);
    }

    /*package-private*/
    abstract
    IntVector bOp(Vector<Integer> o,
                             FBinOp f);
    @ForceInline
    final
    IntVector bOpTemplate(Vector<Integer> o,
                                     FBinOp f) {
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o).vec();
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i, vec1[i], vec2[i]);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    abstract
    IntVector bOp(Vector<Integer> o,
                             VectorMask<Integer> m,
                             FBinOp f);
    @ForceInline
    final
    IntVector bOpTemplate(Vector<Integer> o,
                                     VectorMask<Integer> m,
                                     FBinOp f) {
        if (m == null) {
            return bOpTemplate(o, f);
        }
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o).vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            res[i] = mbits[i] ? f.apply(i, vec1[i], vec2[i]) : vec1[i];
        }
        return vectorFactory(res);
    }

    // Ternary operator

    /*package-private*/
    interface FTriOp {
        int apply(int i, int a, int b, int c);
    }

    /*package-private*/
    abstract
    IntVector tOp(Vector<Integer> o1,
                             Vector<Integer> o2,
                             FTriOp f);
    @ForceInline
    final
    IntVector tOpTemplate(Vector<Integer> o1,
                                     Vector<Integer> o2,
                                     FTriOp f) {
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o1).vec();
        int[] vec3 = ((IntVector)o2).vec();
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i, vec1[i], vec2[i], vec3[i]);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    abstract
    IntVector tOp(Vector<Integer> o1,
                             Vector<Integer> o2,
                             VectorMask<Integer> m,
                             FTriOp f);
    @ForceInline
    final
    IntVector tOpTemplate(Vector<Integer> o1,
                                     Vector<Integer> o2,
                                     VectorMask<Integer> m,
                                     FTriOp f) {
        if (m == null) {
            return tOpTemplate(o1, o2, f);
        }
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o1).vec();
        int[] vec3 = ((IntVector)o2).vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            res[i] = mbits[i] ? f.apply(i, vec1[i], vec2[i], vec3[i]) : vec1[i];
        }
        return vectorFactory(res);
    }

    // Reduction operator

    /*package-private*/
    abstract
    int rOp(int v, VectorMask<Integer> m, FBinOp f);

    @ForceInline
    final
    int rOpTemplate(int v, VectorMask<Integer> m, FBinOp f) {
        if (m == null) {
            return rOpTemplate(v, f);
        }
        int[] vec = vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            v = mbits[i] ? f.apply(i, v, vec[i]) : v;
        }
        return v;
    }

    @ForceInline
    final
    int rOpTemplate(int v, FBinOp f) {
        int[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            v = f.apply(i, v, vec[i]);
        }
        return v;
    }

    // Memory reference

    /*package-private*/
    interface FLdOp<M> {
        int apply(M memory, int offset, int i);
    }

    /*package-private*/
    @ForceInline
    final
    <M> IntVector ldOp(M memory, int offset,
                                  FLdOp<M> f) {
        //dummy; no vec = vec();
        int[] res = new int[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(memory, offset, i);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    @ForceInline
    final
    <M> IntVector ldOp(M memory, int offset,
                                  VectorMask<Integer> m,
                                  FLdOp<M> f) {
        //int[] vec = vec();
        int[] res = new int[length()];
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(memory, offset, i);
            }
        }
        return vectorFactory(res);
    }

    /*package-private*/
    interface FLdLongOp {
        int apply(MemorySegment memory, long offset, int i);
    }

    /*package-private*/
    @ForceInline
    final
    IntVector ldLongOp(MemorySegment memory, long offset,
                                  FLdLongOp f) {
        //dummy; no vec = vec();
        int[] res = new int[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(memory, offset, i);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    @ForceInline
    final
    IntVector ldLongOp(MemorySegment memory, long offset,
                                  VectorMask<Integer> m,
                                  FLdLongOp f) {
        //int[] vec = vec();
        int[] res = new int[length()];
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(memory, offset, i);
            }
        }
        return vectorFactory(res);
    }

    static int memorySegmentGet(MemorySegment ms, long o, int i) {
        return ms.get(ELEMENT_LAYOUT, o + i * 4L);
    }

    interface FStOp<M> {
        void apply(M memory, int offset, int i, int a);
    }

    /*package-private*/
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  FStOp<M> f) {
        int[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            f.apply(memory, offset, i, vec[i]);
        }
    }

    /*package-private*/
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  VectorMask<Integer> m,
                  FStOp<M> f) {
        int[] vec = vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            if (mbits[i]) {
                f.apply(memory, offset, i, vec[i]);
            }
        }
    }

    interface FStLongOp {
        void apply(MemorySegment memory, long offset, int i, int a);
    }

    /*package-private*/
    @ForceInline
    final
    void stLongOp(MemorySegment memory, long offset,
                  FStLongOp f) {
        int[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            f.apply(memory, offset, i, vec[i]);
        }
    }

    /*package-private*/
    @ForceInline
    final
    void stLongOp(MemorySegment memory, long offset,
                  VectorMask<Integer> m,
                  FStLongOp f) {
        int[] vec = vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            if (mbits[i]) {
                f.apply(memory, offset, i, vec[i]);
            }
        }
    }

    static void memorySegmentSet(MemorySegment ms, long o, int i, int e) {
        ms.set(ELEMENT_LAYOUT, o + i * 4L, e);
    }
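
    // Usage sketch (illustrative only, not part of the generated API surface):
    // the two helpers above address lane i at byte offset o + i * 4 using the
    // byte-aligned ELEMENT_LAYOUT, so they work even on unaligned offsets.
    // MemorySegment.ofArray is an existing jdk.incubator.foreign factory;
    // the method name below is an editorial example, not generated code.
    private static int memorySegmentLaneSketch() {
        MemorySegment ms = MemorySegment.ofArray(new int[] {10, 20, 30, 40});
        memorySegmentSet(ms, 0L, 2, 99);      // overwrite lane 2 (bytes 8..11)
        return memorySegmentGet(ms, 0L, 2);   // reads back 99
    }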

    // Binary test

    /*package-private*/
    interface FBinTest {
        boolean apply(int cond, int i, int a, int b);
    }

    /*package-private*/
    @ForceInline
    final
    AbstractMask<Integer> bTest(int cond,
                                  Vector<Integer> o,
                                  FBinTest f) {
        int[] vec1 = vec();
        int[] vec2 = ((IntVector)o).vec();
        boolean[] bits = new boolean[length()];
        for (int i = 0; i < length(); i++){
            bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
        }
        return maskFactory(bits);
    }

    /*package-private*/
    @ForceInline
    static int rotateLeft(int a, int n) {
        return Integer.rotateLeft(a, n);
    }

    /*package-private*/
    @ForceInline
    static int rotateRight(int a, int n) {
        return Integer.rotateRight(a, n);
    }

    /*package-private*/
    @Override
    abstract IntSpecies vspecies();

    /*package-private*/
    @ForceInline
    static long toBits(int e) {
        return  e;
    }

    /*package-private*/
    @ForceInline
    static int fromBits(long bits) {
        return ((int)bits);
    }

    static IntVector expandHelper(Vector<Integer> v, VectorMask<Integer> m) {
        VectorSpecies<Integer> vsp = m.vectorSpecies();
        IntVector r  = (IntVector) vsp.zero();
        IntVector vi = (IntVector) v;
        if (m.allTrue()) {
            return vi;
        }
        for (int i = 0, j = 0; i < vsp.length(); i++) {
            if (m.laneIsSet(i)) {
                r = r.withLane(i, vi.lane(j++));
            }
        }
        return r;
    }

    static IntVector compressHelper(Vector<Integer> v, VectorMask<Integer> m) {
        VectorSpecies<Integer> vsp = m.vectorSpecies();
        IntVector r  = (IntVector) vsp.zero();
        IntVector vi = (IntVector) v;
        if (m.allTrue()) {
            return vi;
        }
        for (int i = 0, j = 0; i < vsp.length(); i++) {
            if (m.laneIsSet(i)) {
                r = r.withLane(j++, vi.lane(i));
            }
        }
        return r;
    }
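
    // Worked example (illustrative only): for a 4-lane species with
    // v = [10, 20, 30, 40] and mask = {T, F, T, F},
    //   compressHelper(v, m) packs the selected lanes downward -> [10, 30, 0, 0]
    //   expandHelper(v, m)   scatters the low lanes into the set positions -> [10, 0, 20, 0]
    // SPECIES_128 (four int lanes) and fromArray are defined later in this file;
    // the method name below is an editorial example, not generated code.
    private static void compressExpandSketch() {
        VectorSpecies<Integer> sp = IntVector.SPECIES_128;
        IntVector v = IntVector.fromArray(sp, new int[] {10, 20, 30, 40}, 0);
        VectorMask<Integer> m = VectorMask.fromValues(sp, true, false, true, false);
        assert Arrays.equals(compressHelper(v, m).toArray(), new int[] {10, 30, 0, 0});
        assert Arrays.equals(expandHelper(v, m).toArray(), new int[] {10, 0, 20, 0});
    }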

    // Static factories (other than memory operations)

    // Note: A surprising behavior in javadoc
    // sometimes makes a lone /** {@inheritDoc} */
    // comment drop the method altogether,
    // apparently if the method mentions a
    // parameter or return type of Vector<Integer>
    // instead of Vector<E> as originally specified.
    // Adding an empty HTML fragment appears to
    // nudge javadoc into providing the desired
    // inherited documentation.  We use the HTML
    // comment <!--workaround--> for this.

    /**
     * Returns a vector of the given species
     * where all lane elements are set to
     * zero, the default primitive value.
     *
     * @param species species of the desired zero vector
     * @return a zero vector
     */
    @ForceInline
    public static IntVector zero(VectorSpecies<Integer> species) {
        IntSpecies vsp = (IntSpecies) species;
        return VectorSupport.fromBitsCoerced(vsp.vectorType(), int.class, species.length(),
                                0, MODE_BROADCAST, vsp,
                                ((bits_, s_) -> s_.rvOp(i -> bits_)));
    }
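
    // Usage sketch (illustrative only): obtaining an all-zero vector for a
    // species.  SPECIES_PREFERRED is a constant defined later in this file;
    // the method name below is an editorial example, not generated code.
    private static IntVector zeroUsageSketch() {
        IntVector z = IntVector.zero(IntVector.SPECIES_PREFERRED);
        assert z.lane(0) == 0;   // every lane holds the default int value
        return z;
    }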

    /**
     * Returns a vector of the same species as this one
     * where all lane elements are set to
     * the primitive value {@code e}.
     *
     * The contents of the current vector are discarded;
     * only the species is relevant to this operation.
     *
     * <p> This method returns the value of this expression:
     * {@code IntVector.broadcast(this.species(), e)}.
     *
     * @apiNote
     * Unlike the similar method named {@code broadcast()}
     * in the supertype {@code Vector}, this method does not
     * need to validate its argument, and cannot throw
     * {@code IllegalArgumentException}.  This method is
     * therefore preferable to the supertype method.
     *
     * @param e the value to broadcast
     * @return a vector where all lane elements are set to
     *         the primitive value {@code e}
     * @see #broadcast(VectorSpecies,long)
     * @see Vector#broadcast(long)
     * @see VectorSpecies#broadcast(long)
     */
    public abstract IntVector broadcast(int e);

    /**
     * Returns a vector of the given species
     * where all lane elements are set to
     * the primitive value {@code e}.
     *
     * @param species species of the desired vector
     * @param e the value to broadcast
     * @return a vector where all lane elements are set to
     *         the primitive value {@code e}
     * @see #broadcast(long)
     * @see Vector#broadcast(long)
     * @see VectorSpecies#broadcast(long)
     */
    @ForceInline
    public static IntVector broadcast(VectorSpecies<Integer> species, int e) {
        IntSpecies vsp = (IntSpecies) species;
        return vsp.broadcast(e);
    }
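
    // Usage sketch (illustrative only): both broadcast forms fill every lane
    // with the same scalar.  SPECIES_PREFERRED is defined later in this file;
    // the method name below is an editorial example, not generated code.
    private static void broadcastUsageSketch() {
        IntVector ones = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 1);
        IntVector twos = ones.broadcast(2);   // same species, new constant value
        assert ones.lane(0) == 1 && twos.lane(0) == 2;
    }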

    /*package-private*/
    @ForceInline
    final IntVector broadcastTemplate(int e) {
        IntSpecies vsp = vspecies();
        return vsp.broadcast(e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @apiNote
     * When working with vector subtypes like {@code IntVector},
     * {@linkplain #broadcast(int) the more strongly typed method}
     * is typically selected.  It can be explicitly selected
     * using a cast: {@code v.broadcast((int)e)}.
     * The two expressions will produce numerically identical results.
     */
    @Override
    public abstract IntVector broadcast(long e);

    /**
     * Returns a vector of the given species
     * where all lane elements are set to
     * the primitive value {@code e}.
     *
     * The {@code long} value must be accurately representable
     * by the {@code ETYPE} of the vector species, so that
     * {@code e==(long)(ETYPE)e}.
     *
     * @param species species of the desired vector
     * @param e the value to broadcast
     * @return a vector where all lane elements are set to
     *         the primitive value {@code e}
     * @throws IllegalArgumentException
     *         if the given {@code long} value cannot
     *         be represented by the vector's {@code ETYPE}
     * @see #broadcast(VectorSpecies,int)
     * @see VectorSpecies#checkValue(long)
     */
    @ForceInline
    public static IntVector broadcast(VectorSpecies<Integer> species, long e) {
        IntSpecies vsp = (IntSpecies) species;
        return vsp.broadcast(e);
    }
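
    // Usage sketch (illustrative only): the long-accepting factory checks that
    // the value fits in an int lane, i.e. e == (long)(int)e.  The method name
    // below is an editorial example, not generated code.
    private static void broadcastRangeSketch() {
        IntVector ok = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 42L);   // fits
        assert ok.lane(0) == 42;
        try {
            IntVector.broadcast(IntVector.SPECIES_PREFERRED, 1L << 40);         // does not fit
            assert false : "expected IllegalArgumentException";
        } catch (IllegalArgumentException expected) {
        }
    }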

    /*package-private*/
    @ForceInline
    final IntVector broadcastTemplate(long e) {
        return vspecies().broadcast(e);
    }

    // Unary lanewise support

    /**
     * {@inheritDoc} <!--workaround-->
     */
    public abstract
    IntVector lanewise(VectorOperators.Unary op);

    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Unary op) {
        if (opKind(op, VO_SPECIAL)) {
            if (op == ZOMO) {
                return blend(broadcast(-1), compare(NE, 0));
            }
            if (op == NOT) {
                return broadcast(-1).lanewise(XOR, this);
            }
        }
        int opc = opCode(op);
        return VectorSupport.unaryOp(
            opc, getClass(), null, int.class, length(),
            this, null,
            UN_IMPL.find(op, opc, IntVector::unaryOperations));
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Unary op,
                                  VectorMask<Integer> m);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Unary op,
                                          Class<? extends VectorMask<Integer>> maskClass,
                                          VectorMask<Integer> m) {
        m.check(maskClass, this);
        if (opKind(op, VO_SPECIAL)) {
            if (op == ZOMO) {
                return blend(broadcast(-1), compare(NE, 0, m));
            }
            if (op == NOT) {
                return lanewise(XOR, broadcast(-1), m);
            }
        }
        int opc = opCode(op);
        return VectorSupport.unaryOp(
            opc, getClass(), maskClass, int.class, length(),
            this, m,
            UN_IMPL.find(op, opc, IntVector::unaryOperations));
    }

    private static final
    ImplCache<Unary, UnaryOperation<IntVector, VectorMask<Integer>>>
        UN_IMPL = new ImplCache<>(Unary.class, IntVector.class);

    private static UnaryOperation<IntVector, VectorMask<Integer>> unaryOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_NEG: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) -a);
            case VECTOR_OP_ABS: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) Math.abs(a));
            case VECTOR_OP_BIT_COUNT: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) Integer.bitCount(a));
            case VECTOR_OP_TZ_COUNT: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) Integer.numberOfTrailingZeros(a));
            case VECTOR_OP_LZ_COUNT: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) Integer.numberOfLeadingZeros(a));
            case VECTOR_OP_REVERSE: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) Integer.reverse(a));
            case VECTOR_OP_REVERSE_BYTES: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) Integer.reverseBytes(a));
            default: return null;
        }
    }
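
    // Usage sketch (illustrative only): a unary lanewise call, and the NOT
    // identity used by lanewiseTemplate above (~a == a ^ -1 for every lane).
    // The method name below is an editorial example, not generated code.
    private static void unaryLanewiseSketch() {
        IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 5);
        IntVector notV = v.lanewise(VectorOperators.NOT);
        assert notV.lane(0) == ~5;                       // bitwise complement per lane
        assert notV.lane(0) == (5 ^ -1);                 // the XOR(-1) rewrite is exact
        IntVector counts = v.lanewise(VectorOperators.BIT_COUNT);
        assert counts.lane(0) == Integer.bitCount(5);    // population count per lane
    }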

    // Binary lanewise support

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Binary,int)
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Binary op,
                                  Vector<Integer> v);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Binary op,
                                          Vector<Integer> v) {
        IntVector that = (IntVector) v;
        that.check(this);

        if (opKind(op, VO_SPECIAL  | VO_SHIFT)) {
            if (op == FIRST_NONZERO) {
                // FIXME: Support this in the JIT.
                VectorMask<Integer> thisNZ
                    = this.viewAsIntegralLanes().compare(NE, (int) 0);
                that = that.blend((int) 0, thisNZ.cast(vspecies()));
                op = OR_UNCHECKED;
            }
            if (opKind(op, VO_SHIFT)) {
                // As per shift specification for Java, mask the shift count.
                // This allows the JIT to ignore some ISA details.
                that = that.lanewise(AND, SHIFT_MASK);
            }
            if (op == AND_NOT) {
                // FIXME: Support this in the JIT.
                that = that.lanewise(NOT);
                op = AND;
            } else if (op == DIV) {
                VectorMask<Integer> eqz = that.eq((int) 0);
                if (eqz.anyTrue()) {
                    throw that.divZeroException();
                }
            }
        }

        int opc = opCode(op);
        return VectorSupport.binaryOp(
            opc, getClass(), null, int.class, length(),
            this, that, null,
            BIN_IMPL.find(op, opc, IntVector::binaryOperations));
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Binary op,
                                  Vector<Integer> v,
                                  VectorMask<Integer> m);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Binary op,
                                          Class<? extends VectorMask<Integer>> maskClass,
                                          Vector<Integer> v, VectorMask<Integer> m) {
        IntVector that = (IntVector) v;
        that.check(this);
        m.check(maskClass, this);

        if (opKind(op, VO_SPECIAL  | VO_SHIFT)) {
            if (op == FIRST_NONZERO) {
                // FIXME: Support this in the JIT.
                VectorMask<Integer> thisNZ
                    = this.viewAsIntegralLanes().compare(NE, (int) 0);
                that = that.blend((int) 0, thisNZ.cast(vspecies()));
                op = OR_UNCHECKED;
            }
            if (opKind(op, VO_SHIFT)) {
                // As per shift specification for Java, mask the shift count.
                // This allows the JIT to ignore some ISA details.
                that = that.lanewise(AND, SHIFT_MASK);
            }
            if (op == AND_NOT) {
                // FIXME: Support this in the JIT.
                that = that.lanewise(NOT);
                op = AND;
            } else if (op == DIV) {
                VectorMask<Integer> eqz = that.eq((int)0);
                if (eqz.and(m).anyTrue()) {
                    throw that.divZeroException();
                }
                // suppress div/0 exceptions in unset lanes
                that = that.lanewise(NOT, eqz);
            }
        }

        int opc = opCode(op);
        return VectorSupport.binaryOp(
            opc, getClass(), maskClass, int.class, length(),
            this, that, m,
            BIN_IMPL.find(op, opc, IntVector::binaryOperations));
    }

    private static final
    ImplCache<Binary, BinaryOperation<IntVector, VectorMask<Integer>>>
        BIN_IMPL = new ImplCache<>(Binary.class, IntVector.class);

    private static BinaryOperation<IntVector, VectorMask<Integer>> binaryOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_ADD: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a + b));
            case VECTOR_OP_SUB: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a - b));
            case VECTOR_OP_MUL: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a * b));
            case VECTOR_OP_DIV: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a / b));
            case VECTOR_OP_MAX: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)Math.max(a, b));
            case VECTOR_OP_MIN: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)Math.min(a, b));
            case VECTOR_OP_AND: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a & b));
            case VECTOR_OP_OR: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a | b));
            case VECTOR_OP_XOR: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a ^ b));
            case VECTOR_OP_LSHIFT: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> (int)(a << n));
            case VECTOR_OP_RSHIFT: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> (int)(a >> n));
            case VECTOR_OP_URSHIFT: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> (int)((a & LSHR_SETUP_MASK) >>> n));
            case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
            case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
            case VECTOR_OP_COMPRESS_BITS: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> Integer.compress(a, n));
            case VECTOR_OP_EXPAND_BITS: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> Integer.expand(a, n));
            default: return null;
        }
    }

    // FIXME: Maybe all of the public final methods in this file (the
    // simple ones that just call lanewise) should be pushed down to
    // the X-VectorBits template.  They can't optimize properly at
    // this level, and must rely on inlining.  Does it work?
    // (If it works, of course keep the code here.)

    /**
     * Combines the lane values of this vector
     * with the value of a broadcast scalar.
     *
     * This is a lane-wise binary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e))}.
     *
     * @param op the operation used to process lane values
     * @param e the input scalar
     * @return the result of applying the operation lane-wise
     *         to the two input vectors
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  int e) {
        if (opKind(op, VO_SHIFT) && (int)(int)e == e) {
            return lanewiseShift(op, (int) e);
        }
        if (op == AND_NOT) {
            op = AND; e = (int) ~e;
        }
        return lanewise(op, broadcast(e));
    }
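
    // Usage sketch (illustrative only): the scalar overload is equivalent to
    // broadcasting the scalar first; a mask limits which lanes are changed.
    // The method name below is an editorial example, not generated code.
    private static void scalarLanewiseSketch() {
        IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 7);
        IntVector a = v.lanewise(VectorOperators.MUL, 3);                   // scalar form
        IntVector b = v.lanewise(VectorOperators.MUL, v.broadcast(3));      // explicit broadcast
        assert a.eq(b).allTrue();                                           // same result
        VectorMask<Integer> even = VectorMask.fromLong(v.species(), 0x5555555555555555L);
        IntVector c = v.lanewise(VectorOperators.MUL, 3, even);             // unset lanes keep 7
        assert c.lane(1) == 7;                                              // lane 1 is unselected
    }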

    /**
     * Combines the lane values of this vector
     * with the value of a broadcast scalar,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e), m)}.
     *
     * @param op the operation used to process lane values
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vector and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  int e,
                                  VectorMask<Integer> m) {
        if (opKind(op, VO_SHIFT) && (int)(int)e == e) {
            return lanewiseShift(op, (int) e, m);
        }
        if (op == AND_NOT) {
            op = AND; e = (int) ~e;
        }
        return lanewise(op, broadcast(e), m);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @apiNote
     * When working with vector subtypes like {@code IntVector},
     * {@linkplain #lanewise(VectorOperators.Binary,int)
     * the more strongly typed method}
     * is typically selected.  It can be explicitly selected
     * using a cast: {@code v.lanewise(op,(int)e)}.
     * The two expressions will produce numerically identical results.
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  long e) {
        int e1 = (int) e;
        if ((long)e1 != e
            // allow shift ops to clip down their int parameters
            && !(opKind(op, VO_SHIFT) && (int)e1 == e)) {
            vspecies().checkValue(e);  // for exception
        }
        return lanewise(op, e1);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @apiNote
     * When working with vector subtypes like {@code IntVector},
     * {@linkplain #lanewise(VectorOperators.Binary,int,VectorMask)
     * the more strongly typed method}
     * is typically selected.  It can be explicitly selected
     * using a cast: {@code v.lanewise(op,(int)e,m)}.
     * The two expressions will produce numerically identical results.
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  long e, VectorMask<Integer> m) {
        int e1 = (int) e;
        if ((long)e1 != e
            // allow shift ops to clip down their int parameters
            && !(opKind(op, VO_SHIFT) && (int)e1 == e)) {
            vspecies().checkValue(e);  // for exception
        }
        return lanewise(op, e1, m);
    }
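
    // Usage sketch (illustrative only): the long overloads simply narrow and
    // delegate to the int overloads; out-of-range longs are rejected through
    // vspecies().checkValue, as shown for broadcast above.  The method name
    // below is an editorial example, not generated code.
    private static void longScalarSketch() {
        IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 10);
        assert v.lanewise(VectorOperators.ADD, 5L).lane(0) == v.add(5).lane(0);   // both 15
    }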

    /*package-private*/
    abstract IntVector
    lanewiseShift(VectorOperators.Binary op, int e);

    /*package-private*/
    @ForceInline
    final IntVector
    lanewiseShiftTemplate(VectorOperators.Binary op, int e) {
        // Special handling for these.  FIXME: Refactor?
        assert(opKind(op, VO_SHIFT));
        // As per shift specification for Java, mask the shift count.
        e &= SHIFT_MASK;
        int opc = opCode(op);
        return VectorSupport.broadcastInt(
            opc, getClass(), null, int.class, length(),
            this, e, null,
            BIN_INT_IMPL.find(op, opc, IntVector::broadcastIntOperations));
    }

    /*package-private*/
    abstract IntVector
    lanewiseShift(VectorOperators.Binary op, int e, VectorMask<Integer> m);

    /*package-private*/
    @ForceInline
    final IntVector
    lanewiseShiftTemplate(VectorOperators.Binary op,
                          Class<? extends VectorMask<Integer>> maskClass,
                          int e, VectorMask<Integer> m) {
        m.check(maskClass, this);
        assert(opKind(op, VO_SHIFT));
        // As per shift specification for Java, mask the shift count.
        e &= SHIFT_MASK;
        int opc = opCode(op);
        return VectorSupport.broadcastInt(
            opc, getClass(), maskClass, int.class, length(),
            this, e, m,
            BIN_INT_IMPL.find(op, opc, IntVector::broadcastIntOperations));
    }

    private static final
    ImplCache<Binary,VectorBroadcastIntOp<IntVector, VectorMask<Integer>>> BIN_INT_IMPL
        = new ImplCache<>(Binary.class, IntVector.class);

    private static VectorBroadcastIntOp<IntVector, VectorMask<Integer>> broadcastIntOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_LSHIFT: return (v, n, m) ->
                    v.uOp(m, (i, a) -> (int)(a << n));
            case VECTOR_OP_RSHIFT: return (v, n, m) ->
                    v.uOp(m, (i, a) -> (int)(a >> n));
            case VECTOR_OP_URSHIFT: return (v, n, m) ->
                    v.uOp(m, (i, a) -> (int)((a & LSHR_SETUP_MASK) >>> n));
            case VECTOR_OP_LROTATE: return (v, n, m) ->
                    v.uOp(m, (i, a) -> rotateLeft(a, (int)n));
            case VECTOR_OP_RROTATE: return (v, n, m) ->
                    v.uOp(m, (i, a) -> rotateRight(a, (int)n));
            default: return null;
        }
    }

    // As per shift specification for Java, mask the shift count.
    // We mask 0X3F (long), 0X1F (int), 0x0F (short), 0x7 (byte).
    // The latter two maskings go beyond the JLS, but seem reasonable
    // since our lane types are first-class types, not just dressed
    // up ints.
    private static final int SHIFT_MASK = (Integer.SIZE - 1);
    private static final int LSHR_SETUP_MASK = -1;
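
    // Usage sketch (illustrative only): shift counts are masked with
    // SHIFT_MASK (0x1F for int lanes), so a count of 33 behaves like 1,
    // matching the scalar semantics of the Java << operator.  The method
    // name below is an editorial example, not generated code.
    private static void shiftMaskSketch() {
        IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 1);
        IntVector shifted = v.lanewise(VectorOperators.LSHL, 33);   // 33 & 0x1F == 1
        assert shifted.lane(0) == (1 << 33);                        // both equal 2
    }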

    // Ternary lanewise support

    // Ternary operators come in eight variations:
    //   lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2])
    //   lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2], mask)

    // It is annoying to support all of these variations of masking
    // and broadcast, but it would be more surprising not to continue
    // the obvious pattern started by unary and binary.

   /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,int)
     * @see #lanewise(VectorOperators.Ternary,Vector,int)
     * @see #lanewise(VectorOperators.Ternary,int,Vector)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Ternary op,
                                                  Vector<Integer> v1,
                                                  Vector<Integer> v2);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Ternary op,
                                          Vector<Integer> v1,
                                          Vector<Integer> v2) {
        IntVector that = (IntVector) v1;
        IntVector tother = (IntVector) v2;
        // It's a word: https://www.dictionary.com/browse/tother
        // See also Chapter 11 of Dickens, Our Mutual Friend:
        // "Totherest Governor," replied Mr Riderhood...
        that.check(this);
        tother.check(this);
        if (op == BITWISE_BLEND) {
            // FIXME: Support this in the JIT.
            that = this.lanewise(XOR, that).lanewise(AND, tother);
            return this.lanewise(XOR, that);
        }
        int opc = opCode(op);
        return VectorSupport.ternaryOp(
            opc, getClass(), null, int.class, length(),
            this, that, tother, null,
            TERN_IMPL.find(op, opc, IntVector::ternaryOperations));
    }
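
    // Worked example (illustrative only): BITWISE_BLEND(a, b, c) selects bits
    // of b where c has 1-bits and bits of a elsewhere.  The rewrite above uses
    // the identity a ^ ((a ^ b) & c), which equals (a & ~c) | (b & c).  The
    // method name below is an editorial example, not generated code.
    private static void bitwiseBlendSketch() {
        int a = 0b1100, b = 0b1010, c = 0b0110;
        int blended = a ^ ((a ^ b) & c);
        assert blended == ((a & ~c) | (b & c));   // == 0b1010 for these inputs
    }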

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Ternary op,
                                  Vector<Integer> v1,
                                  Vector<Integer> v2,
                                  VectorMask<Integer> m);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Ternary op,
                                          Class<? extends VectorMask<Integer>> maskClass,
                                          Vector<Integer> v1,
                                          Vector<Integer> v2,
                                          VectorMask<Integer> m) {
        IntVector that = (IntVector) v1;
        IntVector tother = (IntVector) v2;
        // It's a word: https://www.dictionary.com/browse/tother
        // See also Chapter 11 of Dickens, Our Mutual Friend:
        // "Totherest Governor," replied Mr Riderhood...
        that.check(this);
        tother.check(this);
        m.check(maskClass, this);

        if (op == BITWISE_BLEND) {
            // FIXME: Support this in the JIT.
            that = this.lanewise(XOR, that).lanewise(AND, tother);
            return this.lanewise(XOR, that, m);
        }
        int opc = opCode(op);
        return VectorSupport.ternaryOp(
            opc, getClass(), maskClass, int.class, length(),
            this, that, tother, m,
            TERN_IMPL.find(op, opc, IntVector::ternaryOperations));
    }

    private static final
    ImplCache<Ternary, TernaryOperation<IntVector, VectorMask<Integer>>>
        TERN_IMPL = new ImplCache<>(Ternary.class, IntVector.class);

    private static TernaryOperation<IntVector, VectorMask<Integer>> ternaryOperations(int opc_) {
        switch (opc_) {
            default: return null;
        }
    }

    /**
     * Combines the lane values of this vector
     * with the values of two broadcast scalars.
     *
     * This is a lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2))}.
     *
     * @param op the operation used to combine lane values
     * @param e1 the first input scalar
     * @param e2 the second input scalar
     * @return the result of applying the operation lane-wise
     *         to the input vector and the scalars
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2)
                                  int e1,
                                  int e2) {
        return lanewise(op, broadcast(e1), broadcast(e2));
    }

    /**
     * Combines the lane values of this vector
     * with the values of two broadcast scalars,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2), m)}.
     *
     * @param op the operation used to combine lane values
     * @param e1 the first input scalar
     * @param e2 the second input scalar
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vector and the scalars
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,int)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2,m)
                                  int e1,
                                  int e2,
                                  VectorMask<Integer> m) {
        return lanewise(op, broadcast(e1), broadcast(e2), m);
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar.
     *
     * This is a lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, v1, this.broadcast(e2))}.
     *
     * @param op the operation used to combine lane values
     * @param v1 the other input vector
     * @param e2 the input scalar
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,int,int)
     * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2)
                                  Vector<Integer> v1,
                                  int e2) {
        return lanewise(op, v1, broadcast(e2));
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, v1, this.broadcast(e2), m)}.
     *
     * @param op the operation used to combine lane values
     * @param v1 the other input vector
     * @param e2 the input scalar
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,Vector,int)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2,m)
                                  Vector<Integer> v1,
                                  int e2,
                                  VectorMask<Integer> m) {
        return lanewise(op, v1, broadcast(e2), m);
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar.
     *
     * This is a lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), v2)}.
     *
     * @param op the operation used to combine lane values
     * @param e1 the input scalar
     * @param v2 the other input vector
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
     * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2)
                                  int e1,
                                  Vector<Integer> v2) {
        return lanewise(op, broadcast(e1), v2);
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), v2, m)}.
     *
     * @param op the operation used to combine lane values
     * @param e1 the input scalar
     * @param v2 the other input vector
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,Vector)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2,m)
                                  int e1,
                                  Vector<Integer> v2,
                                  VectorMask<Integer> m) {
        return lanewise(op, broadcast(e1), v2, m);
    }

    // (Thus endeth the Great and Mighty Ternary Ogdoad.)
    // https://en.wikipedia.org/wiki/Ogdoad

    /// FULL-SERVICE BINARY METHODS: ADD, SUB, MUL, DIV
    //
    // These include masked and non-masked versions.
    // This subclass adds broadcast (masked or not).

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #add(int)
     */
    @Override
    @ForceInline
    public final IntVector add(Vector<Integer> v) {
        return lanewise(ADD, v);
    }

    /**
     * Adds this vector to the broadcast of an input scalar.
     *
     * This is a lane-wise binary operation which applies
     * the primitive addition operation ({@code +}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     *    lanewise}{@code (}{@link VectorOperators#ADD
     *    ADD}{@code , e)}.
     *
     * @param e the input scalar
     * @return the result of adding each lane of this vector to the scalar
     * @see #add(Vector)
     * @see #broadcast(int)
     * @see #add(int,VectorMask)
     * @see VectorOperators#ADD
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final
    IntVector add(int e) {
        return lanewise(ADD, e);
    }
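
    // Usage sketch (illustrative only): scalar add is shorthand for adding a
    // broadcast vector; the masked overload leaves unselected lanes unchanged.
    // The method name below is an editorial example, not generated code.
    private static void addScalarSketch() {
        IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 10);
        assert v.add(5).lane(0) == 15;                          // same as v.add(v.broadcast(5))
        VectorMask<Integer> none = v.species().maskAll(false);
        assert v.add(5, none).lane(0) == 10;                    // no lane selected, value kept
    }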

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #add(int,VectorMask)
     */
    @Override
    @ForceInline
    public final IntVector add(Vector<Integer> v,
                                          VectorMask<Integer> m) {
        return lanewise(ADD, v, m);
    }

    /**
     * Adds this vector to the broadcast of an input scalar,
     * selecting lane elements controlled by a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive addition operation ({@code +}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     *    lanewise}{@code (}{@link VectorOperators#ADD
     *    ADD}{@code , e, m)}.
     *
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of adding each lane of this vector to the scalar
     * @see #add(Vector,VectorMask)
     * @see #broadcast(int)
     * @see #add(int)
     * @see VectorOperators#ADD
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector add(int e,
                                          VectorMask<Integer> m) {
        return lanewise(ADD, e, m);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #sub(int)
     */
    @Override
    @ForceInline
    public final IntVector sub(Vector<Integer> v) {
        return lanewise(SUB, v);
    }

    /**
     * Subtracts an input scalar from this vector.
     *
1437      * This is a lane-wise binary operation which applies
1438      * the primitive subtraction operation ({@code -}) to each lane.
1439      *
1440      * This method is also equivalent to the expression
1441      * {@link #lanewise(VectorOperators.Binary,int)
1442      *    lanewise}{@code (}{@link VectorOperators#SUB
1443      *    SUB}{@code , e)}.
1444      *
1445      * @param e the input scalar
1446      * @return the result of subtracting the scalar from each lane of this vector
1447      * @see #sub(Vector)
1448      * @see #broadcast(int)
1449      * @see #sub(int,VectorMask)
1450      * @see VectorOperators#SUB
1451      * @see #lanewise(VectorOperators.Binary,Vector)
1452      * @see #lanewise(VectorOperators.Binary,int)
1453      */
1454     @ForceInline
1455     public final IntVector sub(int e) {
1456         return lanewise(SUB, e);
1457     }
1458 
1459     /**
1460      * {@inheritDoc} <!--workaround-->
1461      * @see #sub(int,VectorMask)
1462      */
1463     @Override
1464     @ForceInline
1465     public final IntVector sub(Vector<Integer> v,
1466                                           VectorMask<Integer> m) {
1467         return lanewise(SUB, v, m);
1468     }
1469 
1470     /**
1471      * Subtracts an input scalar from this vector
1472      * under the control of a mask.
1473      *
1474      * This is a masked lane-wise binary operation which applies
1475      * the primitive subtraction operation ({@code -}) to each lane.
1476      *
1477      * This method is also equivalent to the expression
1478      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1479      *    lanewise}{@code (}{@link VectorOperators#SUB
1480      *    SUB}{@code , e, m)}.
1481      *
1482      * @param e the input scalar
1483      * @param m the mask controlling lane selection
1484      * @return the result of subtracting the scalar from each lane of this vector
1485      * @see #sub(Vector,VectorMask)
1486      * @see #broadcast(int)
1487      * @see #sub(int)
1488      * @see VectorOperators#SUB
1489      * @see #lanewise(VectorOperators.Binary,Vector)
1490      * @see #lanewise(VectorOperators.Binary,int)
1491      */
1492     @ForceInline
1493     public final IntVector sub(int e,
1494                                           VectorMask<Integer> m) {
1495         return lanewise(SUB, e, m);
1496     }
1497 
1498     /**
1499      * {@inheritDoc} <!--workaround-->
1500      * @see #mul(int)
1501      */
1502     @Override
1503     @ForceInline
1504     public final IntVector mul(Vector<Integer> v) {
1505         return lanewise(MUL, v);
1506     }
1507 
1508     /**
1509      * Multiplies this vector by the broadcast of an input scalar.
1510      *
1511      * This is a lane-wise binary operation which applies
1512      * the primitive multiplication operation ({@code *}) to each lane.
1513      *
1514      * This method is also equivalent to the expression
1515      * {@link #lanewise(VectorOperators.Binary,int)
1516      *    lanewise}{@code (}{@link VectorOperators#MUL
1517      *    MUL}{@code , e)}.
1518      *
1519      * @param e the input scalar
1520      * @return the result of multiplying this vector by the given scalar
1521      * @see #mul(Vector)
1522      * @see #broadcast(int)
1523      * @see #mul(int,VectorMask)
1524      * @see VectorOperators#MUL
1525      * @see #lanewise(VectorOperators.Binary,Vector)
1526      * @see #lanewise(VectorOperators.Binary,int)
1527      */
1528     @ForceInline
1529     public final IntVector mul(int e) {
1530         return lanewise(MUL, e);
1531     }
1532 
1533     /**
1534      * {@inheritDoc} <!--workaround-->
1535      * @see #mul(int,VectorMask)
1536      */
1537     @Override
1538     @ForceInline
1539     public final IntVector mul(Vector<Integer> v,
1540                                           VectorMask<Integer> m) {
1541         return lanewise(MUL, v, m);
1542     }
1543 
1544     /**
1545      * Multiplies this vector by the broadcast of an input scalar,
1546      * selecting lane elements controlled by a mask.
1547      *
1548      * This is a masked lane-wise binary operation which applies
1549      * the primitive multiplication operation ({@code *}) to each lane.
1550      *
1551      * This method is also equivalent to the expression
1552      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1553      *    lanewise}{@code (}{@link VectorOperators#MUL
1554      *    MUL}{@code , e, m)}.
1555      *
1556      * @param e the input scalar
1557      * @param m the mask controlling lane selection
1558      * @return the result of multiplying each lane of this vector by the scalar
1559      * @see #mul(Vector,VectorMask)
1560      * @see #broadcast(int)
1561      * @see #mul(int)
1562      * @see VectorOperators#MUL
1563      * @see #lanewise(VectorOperators.Binary,Vector)
1564      * @see #lanewise(VectorOperators.Binary,int)
1565      */
1566     @ForceInline
1567     public final IntVector mul(int e,
1568                                           VectorMask<Integer> m) {
1569         return lanewise(MUL, e, m);
1570     }
1571 
1572     /**
1573      * {@inheritDoc} <!--workaround-->
1574      * @apiNote If there is a zero divisor, {@code
1575      * ArithmeticException} will be thrown.
1576      */
1577     @Override
1578     @ForceInline
1579     public final IntVector div(Vector<Integer> v) {
1580         return lanewise(DIV, v);
1581     }
1582 
1583     /**
1584      * Divides this vector by the broadcast of an input scalar.
1585      *
1586      * This is a lane-wise binary operation which applies
1587      * the primitive division operation ({@code /}) to each lane.
1588      *
1589      * This method is also equivalent to the expression
1590      * {@link #lanewise(VectorOperators.Binary,int)
1591      *    lanewise}{@code (}{@link VectorOperators#DIV
1592      *    DIV}{@code , e)}.
1593      *
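          * For example, a sketch (assuming a nonzero divisor and a vector
          * {@code v} of this type):
          * <pre>{@code
          *   IntVector halved = v.div(2);   // integer division in every lane
          * }</pre>
          *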
1594      * @apiNote If there is a zero divisor, {@code
1595      * ArithmeticException} will be thrown.
1596      *
1597      * @param e the input scalar
1598      * @return the result of dividing each lane of this vector by the scalar
1599      * @see #div(Vector)
1600      * @see #broadcast(int)
1601      * @see #div(int,VectorMask)
1602      * @see VectorOperators#DIV
1603      * @see #lanewise(VectorOperators.Binary,Vector)
1604      * @see #lanewise(VectorOperators.Binary,int)
1605      */
1606     @ForceInline
1607     public final IntVector div(int e) {
1608         return lanewise(DIV, e);
1609     }
1610 
1611     /**
1612      * {@inheritDoc} <!--workaround-->
1613      * @see #div(int,VectorMask)
1614      * @apiNote If there is a zero divisor, {@code
1615      * ArithmeticException} will be thrown.
1616      */
1617     @Override
1618     @ForceInline
1619     public final IntVector div(Vector<Integer> v,
1620                                           VectorMask<Integer> m) {
1621         return lanewise(DIV, v, m);
1622     }
1623 
1624     /**
1625      * Divides this vector by the broadcast of an input scalar,
1626      * selecting lane elements controlled by a mask.
1627      *
1628      * This is a masked lane-wise binary operation which applies
1629      * the primitive division operation ({@code /}) to each lane.
1630      *
1631      * This method is also equivalent to the expression
1632      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1633      *    lanewise}{@code (}{@link VectorOperators#DIV
1634      *    DIV}{@code , e, m)}.
1635      *
1636      * @apiNote If there is a zero divisor, {@code
1637      * ArithmeticException} will be thrown.
1638      *
1639      * @param e the input scalar
1640      * @param m the mask controlling lane selection
1641      * @return the result of dividing each lane of this vector by the scalar
1642      * @see #div(Vector,VectorMask)
1643      * @see #broadcast(int)
1644      * @see #div(int)
1645      * @see VectorOperators#DIV
1646      * @see #lanewise(VectorOperators.Binary,Vector)
1647      * @see #lanewise(VectorOperators.Binary,int)
1648      */
1649     @ForceInline
1650     public final IntVector div(int e,
1651                                           VectorMask<Integer> m) {
1652         return lanewise(DIV, e, m);
1653     }
1654 
1655     /// END OF FULL-SERVICE BINARY METHODS
1656 
1657     /// SECOND-TIER BINARY METHODS
1658     //
1659     // There are no masked versions.
1660 
1661     /**
1662      * {@inheritDoc} <!--workaround-->
1663      */
1664     @Override
1665     @ForceInline
1666     public final IntVector min(Vector<Integer> v) {
1667         return lanewise(MIN, v);
1668     }
1669 
1670     // FIXME:  "broadcast of an input scalar" is really wordy.  Reduce?
1671     /**
1672      * Computes the smaller of this vector and the broadcast of an input scalar.
1673      *
1674      * This is a lane-wise binary operation which applies the
1675      * operation {@code Math.min()} to each pair of
1676      * corresponding lane values.
1677      *
1678      * This method is also equivalent to the expression
1679      * {@link #lanewise(VectorOperators.Binary,int)
1680      *    lanewise}{@code (}{@link VectorOperators#MIN
1681      *    MIN}{@code , e)}.
1682      *
1683      * @param e the input scalar
1684      * @return the lane-wise minimum of this vector and the given scalar
1685      * @see #min(Vector)
1686      * @see #broadcast(int)
1687      * @see VectorOperators#MIN
1688      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
1689      */
1690     @ForceInline
1691     public final IntVector min(int e) {
1692         return lanewise(MIN, e);
1693     }
1694 
1695     /**
1696      * {@inheritDoc} <!--workaround-->
1697      */
1698     @Override
1699     @ForceInline
1700     public final IntVector max(Vector<Integer> v) {
1701         return lanewise(MAX, v);
1702     }
1703 
1704     /**
1705      * Computes the larger of this vector and the broadcast of an input scalar.
1706      *
1707      * This is a lane-wise binary operation which applies the
1708      * operation {@code Math.max()} to each pair of
1709      * corresponding lane values.
1710      *
1711      * This method is also equivalent to the expression
1712      * {@link #lanewise(VectorOperators.Binary,int)
1713      *    lanewise}{@code (}{@link VectorOperators#MAX
1714      *    MAX}{@code , e)}.
1715      *
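          * Combined with {@link #min(int)}, this method can clamp lane values
          * to a range; for example (a sketch assuming {@code v} is an
          * {@code IntVector}):
          * <pre>{@code
          *   IntVector clamped = v.max(0).min(255);   // clamp each lane to [0, 255]
          * }</pre>
          *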
1716      * @param e the input scalar
1717      * @return the lane-wise maximum of this vector and the given scalar
1718      * @see #max(Vector)
1719      * @see #broadcast(int)
1720      * @see VectorOperators#MAX
1721      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
1722      */
1723     @ForceInline
1724     public final IntVector max(int e) {
1725         return lanewise(MAX, e);
1726     }
1727 
1728     // common bitwise operators: and, or, not (with scalar versions)
1729     /**
1730      * Computes the bitwise logical conjunction ({@code &})
1731      * of this vector and a second input vector.
1732      *
1733      * This is a lane-wise binary operation which applies
1734      * the primitive bitwise "and" operation ({@code &})
1735      * to each pair of corresponding lane values.
1736      *
1737      * This method is also equivalent to the expression
1738      * {@link #lanewise(VectorOperators.Binary,Vector)
1739      *    lanewise}{@code (}{@link VectorOperators#AND
1740      *    AND}{@code , v)}.
1741      *
1742      * <p>
1743      * This is not a full-service named operation like
1744      * {@link #add(Vector) add}.  A masked version of
1745      * this operation is not directly available
1746      * but may be obtained via the masked version of
1747      * {@code lanewise}.
1748      *
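          * For example, the masked form can be written as follows (a sketch
          * assuming {@code v} and {@code w} are {@code IntVector}s and
          * {@code m} is a {@code VectorMask<Integer>}):
          * <pre>{@code
          *   IntVector r = v.lanewise(VectorOperators.AND, w, m);
          * }</pre>
          *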
1749      * @param v a second input vector
1750      * @return the bitwise {@code &} of this vector and the second input vector
1751      * @see #and(int)
1752      * @see #or(Vector)
1753      * @see #not()
1754      * @see VectorOperators#AND
1755      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1756      */
1757     @ForceInline
1758     public final IntVector and(Vector<Integer> v) {
1759         return lanewise(AND, v);
1760     }
1761 
1762     /**
1763      * Computes the bitwise logical conjunction ({@code &})
1764      * of this vector and a scalar.
1765      *
1766      * This is a lane-wise binary operation which applies
1767      * the primitive bitwise "and" operation ({@code &})
1768      * to each pair of corresponding lane values.
1769      *
1770      * This method is also equivalent to the expression
1771      * {@link #lanewise(VectorOperators.Binary,Vector)
1772      *    lanewise}{@code (}{@link VectorOperators#AND
1773      *    AND}{@code , e)}.
1774      *
1775      * @param e an input scalar
1776      * @return the bitwise {@code &} of this vector and scalar
1777      * @see #and(Vector)
1778      * @see VectorOperators#AND
1779      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1780      */
1781     @ForceInline
1782     public final IntVector and(int e) {
1783         return lanewise(AND, e);
1784     }
1785 
1786     /**
1787      * Computes the bitwise logical disjunction ({@code |})
1788      * of this vector and a second input vector.
1789      *
1790      * This is a lane-wise binary operation which applies
1791      * the primitive bitwise "or" operation ({@code |})
1792      * to each pair of corresponding lane values.
1793      *
1794      * This method is also equivalent to the expression
1795      * {@link #lanewise(VectorOperators.Binary,Vector)
1796      *    lanewise}{@code (}{@link VectorOperators#OR
1797      *    OR}{@code , v)}.
1798      *
1799      * <p>
1800      * This is not a full-service named operation like
1801      * {@link #add(Vector) add}.  A masked version of
1802      * this operation is not directly available
1803      * but may be obtained via the masked version of
1804      * {@code lanewise}.
1805      *
1806      * @param v a second input vector
1807      * @return the bitwise {@code |} of this vector and the second input vector
1808      * @see #or(int)
1809      * @see #and(Vector)
1810      * @see #not()
1811      * @see VectorOperators#OR
1812      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1813      */
1814     @ForceInline
1815     public final IntVector or(Vector<Integer> v) {
1816         return lanewise(OR, v);
1817     }
1818 
1819     /**
1820      * Computes the bitwise logical disjunction ({@code |})
1821      * of this vector and a scalar.
1822      *
1823      * This is a lane-wise binary operation which applies
1824      * the primitive bitwise "or" operation ({@code |})
1825      * to each pair of corresponding lane values.
1826      *
1827      * This method is also equivalent to the expression
1828      * {@link #lanewise(VectorOperators.Binary,Vector)
1829      *    lanewise}{@code (}{@link VectorOperators#OR
1830      *    OR}{@code , e)}.
1831      *
1832      * @param e an input scalar
1833      * @return the bitwise {@code |} of this vector and scalar
1834      * @see #or(Vector)
1835      * @see VectorOperators#OR
1836      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1837      */
1838     @ForceInline
1839     public final IntVector or(int e) {
1840         return lanewise(OR, e);
1841     }
1842 
1843 
1844 
1845     /// UNARY METHODS
1846 
1847     /**
1848      * {@inheritDoc} <!--workaround-->
1849      */
1850     @Override
1851     @ForceInline
1852     public final
1853     IntVector neg() {
1854         return lanewise(NEG);
1855     }
1856 
1857     /**
1858      * {@inheritDoc} <!--workaround-->
1859      */
1860     @Override
1861     @ForceInline
1862     public final
1863     IntVector abs() {
1864         return lanewise(ABS);
1865     }
1866 
1867 
1868     // not (~)
1869     /**
1870      * Computes the bitwise logical complement ({@code ~})
1871      * of this vector.
1872      *
1873      * This is a lane-wise unary operation which applies
1874      * the primitive bitwise "not" operation ({@code ~})
1875      * to each lane value.
1876      *
1877      * This method is also equivalent to the expression
1878      * {@link #lanewise(VectorOperators.Unary)
1879      *    lanewise}{@code (}{@link VectorOperators#NOT
1880      *    NOT}{@code )}.
1881      *
1882      * <p>
1883      * This is not a full-service named operation like
1884      * {@link #add(Vector) add}.  A masked version of
1885      * this operation is not directly available
1886      * but may be obtained via the masked version of
1887      * {@code lanewise}.
1888      *
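          * For example (a sketch assuming {@code v} is an {@code IntVector}):
          * <pre>{@code
          *   IntVector flipped = v.not();                              // ~x in each lane
          *   IntVector viaXor  = v.lanewise(VectorOperators.XOR, -1);  // same result
          * }</pre>
          *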
1889      * @return the bitwise complement {@code ~} of this vector
1890      * @see #and(Vector)
1891      * @see VectorOperators#NOT
1892      * @see #lanewise(VectorOperators.Unary,VectorMask)
1893      */
1894     @ForceInline
1895     public final IntVector not() {
1896         return lanewise(NOT);
1897     }
1898 
1899 
1900     /// COMPARISONS
1901 
1902     /**
1903      * {@inheritDoc} <!--workaround-->
1904      */
1905     @Override
1906     @ForceInline
1907     public final
1908     VectorMask<Integer> eq(Vector<Integer> v) {
1909         return compare(EQ, v);
1910     }
1911 
1912     /**
1913      * Tests if this vector is equal to an input scalar.
1914      *
1915      * This is a lane-wise binary test operation which applies
1916      * the primitive equals operation ({@code ==}) to each lane.
1917      * The result is the same as {@code compare(VectorOperators.EQ, e)}.
1918      *
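          * For example (a sketch assuming {@code v} is an {@code IntVector}):
          * <pre>{@code
          *   VectorMask<Integer> zeros = v.eq(0);
          *   int zeroCount = zeros.trueCount();   // how many lanes are zero
          * }</pre>
          *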
1919      * @param e the input scalar
1920      * @return the result mask of testing if this vector
1921      *         is equal to {@code e}
1922      * @see #compare(VectorOperators.Comparison,int)
1923      */
1924     @ForceInline
1925     public final
1926     VectorMask<Integer> eq(int e) {
1927         return compare(EQ, e);
1928     }
1929 
1930     /**
1931      * {@inheritDoc} <!--workaround-->
1932      */
1933     @Override
1934     @ForceInline
1935     public final
1936     VectorMask<Integer> lt(Vector<Integer> v) {
1937         return compare(LT, v);
1938     }
1939 
1940     /**
1941      * Tests if this vector is less than an input scalar.
1942      *
1943      * This is a lane-wise binary test operation which applies
1944      * the primitive less than operation ({@code <}) to each lane.
1945      * The result is the same as {@code compare(VectorOperators.LT, e)}.
1946      *
1947      * @param e the input scalar
1948      * @return the mask result of testing if this vector
1949      *         is less than the input scalar
1950      * @see #compare(VectorOperators.Comparison,int)
1951      */
1952     @ForceInline
1953     public final
1954     VectorMask<Integer> lt(int e) {
1955         return compare(LT, e);
1956     }
1957 
1958     /**
1959      * {@inheritDoc} <!--workaround-->
1960      */
1961     @Override
1962     public abstract
1963     VectorMask<Integer> test(VectorOperators.Test op);
1964 
1965     /*package-private*/
1966     @ForceInline
1967     final
1968     <M extends VectorMask<Integer>>
1969     M testTemplate(Class<M> maskType, Test op) {
1970         IntSpecies vsp = vspecies();
1971         if (opKind(op, VO_SPECIAL)) {
1972             VectorMask<Integer> m;
1973             if (op == IS_DEFAULT) {
1974                 m = compare(EQ, (int) 0);
1975             } else if (op == IS_NEGATIVE) {
1976                 m = compare(LT, (int) 0);
1977             }
1978             else {
1979                 throw new AssertionError(op);
1980             }
1981             return maskType.cast(m);
1982         }
1983         int opc = opCode(op);
1984         throw new AssertionError(op);
1985     }
1986 
1987     /**
1988      * {@inheritDoc} <!--workaround-->
1989      */
1990     @Override
1991     public abstract
1992     VectorMask<Integer> test(VectorOperators.Test op,
1993                                   VectorMask<Integer> m);
1994 
1995     /*package-private*/
1996     @ForceInline
1997     final
1998     <M extends VectorMask<Integer>>
1999     M testTemplate(Class<M> maskType, Test op, M mask) {
2000         IntSpecies vsp = vspecies();
2001         mask.check(maskType, this);
2002         if (opKind(op, VO_SPECIAL)) {
2003             VectorMask<Integer> m = mask;
2004             if (op == IS_DEFAULT) {
2005                 m = compare(EQ, (int) 0, m);
2006             } else if (op == IS_NEGATIVE) {
2007                 m = compare(LT, (int) 0, m);
2008             }
2009             else {
2010                 throw new AssertionError(op);
2011             }
2012             return maskType.cast(m);
2013         }
2014         int opc = opCode(op);
2015         throw new AssertionError(op);
2016     }
2017 
2018     /**
2019      * {@inheritDoc} <!--workaround-->
2020      */
2021     @Override
2022     public abstract
2023     VectorMask<Integer> compare(VectorOperators.Comparison op, Vector<Integer> v);
2024 
2025     /*package-private*/
2026     @ForceInline
2027     final
2028     <M extends VectorMask<Integer>>
2029     M compareTemplate(Class<M> maskType, Comparison op, Vector<Integer> v) {
2030         IntVector that = (IntVector) v;
2031         that.check(this);
2032         int opc = opCode(op);
2033         return VectorSupport.compare(
2034             opc, getClass(), maskType, int.class, length(),
2035             this, that, null,
2036             (cond, v0, v1, m1) -> {
2037                 AbstractMask<Integer> m
2038                     = v0.bTest(cond, v1, (cond_, i, a, b)
2039                                -> compareWithOp(cond, a, b));
2040                 @SuppressWarnings("unchecked")
2041                 M m2 = (M) m;
2042                 return m2;
2043             });
2044     }
2045 
2046     /*package-private*/
2047     @ForceInline
2048     final
2049     <M extends VectorMask<Integer>>
2050     M compareTemplate(Class<M> maskType, Comparison op, Vector<Integer> v, M m) {
2051         IntVector that = (IntVector) v;
2052         that.check(this);
2053         m.check(maskType, this);
2054         int opc = opCode(op);
2055         return VectorSupport.compare(
2056             opc, getClass(), maskType, int.class, length(),
2057             this, that, m,
2058             (cond, v0, v1, m1) -> {
2059                 AbstractMask<Integer> cmpM
2060                     = v0.bTest(cond, v1, (cond_, i, a, b)
2061                                -> compareWithOp(cond, a, b));
2062                 @SuppressWarnings("unchecked")
2063                 M m2 = (M) cmpM.and(m1);
2064                 return m2;
2065             });
2066     }
2067 
2068     @ForceInline
2069     private static boolean compareWithOp(int cond, int a, int b) {
2070         return switch (cond) {
2071             case BT_eq -> a == b;
2072             case BT_ne -> a != b;
2073             case BT_lt -> a < b;
2074             case BT_le -> a <= b;
2075             case BT_gt -> a > b;
2076             case BT_ge -> a >= b;
2077             case BT_ult -> Integer.compareUnsigned(a, b) < 0;
2078             case BT_ule -> Integer.compareUnsigned(a, b) <= 0;
2079             case BT_ugt -> Integer.compareUnsigned(a, b) > 0;
2080             case BT_uge -> Integer.compareUnsigned(a, b) >= 0;
2081             default -> throw new AssertionError();
2082         };
2083     }
2084 
2085     /**
2086      * Tests this vector by comparing it with an input scalar,
2087      * according to the given comparison operation.
2088      *
2089      * This is a lane-wise binary test operation which applies
2090      * the comparison operation to each lane.
2091      * <p>
2092      * The result is the same as
2093      * {@code compare(op, broadcast(species(), e))}.
2094      * That is, the scalar may be regarded as broadcast to
2095      * a vector of the same species, and then compared
2096      * against the original vector, using the selected
2097      * comparison operation.
2098      *
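          * For example (a sketch assuming {@code v} is an {@code IntVector}):
          * <pre>{@code
          *   VectorMask<Integer> negatives = v.compare(VectorOperators.LT, 0);
          *   IntVector magnitudes = v.blend(v.neg(), negatives);   // flip negative lanes
          * }</pre>
          *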
2099      * @param op the operation used to compare lane values
2100      * @param e the input scalar
2101      * @return the mask result of testing lane-wise if this vector
2102      *         compares to the input, according to the selected
2103      *         comparison operator
2104      * @see IntVector#compare(VectorOperators.Comparison,Vector)
2105      * @see #eq(int)
2106      * @see #lt(int)
2107      */
2108     public abstract
2109     VectorMask<Integer> compare(Comparison op, int e);
2110 
2111     /*package-private*/
2112     @ForceInline
2113     final
2114     <M extends VectorMask<Integer>>
2115     M compareTemplate(Class<M> maskType, Comparison op, int e) {
2116         return compareTemplate(maskType, op, broadcast(e));
2117     }
2118 
2119     /**
2120      * Tests this vector by comparing it with an input scalar,
2121      * according to the given comparison operation,
2122      * in lanes selected by a mask.
2123      *
2124      * This is a masked lane-wise binary test operation which applies
2125      * to each pair of corresponding lane values.
2126      *
2127      * The returned result is equal to the expression
2128      * {@code compare(op,e).and(m)}.
2129      *
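          * For example (a sketch assuming {@code v} is an {@code IntVector} and
          * {@code m} is a {@code VectorMask<Integer>}):
          * <pre>{@code
          *   // lanes that are selected by m and also greater than 10
          *   VectorMask<Integer> hits = v.compare(VectorOperators.GT, 10, m);
          * }</pre>
          *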
2130      * @param op the operation used to compare lane values
2131      * @param e the input scalar
2132      * @param m the mask controlling lane selection
2133      * @return the mask result of testing lane-wise if this vector
2134      *         compares to the input, according to the selected
2135      *         comparison operator,
2136      *         and only in the lanes selected by the mask
2137      * @see IntVector#compare(VectorOperators.Comparison,Vector,VectorMask)
2138      */
2139     @ForceInline
2140     public final VectorMask<Integer> compare(VectorOperators.Comparison op,
2141                                                int e,
2142                                                VectorMask<Integer> m) {
2143         return compare(op, broadcast(e), m);
2144     }
2145 
2146     /**
2147      * {@inheritDoc} <!--workaround-->
2148      */
2149     @Override
2150     public abstract
2151     VectorMask<Integer> compare(Comparison op, long e);
2152 
2153     /*package-private*/
2154     @ForceInline
2155     final
2156     <M extends VectorMask<Integer>>
2157     M compareTemplate(Class<M> maskType, Comparison op, long e) {
2158         return compareTemplate(maskType, op, broadcast(e));
2159     }
2160 
2161     /**
2162      * {@inheritDoc} <!--workaround-->
2163      */
2164     @Override
2165     @ForceInline
2166     public final
2167     VectorMask<Integer> compare(Comparison op, long e, VectorMask<Integer> m) {
2168         return compare(op, broadcast(e), m);
2169     }
2170 
2171 
2172 
2173     /**
2174      * {@inheritDoc} <!--workaround-->
2175      */
2176     @Override public abstract
2177     IntVector blend(Vector<Integer> v, VectorMask<Integer> m);
2178 
2179     /*package-private*/
2180     @ForceInline
2181     final
2182     <M extends VectorMask<Integer>>
2183     IntVector
2184     blendTemplate(Class<M> maskType, IntVector v, M m) {
2185         v.check(this);
2186         return VectorSupport.blend(
2187             getClass(), maskType, int.class, length(),
2188             this, v, m,
2189             (v0, v1, m_) -> v0.bOp(v1, m_, (i, a, b) -> b));
2190     }
2191 
2192     /**
2193      * {@inheritDoc} <!--workaround-->
2194      */
2195     @Override public abstract IntVector addIndex(int scale);
2196 
2197     /*package-private*/
2198     @ForceInline
2199     final IntVector addIndexTemplate(int scale) {
2200         IntSpecies vsp = vspecies();
2201         // make sure VLENGTH*scale doesn't overflow:
2202         vsp.checkScale(scale);
2203         return VectorSupport.indexVector(
2204             getClass(), int.class, length(),
2205             this, scale, vsp,
2206             (v, scale_, s)
2207             -> {
2208                 // If the platform doesn't support an INDEX
2209                 // instruction directly, load IOTA from memory
2210                 // and multiply.
2211                 IntVector iota = s.iota();
2212                 int sc = (int) scale_;
2213                 return v.add(sc == 1 ? iota : iota.mul(sc));
2214             });
2215     }
2216 
2217     /**
2218      * Replaces selected lanes of this vector with
2219      * a scalar value
2220      * under the control of a mask.
2221      *
2222      * This is a masked lane-wise binary operation which
2223      * selects each lane value from one or the other input.
2224      *
2225      * The returned result is equal to the expression
2226      * {@code blend(broadcast(e),m)}.
2227      *
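          * For example (a sketch assuming {@code v} is an {@code IntVector}):
          * <pre>{@code
          *   IntVector floored = v.blend(0, v.lt(0));   // replace negative lanes with 0
          * }</pre>
          *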
2228      * @param e the input scalar, containing the replacement lane value
2229      * @param m the mask controlling lane selection of the scalar
2230      * @return the result of blending the lane elements of this vector with
2231      *         the scalar value
2232      */
2233     @ForceInline
2234     public final IntVector blend(int e,
2235                                             VectorMask<Integer> m) {
2236         return blend(broadcast(e), m);
2237     }
2238 
2239     /**
2240      * Replaces selected lanes of this vector with
2241      * a scalar value
2242      * under the control of a mask.
2243      *
2244      * This is a masked lane-wise binary operation which
2245      * selects each lane value from one or the other input.
2246      *
2247      * The returned result is equal to the expression
2248      * {@code blend(broadcast(e),m)}.
2249      *
2250      * @param e the input scalar, containing the replacement lane value
2251      * @param m the mask controlling lane selection of the scalar
2252      * @return the result of blending the lane elements of this vector with
2253      *         the scalar value
2254      */
2255     @ForceInline
2256     public final IntVector blend(long e,
2257                                             VectorMask<Integer> m) {
2258         return blend(broadcast(e), m);
2259     }
2260 
2261     /**
2262      * {@inheritDoc} <!--workaround-->
2263      */
2264     @Override
2265     public abstract
2266     IntVector slice(int origin, Vector<Integer> v1);
2267 
2268     /*package-private*/
2269     final
2270     @ForceInline
2271     IntVector sliceTemplate(int origin, Vector<Integer> v1) {
2272         IntVector that = (IntVector) v1;
2273         that.check(this);
2274         Objects.checkIndex(origin, length() + 1);
2275         VectorShuffle<Integer> iota = iotaShuffle();
2276         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.LT, (broadcast((int)(length() - origin))));
2277         iota = iotaShuffle(origin, 1, true);
2278         return that.rearrange(iota).blend(this.rearrange(iota), blendMask);
2279     }
2280 
2281     /**
2282      * {@inheritDoc} <!--workaround-->
2283      */
2284     @Override
2285     @ForceInline
2286     public final
2287     IntVector slice(int origin,
2288                                Vector<Integer> w,
2289                                VectorMask<Integer> m) {
2290         return broadcast(0).blend(slice(origin, w), m);
2291     }
2292 
2293     /**
2294      * {@inheritDoc} <!--workaround-->
2295      */
2296     @Override
2297     public abstract
2298     IntVector slice(int origin);
2299 
2300     /*package-private*/
2301     final
2302     @ForceInline
2303     IntVector sliceTemplate(int origin) {
2304         Objects.checkIndex(origin, length() + 1);
2305         VectorShuffle<Integer> iota = iotaShuffle();
2306         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.LT, (broadcast((int)(length() - origin))));
2307         iota = iotaShuffle(origin, 1, true);
2308         return vspecies().zero().blend(this.rearrange(iota), blendMask);
2309     }
2310 
2311     /**
2312      * {@inheritDoc} <!--workaround-->
2313      */
2314     @Override
2315     public abstract
2316     IntVector unslice(int origin, Vector<Integer> w, int part);
2317 
2318     /*package-private*/
2319     final
2320     @ForceInline
2321     IntVector
2322     unsliceTemplate(int origin, Vector<Integer> w, int part) {
2323         IntVector that = (IntVector) w;
2324         that.check(this);
2325         Objects.checkIndex(origin, length() + 1);
2326         VectorShuffle<Integer> iota = iotaShuffle();
2327         VectorMask<Integer> blendMask = iota.toVector().compare((part == 0) ? VectorOperators.GE : VectorOperators.LT,
2328                                                                   (broadcast((int)(origin))));
2329         iota = iotaShuffle(-origin, 1, true);
2330         return that.blend(this.rearrange(iota), blendMask);
2331     }
2332 
2333     /*package-private*/
2334     final
2335     @ForceInline
2336     <M extends VectorMask<Integer>>
2337     IntVector
2338     unsliceTemplate(Class<M> maskType, int origin, Vector<Integer> w, int part, M m) {
2339         IntVector that = (IntVector) w;
2340         that.check(this);
2341         IntVector slice = that.sliceTemplate(origin, that);
2342         slice = slice.blendTemplate(maskType, this, m);
2343         return slice.unsliceTemplate(origin, w, part);
2344     }
2345 
2346     /**
2347      * {@inheritDoc} <!--workaround-->
2348      */
2349     @Override
2350     public abstract
2351     IntVector unslice(int origin, Vector<Integer> w, int part, VectorMask<Integer> m);
2352 
2353     /**
2354      * {@inheritDoc} <!--workaround-->
2355      */
2356     @Override
2357     public abstract
2358     IntVector unslice(int origin);
2359 
2360     /*package-private*/
2361     final
2362     @ForceInline
2363     IntVector
2364     unsliceTemplate(int origin) {
2365         Objects.checkIndex(origin, length() + 1);
2366         VectorShuffle<Integer> iota = iotaShuffle();
2367         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.GE,
2368                                                                   (broadcast((int)(origin))));
2369         iota = iotaShuffle(-origin, 1, true);
2370         return vspecies().zero().blend(this.rearrange(iota), blendMask);
2371     }
2372 
2373     private ArrayIndexOutOfBoundsException
2374     wrongPartForSlice(int part) {
2375         String msg = String.format("bad part number %d for slice operation",
2376                                    part);
2377         return new ArrayIndexOutOfBoundsException(msg);
2378     }
2379 
2380     /**
2381      * {@inheritDoc} <!--workaround-->
2382      */
2383     @Override
2384     public abstract
2385     IntVector rearrange(VectorShuffle<Integer> m);
2386 
2387     /*package-private*/
2388     @ForceInline
2389     final
2390     <S extends VectorShuffle<Integer>>
2391     IntVector rearrangeTemplate(Class<S> shuffletype, S shuffle) {
2392         shuffle.checkIndexes();
2393         return VectorSupport.rearrangeOp(
2394             getClass(), shuffletype, null, int.class, length(),
2395             this, shuffle, null,
2396             (v1, s_, m_) -> v1.uOp((i, a) -> {
2397                 int ei = s_.laneSource(i);
2398                 return v1.lane(ei);
2399             }));
2400     }
2401 
2402     /**
2403      * {@inheritDoc} <!--workaround-->
2404      */
2405     @Override
2406     public abstract
2407     IntVector rearrange(VectorShuffle<Integer> s,
2408                                    VectorMask<Integer> m);
2409 
2410     /*package-private*/
2411     @ForceInline
2412     final
2413     <S extends VectorShuffle<Integer>, M extends VectorMask<Integer>>
2414     IntVector rearrangeTemplate(Class<S> shuffletype,
2415                                            Class<M> masktype,
2416                                            S shuffle,
2417                                            M m) {
2418 
2419         m.check(masktype, this);
2420         VectorMask<Integer> valid = shuffle.laneIsValid();
2421         if (m.andNot(valid).anyTrue()) {
2422             shuffle.checkIndexes();
2423             throw new AssertionError();
2424         }
2425         return VectorSupport.rearrangeOp(
2426                    getClass(), shuffletype, masktype, int.class, length(),
2427                    this, shuffle, m,
2428                    (v1, s_, m_) -> v1.uOp((i, a) -> {
2429                         int ei = s_.laneSource(i);
2430                         return ei < 0  || !m_.laneIsSet(i) ? 0 : v1.lane(ei);
2431                    }));
2432     }
2433 
2434     /**
2435      * {@inheritDoc} <!--workaround-->
2436      */
2437     @Override
2438     public abstract
2439     IntVector rearrange(VectorShuffle<Integer> s,
2440                                    Vector<Integer> v);
2441 
2442     /*package-private*/
2443     @ForceInline
2444     final
2445     <S extends VectorShuffle<Integer>>
2446     IntVector rearrangeTemplate(Class<S> shuffletype,
2447                                            S shuffle,
2448                                            IntVector v) {
2449         VectorMask<Integer> valid = shuffle.laneIsValid();
2450         @SuppressWarnings("unchecked")
2451         S ws = (S) shuffle.wrapIndexes();
2452         IntVector r0 =
2453             VectorSupport.rearrangeOp(
2454                 getClass(), shuffletype, null, int.class, length(),
2455                 this, ws, null,
2456                 (v0, s_, m_) -> v0.uOp((i, a) -> {
2457                     int ei = s_.laneSource(i);
2458                     return v0.lane(ei);
2459                 }));
2460         IntVector r1 =
2461             VectorSupport.rearrangeOp(
2462                 getClass(), shuffletype, null, int.class, length(),
2463                 v, ws, null,
2464                 (v1, s_, m_) -> v1.uOp((i, a) -> {
2465                     int ei = s_.laneSource(i);
2466                     return v1.lane(ei);
2467                 }));
2468         return r1.blend(r0, valid);
2469     }
2470 
2471     @ForceInline
2472     private final
2473     VectorShuffle<Integer> toShuffle0(IntSpecies dsp) {
2474         int[] a = toArray();
2475         int[] sa = new int[a.length];
2476         for (int i = 0; i < a.length; i++) {
2477             sa[i] = (int) a[i];
2478         }
2479         return VectorShuffle.fromArray(dsp, sa, 0);
2480     }
2481 
2482     /*package-private*/
2483     @ForceInline
2484     final
2485     VectorShuffle<Integer> toShuffleTemplate(Class<?> shuffleType) {
2486         IntSpecies vsp = vspecies();
2487         return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
2488                                      getClass(), int.class, length(),
2489                                      shuffleType, byte.class, length(),
2490                                      this, vsp,
2491                                      IntVector::toShuffle0);
2492     }
2493 
2494     /**
2495      * {@inheritDoc} <!--workaround-->
2496      * @since 19
2497      */
2498     @Override
2499     public abstract
2500     IntVector compress(VectorMask<Integer> m);
2501 
2502     /*package-private*/
2503     @ForceInline
2504     final
2505     <M extends AbstractMask<Integer>>
2506     IntVector compressTemplate(Class<M> masktype, M m) {
2507       m.check(masktype, this);
2508       return (IntVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
2509                                                    int.class, length(), this, m,
2510                                                    (v1, m1) -> compressHelper(v1, m1));
2511     }
2512 
2513     /**
2514      * {@inheritDoc} <!--workaround-->
2515      * @since 19
2516      */
2517     @Override
2518     public abstract
2519     IntVector expand(VectorMask<Integer> m);
2520 
2521     /*package-private*/
2522     @ForceInline
2523     final
2524     <M extends AbstractMask<Integer>>
2525     IntVector expandTemplate(Class<M> masktype, M m) {
2526       m.check(masktype, this);
2527       return (IntVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
2528                                                    int.class, length(), this, m,
2529                                                    (v1, m1) -> expandHelper(v1, m1));
2530     }
2531 
2532 
2533     /**
2534      * {@inheritDoc} <!--workaround-->
2535      */
2536     @Override
2537     public abstract
2538     IntVector selectFrom(Vector<Integer> v);
2539 
2540     /*package-private*/
2541     @ForceInline
2542     final IntVector selectFromTemplate(IntVector v) {
2543         return v.rearrange(this.toShuffle());
2544     }
2545 
2546     /**
2547      * {@inheritDoc} <!--workaround-->
2548      */
2549     @Override
2550     public abstract
2551     IntVector selectFrom(Vector<Integer> s, VectorMask<Integer> m);
2552 
2553     /*package-private*/
2554     @ForceInline
2555     final IntVector selectFromTemplate(IntVector v,
2556                                                   AbstractMask<Integer> m) {
2557         return v.rearrange(this.toShuffle(), m);
2558     }
2559 
2560     /// Ternary operations
2561 
2562     /**
2563      * Blends together the bits of two vectors under
2564      * the control of a third, which supplies mask bits.
2565      *
2566      * This is a lane-wise ternary operation which performs
2567      * a bitwise blending operation {@code (a&~c)|(b&c)}
2568      * to each lane.
2569      *
2570      * This method is also equivalent to the expression
2571      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2572      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2573      *    BITWISE_BLEND}{@code , bits, mask)}.
2574      *
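          * For example (a sketch assuming {@code a}, {@code b}, and {@code sel}
          * are {@code IntVector}s, with {@code sel} supplying the selector bits):
          * <pre>{@code
          *   // for each bit: take the bit from b where sel is 1, else from a
          *   IntVector mixed = a.bitwiseBlend(b, sel);
          * }</pre>
          *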
2575      * @param bits input bits to blend into the current vector
2576      * @param mask a bitwise mask to enable blending of the input bits
2577      * @return the bitwise blend of the given bits into the current vector,
2578      *         under control of the bitwise mask
2579      * @see #bitwiseBlend(int,int)
2580      * @see #bitwiseBlend(int,Vector)
2581      * @see #bitwiseBlend(Vector,int)
2582      * @see VectorOperators#BITWISE_BLEND
2583      * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
2584      */
2585     @ForceInline
2586     public final
2587     IntVector bitwiseBlend(Vector<Integer> bits, Vector<Integer> mask) {
2588         return lanewise(BITWISE_BLEND, bits, mask);
2589     }
2590 
2591     /**
2592      * Blends together the bits of a vector and a scalar under
2593      * the control of another scalar, which supplies mask bits.
2594      *
2595      * This is a lane-wise ternary operation which performs
2596      * a bitwise blending operation {@code (a&~c)|(b&c)}
2597      * to each lane.
2598      *
2599      * This method is also equivalent to the expression
2600      * {@link #lanewise(VectorOperators.Ternary,int,int)
2601      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2602      *    BITWISE_BLEND}{@code , bits, mask)}.
2603      *
2604      * @param bits input bits to blend into the current vector
2605      * @param mask a bitwise mask to enable blending of the input bits
2606      * @return the bitwise blend of the given bits into the current vector,
2607      *         under control of the bitwise mask
2608      * @see #bitwiseBlend(Vector,Vector)
2609      * @see VectorOperators#BITWISE_BLEND
2610      * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
2611      */
2612     @ForceInline
2613     public final
2614     IntVector bitwiseBlend(int bits, int mask) {
2615         return lanewise(BITWISE_BLEND, bits, mask);
2616     }
2617 
2618     /**
2619      * Blends together the bits of a vector and a scalar under
2620      * the control of another vector, which supplies mask bits.
2621      *
2622      * This is a lane-wise ternary operation which performs
2623      * a bitwise blending operation {@code (a&~c)|(b&c)}
2624      * to each lane.
2625      *
2626      * This method is also equivalent to the expression
2627      * {@link #lanewise(VectorOperators.Ternary,int,Vector)
2628      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2629      *    BITWISE_BLEND}{@code , bits, mask)}.
2630      *
2631      * @param bits input bits to blend into the current vector
2632      * @param mask a bitwise mask to enable blending of the input bits
2633      * @return the bitwise blend of the given bits into the current vector,
2634      *         under control of the bitwise mask
2635      * @see #bitwiseBlend(Vector,Vector)
2636      * @see VectorOperators#BITWISE_BLEND
2637      * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
2638      */
2639     @ForceInline
2640     public final
2641     IntVector bitwiseBlend(int bits, Vector<Integer> mask) {
2642         return lanewise(BITWISE_BLEND, bits, mask);
2643     }
2644 
2645     /**
2646      * Blends together the bits of two vectors under
2647      * the control of a scalar, which supplies mask bits.
2648      *
2649      * This is a lane-wise ternary operation which performs
2650      * a bitwise blending operation {@code (a&~c)|(b&c)}
2651      * to each lane.
2652      *
2653      * This method is also equivalent to the expression
2654      * {@link #lanewise(VectorOperators.Ternary,Vector,int)
2655      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2656      *    BITWISE_BLEND}{@code , bits, mask)}.
2657      *
2658      * @param bits input bits to blend into the current vector
2659      * @param mask a bitwise mask to enable blending of the input bits
2660      * @return the bitwise blend of the given bits into the current vector,
2661      *         under control of the bitwise mask
2662      * @see #bitwiseBlend(Vector,Vector)
2663      * @see VectorOperators#BITWISE_BLEND
2664      * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
2665      */
2666     @ForceInline
2667     public final
2668     IntVector bitwiseBlend(Vector<Integer> bits, int mask) {
2669         return lanewise(BITWISE_BLEND, bits, mask);
2670     }
2671 
2672 
2673     // Type specific horizontal reductions
2674 
2675     /**
2676      * Returns a value accumulated from all the lanes of this vector.
2677      *
2678      * This is an associative cross-lane reduction operation which
2679      * applies the specified operation to all the lane elements.
2680      * <p>
2681      * A few reduction operations do not support arbitrary reordering
2682      * of their operands, yet are included here because of their
2683      * usefulness.
2684      * <ul>
2685      * <li>
2686      * In the case of {@code FIRST_NONZERO}, the reduction returns
2687      * the value from the lowest-numbered non-zero lane.
2688      * <li>
2689      * All other reduction operations are fully commutative and
2690      * associative.  The implementation can choose any order of
2691      * processing, yet it will always produce the same result.
2692      * </ul>
2693      *
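          * For example, a horizontal sum (a sketch assuming {@code data} is an
          * {@code int[]} with at least {@code SPECIES.length()} elements and
          * {@code SPECIES} is a species such as {@code IntVector.SPECIES_256}):
          * <pre>{@code
          *   IntVector v = IntVector.fromArray(SPECIES, data, 0);
          *   int sum = v.reduceLanes(VectorOperators.ADD);
          * }</pre>
          *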
2694      * @param op the operation used to combine lane values
2695      * @return the accumulated result
2696      * @throws UnsupportedOperationException if this vector does
2697      *         not support the requested operation
2698      * @see #reduceLanes(VectorOperators.Associative,VectorMask)
2699      * @see #add(Vector)
2700      * @see #mul(Vector)
2701      * @see #min(Vector)
2702      * @see #max(Vector)
2703      * @see #and(Vector)
2704      * @see #or(Vector)
2705      * @see VectorOperators#XOR
2706      * @see VectorOperators#FIRST_NONZERO
2707      */
2708     public abstract int reduceLanes(VectorOperators.Associative op);
2709 
2710     /**
2711      * Returns a value accumulated from selected lanes of this vector,
2712      * controlled by a mask.
2713      *
2714      * This is an associative cross-lane reduction operation which
2715      * applies the specified operation to the selected lane elements.
2716      * <p>
2717      * If no elements are selected, an operation-specific identity
2718      * value is returned.
2719      * <ul>
2720      * <li>
2721      * If the operation is
2722      *  {@code ADD}, {@code XOR}, {@code OR},
2723      * or {@code FIRST_NONZERO},
2724      * then the identity value is zero, the default {@code int} value.
2725      * <li>
2726      * If the operation is {@code MUL},
2727      * then the identity value is one.
2728      * <li>
2729      * If the operation is {@code AND},
2730      * then the identity value is minus one (all bits set).
2731      * <li>
2732      * If the operation is {@code MAX},
2733      * then the identity value is {@code Integer.MIN_VALUE}.
2734      * <li>
2735      * If the operation is {@code MIN},
2736      * then the identity value is {@code Integer.MAX_VALUE}.
2737      * </ul>
2738      * <p>
2739      * A few reduction operations do not support arbitrary reordering
2740      * of their operands, yet are included here because of their
2741      * usefulness.
2742      * <ul>
2743      * <li>
2744      * In the case of {@code FIRST_NONZERO}, the reduction returns
2745      * the value from the lowest-numbered non-zero lane.
2746      * <li>
2747      * All other reduction operations are fully commutative and
2748      * associative.  The implementation can choose any order of
2749      * processing, yet it will always produce the same result.
2750      * </ul>
2751      *
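          * For example, summing only selected lanes (a sketch assuming
          * {@code v} is an {@code IntVector}):
          * <pre>{@code
          *   int sumOfPositives =
          *       v.reduceLanes(VectorOperators.ADD, v.compare(VectorOperators.GT, 0));
          * }</pre>
          *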
2752      * @param op the operation used to combine lane values
2753      * @param m the mask controlling lane selection
2754      * @return the reduced result accumulated from the selected lane values
2755      * @throws UnsupportedOperationException if this vector does
2756      *         not support the requested operation
2757      * @see #reduceLanes(VectorOperators.Associative)
2758      */
2759     public abstract int reduceLanes(VectorOperators.Associative op,
2760                                        VectorMask<Integer> m);
2761 
2762     /*package-private*/
2763     @ForceInline
2764     final
2765     int reduceLanesTemplate(VectorOperators.Associative op,
2766                                Class<? extends VectorMask<Integer>> maskClass,
2767                                VectorMask<Integer> m) {
2768         m.check(maskClass, this);
2769         if (op == FIRST_NONZERO) {
2770             // FIXME:  The JIT should handle this.
2771             IntVector v = broadcast((int) 0).blend(this, m);
2772             return v.reduceLanesTemplate(op);
2773         }
2774         int opc = opCode(op);
2775         return fromBits(VectorSupport.reductionCoerced(
2776             opc, getClass(), maskClass, int.class, length(),
2777             this, m,
2778             REDUCE_IMPL.find(op, opc, IntVector::reductionOperations)));
2779     }
2780 
2781     /*package-private*/
2782     @ForceInline
2783     final
2784     int reduceLanesTemplate(VectorOperators.Associative op) {
2785         if (op == FIRST_NONZERO) {
2786             // FIXME:  The JIT should handle this.
2787             VectorMask<Integer> thisNZ
2788                 = this.viewAsIntegralLanes().compare(NE, (int) 0);
2789             int ft = thisNZ.firstTrue();
2790             return ft < length() ? this.lane(ft) : (int) 0;
2791         }
2792         int opc = opCode(op);
2793         return fromBits(VectorSupport.reductionCoerced(
2794             opc, getClass(), null, int.class, length(),
2795             this, null,
2796             REDUCE_IMPL.find(op, opc, IntVector::reductionOperations)));
2797     }
2798 
2799     private static final
2800     ImplCache<Associative, ReductionOperation<IntVector, VectorMask<Integer>>>
2801         REDUCE_IMPL = new ImplCache<>(Associative.class, IntVector.class);
2802 
2803     private static ReductionOperation<IntVector, VectorMask<Integer>> reductionOperations(int opc_) {
2804         switch (opc_) {
2805             case VECTOR_OP_ADD: return (v, m) ->
2806                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a + b)));
2807             case VECTOR_OP_MUL: return (v, m) ->
2808                     toBits(v.rOp((int)1, m, (i, a, b) -> (int)(a * b)));
2809             case VECTOR_OP_MIN: return (v, m) ->
2810                     toBits(v.rOp(MAX_OR_INF, m, (i, a, b) -> (int) Math.min(a, b)));
2811             case VECTOR_OP_MAX: return (v, m) ->
2812                     toBits(v.rOp(MIN_OR_INF, m, (i, a, b) -> (int) Math.max(a, b)));
2813             case VECTOR_OP_AND: return (v, m) ->
2814                     toBits(v.rOp((int)-1, m, (i, a, b) -> (int)(a & b)));
2815             case VECTOR_OP_OR: return (v, m) ->
2816                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a | b)));
2817             case VECTOR_OP_XOR: return (v, m) ->
2818                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a ^ b)));
2819             default: return null;
2820         }
2821     }
2822 
2823     private static final int MIN_OR_INF = Integer.MIN_VALUE;
2824     private static final int MAX_OR_INF = Integer.MAX_VALUE;
2825 
2826     public @Override abstract long reduceLanesToLong(VectorOperators.Associative op);
2827     public @Override abstract long reduceLanesToLong(VectorOperators.Associative op,
2828                                                      VectorMask<Integer> m);
2829 
2830     // Type specific accessors
2831 
2832     /**
2833      * Gets the lane element at lane index {@code i}.
2834      *
2835      * @param i the lane index
2836      * @return the lane element at lane index {@code i}
2837      * @throws IllegalArgumentException if the index is out of range
2838      * ({@code < 0 || >= length()})
2839      */
2840     public abstract int lane(int i);
2841 
2842     /**
2843      * Replaces the lane element of this vector at lane index {@code i} with
2844      * value {@code e}.
2845      *
2846      * This is a cross-lane operation and behaves as if it returns the result
2847      * of blending this vector with an input vector that is the result of
2848      * broadcasting {@code e} and a mask that has only one lane set at lane
2849      * index {@code i}.
2850      *
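          * For example (a sketch assuming {@code v} is an {@code IntVector}
          * with at least three lanes):
          * <pre>{@code
          *   IntVector patched = v.withLane(2, 42);   // lane 2 becomes 42; v itself is unchanged
          * }</pre>
          *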
2851      * @param i the lane index of the lane element to be replaced
2852      * @param e the value to be placed
2853      * @return the result of replacing the lane element of this vector at lane
2854      * index {@code i} with value {@code e}.
2855      * @throws IllegalArgumentException if the index is out of range
2856      * ({@code < 0 || >= length()})
2857      */
2858     public abstract IntVector withLane(int i, int e);
2859 
2860     // Memory load operations
2861 
2862     /**
2863      * Returns an array of type {@code int[]}
2864      * containing all the lane values.
2865      * The array length is the same as the vector length.
2866      * The array elements are stored in lane order.
2867      * <p>
2868      * This method behaves as if it stores
2869      * this vector into an allocated array
2870      * (using {@link #intoArray(int[], int) intoArray})
2871      * and returns the array as follows:
2872      * <pre>{@code
2873      *   int[] a = new int[this.length()];
2874      *   this.intoArray(a, 0);
2875      *   return a;
2876      * }</pre>
2877      *
2878      * @return an array containing the lane values of this vector
2879      */
2880     @ForceInline
2881     @Override
2882     public final int[] toArray() {
2883         int[] a = new int[vspecies().laneCount()];
2884         intoArray(a, 0);
2885         return a;
2886     }
2887 
2888     /**
2889      * {@inheritDoc} <!--workaround-->
2890      * This is an alias for {@link #toArray()}.
2891      * When this method is used on vectors
2892      * of type {@code IntVector},
2893      * there will be no loss of range or precision.
2894      */
2895     @ForceInline
2896     @Override
2897     public final int[] toIntArray() {
2898         return toArray();
2899     }
2900 
2901     /** {@inheritDoc} <!--workaround-->
2902      * @implNote
2903      * When this method is used on vectors
2904      * of type {@code IntVector},
2905      * there will be no loss of precision or range,
2906      * and so no {@code UnsupportedOperationException} will
2907      * be thrown.
2908      */
2909     @ForceInline
2910     @Override
2911     public final long[] toLongArray() {
2912         int[] a = toArray();
2913         long[] res = new long[a.length];
2914         for (int i = 0; i < a.length; i++) {
2915             int e = a[i];
2916             res[i] = IntSpecies.toIntegralChecked(e, false);
2917         }
2918         return res;
2919     }
2920 
2921     /** {@inheritDoc} <!--workaround-->
2922      * @implNote
     * When this method is used on vectors
2924      * of type {@code IntVector},
2925      * there will be no loss of precision.
2926      */
2927     @ForceInline
2928     @Override
2929     public final double[] toDoubleArray() {
2930         int[] a = toArray();
2931         double[] res = new double[a.length];
2932         for (int i = 0; i < a.length; i++) {
2933             res[i] = (double) a[i];
2934         }
2935         return res;
2936     }
2937 
2938     /**
2939      * Loads a vector from an array of type {@code int[]}
2940      * starting at an offset.
2941      * For each vector lane, where {@code N} is the vector lane index, the
2942      * array element at index {@code offset + N} is placed into the
2943      * resulting vector at lane index {@code N}.
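     * <p>
     * A minimal usage sketch (illustrative only; any species and any
     * sufficiently long array may be substituted):
     * <pre>{@code
     * var species = IntVector.SPECIES_PREFERRED;
     * int[] data = new int[species.length()];
     * IntVector v = IntVector.fromArray(species, data, 0);  // all lanes are zero
     * }</pre>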
2944      *
2945      * @param species species of desired vector
2946      * @param a the array
2947      * @param offset the offset into the array
2948      * @return the vector loaded from an array
2949      * @throws IndexOutOfBoundsException
2950      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2951      *         for any lane {@code N} in the vector
2952      */
2953     @ForceInline
2954     public static
2955     IntVector fromArray(VectorSpecies<Integer> species,
2956                                    int[] a, int offset) {
2957         offset = checkFromIndexSize(offset, species.length(), a.length);
2958         IntSpecies vsp = (IntSpecies) species;
2959         return vsp.dummyVector().fromArray0(a, offset);
2960     }
2961 
2962     /**
2963      * Loads a vector from an array of type {@code int[]}
2964      * starting at an offset and using a mask.
2965      * Lanes where the mask is unset are filled with the default
2966      * value of {@code int} (zero).
2967      * For each vector lane, where {@code N} is the vector lane index,
2968      * if the mask lane at index {@code N} is set then the array element at
2969      * index {@code offset + N} is placed into the resulting vector at lane index
2970      * {@code N}, otherwise the default element value is placed into the
2971      * resulting vector at lane index {@code N}.
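     * <p>
     * A minimal sketch (illustrative only) of loading the in-range tail of
     * an array under a mask computed with
     * {@link VectorSpecies#indexInRange(int,int) indexInRange}:
     * <pre>{@code
     * var species = IntVector.SPECIES_256;
     * int[] a = new int[13];
     * int offset = 8;
     * VectorMask<Integer> m = species.indexInRange(offset, a.length);
     * IntVector v = IntVector.fromArray(species, a, offset, m);
     * // lanes 5..7 are zero; no out-of-bounds access occurs
     * }</pre>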
2972      *
2973      * @param species species of desired vector
2974      * @param a the array
2975      * @param offset the offset into the array
2976      * @param m the mask controlling lane selection
2977      * @return the vector loaded from an array
2978      * @throws IndexOutOfBoundsException
2979      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2980      *         for any lane {@code N} in the vector
2981      *         where the mask is set
2982      */
2983     @ForceInline
2984     public static
2985     IntVector fromArray(VectorSpecies<Integer> species,
2986                                    int[] a, int offset,
2987                                    VectorMask<Integer> m) {
2988         IntSpecies vsp = (IntSpecies) species;
2989         if (offset >= 0 && offset <= (a.length - species.length())) {
2990             return vsp.dummyVector().fromArray0(a, offset, m);
2991         }
2992 
2993         // FIXME: optimize
2994         checkMaskFromIndexSize(offset, vsp, m, 1, a.length);
2995         return vsp.vOp(m, i -> a[offset + i]);
2996     }
2997 
2998     /**
2999      * Gathers a new vector composed of elements from an array of type
3000      * {@code int[]},
3001      * using indexes obtained by adding a fixed {@code offset} to a
3002      * series of secondary offsets from an <em>index map</em>.
3003      * The index map is a contiguous sequence of {@code VLENGTH}
3004      * elements in a second array of {@code int}s, starting at a given
3005      * {@code mapOffset}.
3006      * <p>
3007      * For each vector lane, where {@code N} is the vector lane index,
3008      * the lane is loaded from the array
3009      * element {@code a[f(N)]}, where {@code f(N)} is the
3010      * index mapping expression
     * {@code offset + indexMap[mapOffset + N]}.
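     * <p>
     * An illustrative sketch of a gather (not part of the normative
     * specification), assuming the four-lane species {@code SPECIES_128}:
     * <pre>{@code
     * var species = IntVector.SPECIES_128;
     * int[] a   = {100, 101, 102, 103, 104, 105, 106, 107};
     * int[] map = {6, 4, 2, 0};
     * IntVector v = IntVector.fromArray(species, a, 0, map, 0);
     * // lanes of v are [106, 104, 102, 100]
     * }</pre>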
3012      *
3013      * @param species species of desired vector
3014      * @param a the array
3015      * @param offset the offset into the array, may be negative if relative
3016      * indexes in the index map compensate to produce a value within the
3017      * array bounds
3018      * @param indexMap the index map
3019      * @param mapOffset the offset into the index map
3020      * @return the vector loaded from the indexed elements of the array
3021      * @throws IndexOutOfBoundsException
3022      *         if {@code mapOffset+N < 0}
3023      *         or if {@code mapOffset+N >= indexMap.length},
3024      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3025      *         is an invalid index into {@code a},
3026      *         for any lane {@code N} in the vector
3027      * @see IntVector#toIntArray()
3028      */
3029     @ForceInline
3030     public static
3031     IntVector fromArray(VectorSpecies<Integer> species,
3032                                    int[] a, int offset,
3033                                    int[] indexMap, int mapOffset) {
3034         IntSpecies vsp = (IntSpecies) species;
3035         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3036         Objects.requireNonNull(a);
3037         Objects.requireNonNull(indexMap);
3038         Class<? extends IntVector> vectorType = vsp.vectorType();
3039 
3040         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
3041         IntVector vix = IntVector
3042             .fromArray(isp, indexMap, mapOffset)
3043             .add(offset);
3044 
3045         vix = VectorIntrinsics.checkIndex(vix, a.length);
3046 
3047         return VectorSupport.loadWithMap(
3048             vectorType, null, int.class, vsp.laneCount(),
3049             isp.vectorType(),
3050             a, ARRAY_BASE, vix, null,
3051             a, offset, indexMap, mapOffset, vsp,
3052             (c, idx, iMap, idy, s, vm) ->
3053             s.vOp(n -> c[idx + iMap[idy+n]]));
3054     }
3055 
3056     /**
3057      * Gathers a new vector composed of elements from an array of type
3058      * {@code int[]},
3059      * under the control of a mask, and
3060      * using indexes obtained by adding a fixed {@code offset} to a
3061      * series of secondary offsets from an <em>index map</em>.
3062      * The index map is a contiguous sequence of {@code VLENGTH}
3063      * elements in a second array of {@code int}s, starting at a given
3064      * {@code mapOffset}.
3065      * <p>
3066      * For each vector lane, where {@code N} is the vector lane index,
3067      * if the lane is set in the mask,
3068      * the lane is loaded from the array
3069      * element {@code a[f(N)]}, where {@code f(N)} is the
3070      * index mapping expression
     * {@code offset + indexMap[mapOffset + N]}.
3072      * Unset lanes in the resulting vector are set to zero.
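     * <p>
     * An illustrative sketch (not part of the normative specification),
     * assuming the four-lane species {@code SPECIES_128}:
     * <pre>{@code
     * var species = IntVector.SPECIES_128;
     * int[] a   = {100, 101, 102, 103};
     * int[] map = {3, 2, 1, 0};
     * VectorMask<Integer> m = VectorMask.fromValues(species, true, false, true, false);
     * IntVector v = IntVector.fromArray(species, a, 0, map, 0, m);
     * // lanes of v are [103, 0, 101, 0]
     * }</pre>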
3073      *
3074      * @param species species of desired vector
3075      * @param a the array
3076      * @param offset the offset into the array, may be negative if relative
3077      * indexes in the index map compensate to produce a value within the
3078      * array bounds
3079      * @param indexMap the index map
3080      * @param mapOffset the offset into the index map
3081      * @param m the mask controlling lane selection
3082      * @return the vector loaded from the indexed elements of the array
3083      * @throws IndexOutOfBoundsException
3084      *         if {@code mapOffset+N < 0}
3085      *         or if {@code mapOffset+N >= indexMap.length},
3086      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3087      *         is an invalid index into {@code a},
3088      *         for any lane {@code N} in the vector
3089      *         where the mask is set
3090      * @see IntVector#toIntArray()
3091      */
3092     @ForceInline
3093     public static
3094     IntVector fromArray(VectorSpecies<Integer> species,
3095                                    int[] a, int offset,
3096                                    int[] indexMap, int mapOffset,
3097                                    VectorMask<Integer> m) {
3098         if (m.allTrue()) {
3099             return fromArray(species, a, offset, indexMap, mapOffset);
3100         }
3101         else {
3102             IntSpecies vsp = (IntSpecies) species;
3103             return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
3104         }
3105     }
3106 
3107 
3108 
3109     /**
3110      * Loads a vector from a {@linkplain MemorySegment memory segment}
3111      * starting at an offset into the memory segment.
3112      * Bytes are composed into primitive lane elements according
3113      * to the specified byte order.
3114      * The vector is arranged into lanes according to
3115      * <a href="Vector.html#lane-order">memory ordering</a>.
3116      * <p>
3117      * This method behaves as if it returns the result of calling
3118      * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
3119      * fromMemorySegment()} as follows:
3120      * <pre>{@code
3121      * var m = species.maskAll(true);
3122      * return fromMemorySegment(species, ms, offset, bo, m);
3123      * }</pre>
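     * <p>
     * A minimal usage sketch (illustrative only; note that a heap segment
     * must be backed by a {@code byte[]} array, as specified below):
     * <pre>{@code
     * var species = IntVector.SPECIES_256;
     * byte[] backing = new byte[species.vectorByteSize()];
     * MemorySegment ms = MemorySegment.ofArray(backing);
     * IntVector v = IntVector.fromMemorySegment(species, ms, 0L,
     *                                           ByteOrder.nativeOrder());
     * // all lanes are zero
     * }</pre>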
3124      *
3125      * @param species species of desired vector
3126      * @param ms the memory segment
3127      * @param offset the offset into the memory segment
3128      * @param bo the intended byte order
3129      * @return a vector loaded from the memory segment
3130      * @throws IndexOutOfBoundsException
3131      *         if {@code offset+N*4 < 0}
3132      *         or {@code offset+N*4 >= ms.byteSize()}
3133      *         for any lane {@code N} in the vector
3134      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3135      *         not backed by a {@code byte[]} array.
3136      * @throws IllegalStateException if the memory segment's session is not alive,
3137      *         or if access occurs from a thread other than the thread owning the session.
3138      * @since 19
3139      */
3140     @ForceInline
3141     public static
3142     IntVector fromMemorySegment(VectorSpecies<Integer> species,
3143                                            MemorySegment ms, long offset,
3144                                            ByteOrder bo) {
3145         offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
3146         IntSpecies vsp = (IntSpecies) species;
3147         return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
3148     }
3149 
3150     /**
3151      * Loads a vector from a {@linkplain MemorySegment memory segment}
3152      * starting at an offset into the memory segment
3153      * and using a mask.
3154      * Lanes where the mask is unset are filled with the default
3155      * value of {@code int} (zero).
3156      * Bytes are composed into primitive lane elements according
3157      * to the specified byte order.
3158      * The vector is arranged into lanes according to
3159      * <a href="Vector.html#lane-order">memory ordering</a>.
3160      * <p>
3161      * The following pseudocode illustrates the behavior:
3162      * <pre>{@code
3163      * var slice = ms.asSlice(offset);
3164      * int[] ar = new int[species.length()];
3165      * for (int n = 0; n < ar.length; n++) {
3166      *     if (m.laneIsSet(n)) {
     *         ar[n] = slice.getAtIndex(ValueLayout.JAVA_INT.withBitAlignment(8), n);
3168      *     }
3169      * }
3170      * IntVector r = IntVector.fromArray(species, ar, 0);
3171      * }</pre>
3172      * @implNote
3173      * This operation is likely to be more efficient if
3174      * the specified byte order is the same as
3175      * {@linkplain ByteOrder#nativeOrder()
3176      * the platform native order},
3177      * since this method will not need to reorder
3178      * the bytes of lane values.
3179      *
3180      * @param species species of desired vector
3181      * @param ms the memory segment
3182      * @param offset the offset into the memory segment
3183      * @param bo the intended byte order
3184      * @param m the mask controlling lane selection
3185      * @return a vector loaded from the memory segment
3186      * @throws IndexOutOfBoundsException
3187      *         if {@code offset+N*4 < 0}
3188      *         or {@code offset+N*4 >= ms.byteSize()}
3189      *         for any lane {@code N} in the vector
3190      *         where the mask is set
3191      * @throws IllegalArgumentException if the memory segment is a heap segment that is
3192      *         not backed by a {@code byte[]} array.
3193      * @throws IllegalStateException if the memory segment's session is not alive,
3194      *         or if access occurs from a thread other than the thread owning the session.
3195      * @since 19
3196      */
3197     @ForceInline
3198     public static
3199     IntVector fromMemorySegment(VectorSpecies<Integer> species,
3200                                            MemorySegment ms, long offset,
3201                                            ByteOrder bo,
3202                                            VectorMask<Integer> m) {
3203         IntSpecies vsp = (IntSpecies) species;
3204         if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
3205             return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
3206         }
3207 
3208         // FIXME: optimize
3209         checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
3210         return vsp.ldLongOp(ms, offset, m, IntVector::memorySegmentGet);
3211     }
3212 
3213     // Memory store operations
3214 
3215     /**
3216      * Stores this vector into an array of type {@code int[]}
3217      * starting at an offset.
3218      * <p>
3219      * For each vector lane, where {@code N} is the vector lane index,
3220      * the lane element at index {@code N} is stored into the array
3221      * element {@code a[offset+N]}.
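     * <p>
     * A minimal usage sketch (illustrative only):
     * <pre>{@code
     * var species = IntVector.SPECIES_PREFERRED;
     * int[] out = new int[species.length()];
     * IntVector.broadcast(species, 7).intoArray(out, 0);
     * // every element of out is now 7
     * }</pre>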
3222      *
3223      * @param a the array, of type {@code int[]}
3224      * @param offset the offset into the array
3225      * @throws IndexOutOfBoundsException
3226      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3227      *         for any lane {@code N} in the vector
3228      */
3229     @ForceInline
3230     public final
3231     void intoArray(int[] a, int offset) {
3232         offset = checkFromIndexSize(offset, length(), a.length);
3233         IntSpecies vsp = vspecies();
3234         VectorSupport.store(
3235             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3236             a, arrayAddress(a, offset),
3237             this,
3238             a, offset,
3239             (arr, off, v)
3240             -> v.stOp(arr, (int) off,
3241                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3242     }
3243 
3244     /**
3245      * Stores this vector into an array of type {@code int[]}
3246      * starting at offset and using a mask.
3247      * <p>
3248      * For each vector lane, where {@code N} is the vector lane index,
3249      * the lane element at index {@code N} is stored into the array
3250      * element {@code a[offset+N]}.
3251      * If the mask lane at {@code N} is unset then the corresponding
3252      * array element {@code a[offset+N]} is left unchanged.
3253      * <p>
3254      * Array range checking is done for lanes where the mask is set.
3255      * Lanes where the mask is unset are not stored and do not need
3256      * to correspond to legitimate elements of {@code a}.
3257      * That is, unset lanes may correspond to array indexes less than
3258      * zero or beyond the end of the array.
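     * <p>
     * A minimal sketch (illustrative only) of storing only the in-range
     * tail of an array:
     * <pre>{@code
     * var species = IntVector.SPECIES_256;
     * int[] a = new int[13];
     * int offset = 8;
     * VectorMask<Integer> m = species.indexInRange(offset, a.length);
     * IntVector.broadcast(species, 1).intoArray(a, offset, m);
     * // a[8..12] become 1; lanes 5..7 are not stored
     * }</pre>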
3259      *
3260      * @param a the array, of type {@code int[]}
3261      * @param offset the offset into the array
3262      * @param m the mask controlling lane storage
3263      * @throws IndexOutOfBoundsException
3264      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3265      *         for any lane {@code N} in the vector
3266      *         where the mask is set
3267      */
3268     @ForceInline
3269     public final
3270     void intoArray(int[] a, int offset,
3271                    VectorMask<Integer> m) {
3272         if (m.allTrue()) {
3273             intoArray(a, offset);
3274         } else {
3275             IntSpecies vsp = vspecies();
3276             checkMaskFromIndexSize(offset, vsp, m, 1, a.length);
3277             intoArray0(a, offset, m);
3278         }
3279     }
3280 
3281     /**
3282      * Scatters this vector into an array of type {@code int[]}
3283      * using indexes obtained by adding a fixed {@code offset} to a
3284      * series of secondary offsets from an <em>index map</em>.
3285      * The index map is a contiguous sequence of {@code VLENGTH}
3286      * elements in a second array of {@code int}s, starting at a given
3287      * {@code mapOffset}.
3288      * <p>
3289      * For each vector lane, where {@code N} is the vector lane index,
3290      * the lane element at index {@code N} is stored into the array
3291      * element {@code a[f(N)]}, where {@code f(N)} is the
3292      * index mapping expression
     * {@code offset + indexMap[mapOffset + N]}.
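     * <p>
     * An illustrative sketch of a scatter (not part of the normative
     * specification), assuming the four-lane species {@code SPECIES_128}:
     * <pre>{@code
     * var species = IntVector.SPECIES_128;
     * int[] a   = new int[8];
     * int[] map = {6, 4, 2, 0};
     * IntVector.fromArray(species, new int[]{1, 2, 3, 4}, 0)
     *          .intoArray(a, 0, map, 0);
     * // a is now [4, 0, 3, 0, 2, 0, 1, 0]
     * }</pre>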
3294      *
3295      * @param a the array
3296      * @param offset an offset to combine with the index map offsets
3297      * @param indexMap the index map
3298      * @param mapOffset the offset into the index map
3299      * @throws IndexOutOfBoundsException
3300      *         if {@code mapOffset+N < 0}
3301      *         or if {@code mapOffset+N >= indexMap.length},
3302      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3303      *         is an invalid index into {@code a},
3304      *         for any lane {@code N} in the vector
3305      * @see IntVector#toIntArray()
3306      */
3307     @ForceInline
3308     public final
3309     void intoArray(int[] a, int offset,
3310                    int[] indexMap, int mapOffset) {
3311         IntSpecies vsp = vspecies();
3312         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3313         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3314         IntVector vix = IntVector
3315             .fromArray(isp, indexMap, mapOffset)
3316             .add(offset);
3317 
3318         vix = VectorIntrinsics.checkIndex(vix, a.length);
3319 
3320         VectorSupport.storeWithMap(
3321             vsp.vectorType(), null, vsp.elementType(), vsp.laneCount(),
3322             isp.vectorType(),
3323             a, arrayAddress(a, 0), vix,
3324             this, null,
3325             a, offset, indexMap, mapOffset,
3326             (arr, off, v, map, mo, vm)
3327             -> v.stOp(arr, off,
3328                       (arr_, off_, i, e) -> {
3329                           int j = map[mo + i];
3330                           arr[off + j] = e;
3331                       }));
3332     }
3333 
3334     /**
3335      * Scatters this vector into an array of type {@code int[]},
3336      * under the control of a mask, and
3337      * using indexes obtained by adding a fixed {@code offset} to a
3338      * series of secondary offsets from an <em>index map</em>.
3339      * The index map is a contiguous sequence of {@code VLENGTH}
3340      * elements in a second array of {@code int}s, starting at a given
3341      * {@code mapOffset}.
3342      * <p>
3343      * For each vector lane, where {@code N} is the vector lane index,
3344      * if the mask lane at index {@code N} is set then
3345      * the lane element at index {@code N} is stored into the array
3346      * element {@code a[f(N)]}, where {@code f(N)} is the
3347      * index mapping expression
     * {@code offset + indexMap[mapOffset + N]}.
3349      *
3350      * @param a the array
3351      * @param offset an offset to combine with the index map offsets
3352      * @param indexMap the index map
3353      * @param mapOffset the offset into the index map
3354      * @param m the mask
3355      * @throws IndexOutOfBoundsException
3356      *         if {@code mapOffset+N < 0}
3357      *         or if {@code mapOffset+N >= indexMap.length},
3358      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3359      *         is an invalid index into {@code a},
3360      *         for any lane {@code N} in the vector
3361      *         where the mask is set
3362      * @see IntVector#toIntArray()
3363      */
3364     @ForceInline
3365     public final
3366     void intoArray(int[] a, int offset,
3367                    int[] indexMap, int mapOffset,
3368                    VectorMask<Integer> m) {
3369         if (m.allTrue()) {
3370             intoArray(a, offset, indexMap, mapOffset);
3371         }
3372         else {
3373             intoArray0(a, offset, indexMap, mapOffset, m);
3374         }
3375     }
3376 
3377 
3378 
3379     /**
3380      * {@inheritDoc} <!--workaround-->
3381      * @since 19
3382      */
3383     @Override
3384     @ForceInline
3385     public final
3386     void intoMemorySegment(MemorySegment ms, long offset,
3387                            ByteOrder bo) {
3388         if (ms.isReadOnly()) {
3389             throw new UnsupportedOperationException("Attempt to write a read-only segment");
3390         }
3391 
3392         offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
3393         maybeSwap(bo).intoMemorySegment0(ms, offset);
3394     }
3395 
3396     /**
3397      * {@inheritDoc} <!--workaround-->
3398      * @since 19
3399      */
3400     @Override
3401     @ForceInline
3402     public final
3403     void intoMemorySegment(MemorySegment ms, long offset,
3404                            ByteOrder bo,
3405                            VectorMask<Integer> m) {
3406         if (m.allTrue()) {
3407             intoMemorySegment(ms, offset, bo);
3408         } else {
3409             if (ms.isReadOnly()) {
3410                 throw new UnsupportedOperationException("Attempt to write a read-only segment");
3411             }
3412             IntSpecies vsp = vspecies();
3413             checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
3414             maybeSwap(bo).intoMemorySegment0(ms, offset, m);
3415         }
3416     }
3417 
3418     // ================================================
3419 
3420     // Low-level memory operations.
3421     //
3422     // Note that all of these operations *must* inline into a context
3423     // where the exact species of the involved vector is a
3424     // compile-time constant.  Otherwise, the intrinsic generation
3425     // will fail and performance will suffer.
3426     //
3427     // In many cases this is achieved by re-deriving a version of the
3428     // method in each concrete subclass (per species).  The re-derived
3429     // method simply calls one of these generic methods, with exact
3430     // parameters for the controlling metadata, which is either a
3431     // typed vector or constant species instance.
3432 
3433     // Unchecked loading operations in native byte order.
3434     // Caller is responsible for applying index checks, masking, and
3435     // byte swapping.
3436 
3437     /*package-private*/
3438     abstract
3439     IntVector fromArray0(int[] a, int offset);
3440     @ForceInline
3441     final
3442     IntVector fromArray0Template(int[] a, int offset) {
3443         IntSpecies vsp = vspecies();
3444         return VectorSupport.load(
3445             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3446             a, arrayAddress(a, offset),
3447             a, offset, vsp,
3448             (arr, off, s) -> s.ldOp(arr, (int) off,
3449                                     (arr_, off_, i) -> arr_[off_ + i]));
3450     }
3451 
3452     /*package-private*/
3453     abstract
3454     IntVector fromArray0(int[] a, int offset, VectorMask<Integer> m);
3455     @ForceInline
3456     final
3457     <M extends VectorMask<Integer>>
3458     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3459         m.check(species());
3460         IntSpecies vsp = vspecies();
3461         return VectorSupport.loadMasked(
3462             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3463             a, arrayAddress(a, offset), m,
3464             a, offset, vsp,
3465             (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
3466                                         (arr_, off_, i) -> arr_[off_ + i]));
3467     }
3468 
3469     /*package-private*/
3470     abstract
3471     IntVector fromArray0(int[] a, int offset,
3472                                     int[] indexMap, int mapOffset,
3473                                     VectorMask<Integer> m);
3474     @ForceInline
3475     final
3476     <M extends VectorMask<Integer>>
3477     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset,
3478                                             int[] indexMap, int mapOffset, M m) {
3479         IntSpecies vsp = vspecies();
3480         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3481         Objects.requireNonNull(a);
3482         Objects.requireNonNull(indexMap);
3483         m.check(vsp);
3484         Class<? extends IntVector> vectorType = vsp.vectorType();
3485 
3486         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
3487         IntVector vix = IntVector
3488             .fromArray(isp, indexMap, mapOffset)
3489             .add(offset);
3490 
3491         // FIXME: Check index under mask controlling.
3492         vix = VectorIntrinsics.checkIndex(vix, a.length);
3493 
3494         return VectorSupport.loadWithMap(
3495             vectorType, maskClass, int.class, vsp.laneCount(),
3496             isp.vectorType(),
3497             a, ARRAY_BASE, vix, m,
3498             a, offset, indexMap, mapOffset, vsp,
3499             (c, idx, iMap, idy, s, vm) ->
3500             s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3501     }
3502 
3503 
3504 
3505     abstract
3506     IntVector fromMemorySegment0(MemorySegment bb, long offset);
3507     @ForceInline
3508     final
3509     IntVector fromMemorySegment0Template(MemorySegment ms, long offset) {
3510         IntSpecies vsp = vspecies();
3511         return ScopedMemoryAccess.loadFromMemorySegment(
3512                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3513                 (MemorySegmentProxy) ms, offset, vsp,
3514                 (msp, off, s) -> {
3515                     return s.ldLongOp((MemorySegment) msp, off, IntVector::memorySegmentGet);
3516                 });
3517     }
3518 
3519     abstract
3520     IntVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Integer> m);
3521     @ForceInline
3522     final
3523     <M extends VectorMask<Integer>>
3524     IntVector fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
3525         IntSpecies vsp = vspecies();
3526         m.check(vsp);
3527         return ScopedMemoryAccess.loadFromMemorySegmentMasked(
3528                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3529                 (MemorySegmentProxy) ms, offset, m, vsp,
3530                 (msp, off, s, vm) -> {
3531                     return s.ldLongOp((MemorySegment) msp, off, vm, IntVector::memorySegmentGet);
3532                 });
3533     }
3534 
3535     // Unchecked storing operations in native byte order.
3536     // Caller is responsible for applying index checks, masking, and
3537     // byte swapping.
3538 
3539     abstract
3540     void intoArray0(int[] a, int offset);
3541     @ForceInline
3542     final
3543     void intoArray0Template(int[] a, int offset) {
3544         IntSpecies vsp = vspecies();
3545         VectorSupport.store(
3546             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3547             a, arrayAddress(a, offset),
3548             this, a, offset,
3549             (arr, off, v)
3550             -> v.stOp(arr, (int) off,
3551                       (arr_, off_, i, e) -> arr_[off_+i] = e));
3552     }
3553 
3554     abstract
3555     void intoArray0(int[] a, int offset, VectorMask<Integer> m);
3556     @ForceInline
3557     final
3558     <M extends VectorMask<Integer>>
3559     void intoArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3560         m.check(species());
3561         IntSpecies vsp = vspecies();
3562         VectorSupport.storeMasked(
3563             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3564             a, arrayAddress(a, offset),
3565             this, m, a, offset,
3566             (arr, off, v, vm)
3567             -> v.stOp(arr, (int) off, vm,
3568                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3569     }
3570 
3571     abstract
3572     void intoArray0(int[] a, int offset,
3573                     int[] indexMap, int mapOffset,
3574                     VectorMask<Integer> m);
3575     @ForceInline
3576     final
3577     <M extends VectorMask<Integer>>
3578     void intoArray0Template(Class<M> maskClass, int[] a, int offset,
3579                             int[] indexMap, int mapOffset, M m) {
3580         m.check(species());
3581         IntSpecies vsp = vspecies();
3582         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3583         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3584         IntVector vix = IntVector
3585             .fromArray(isp, indexMap, mapOffset)
3586             .add(offset);
3587 
3588         // FIXME: Check index under mask controlling.
3589         vix = VectorIntrinsics.checkIndex(vix, a.length);
3590 
3591         VectorSupport.storeWithMap(
3592             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3593             isp.vectorType(),
3594             a, arrayAddress(a, 0), vix,
3595             this, m,
3596             a, offset, indexMap, mapOffset,
3597             (arr, off, v, map, mo, vm)
3598             -> v.stOp(arr, off, vm,
3599                       (arr_, off_, i, e) -> {
3600                           int j = map[mo + i];
3601                           arr[off + j] = e;
3602                       }));
3603     }
3604 
3605 
3606     @ForceInline
3607     final
3608     void intoMemorySegment0(MemorySegment ms, long offset) {
3609         IntSpecies vsp = vspecies();
3610         ScopedMemoryAccess.storeIntoMemorySegment(
3611                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3612                 this,
3613                 (MemorySegmentProxy) ms, offset,
3614                 (msp, off, v) -> {
3615                     v.stLongOp((MemorySegment) msp, off, IntVector::memorySegmentSet);
3616                 });
3617     }
3618 
3619     abstract
3620     void intoMemorySegment0(MemorySegment bb, long offset, VectorMask<Integer> m);
3621     @ForceInline
3622     final
3623     <M extends VectorMask<Integer>>
3624     void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
3625         IntSpecies vsp = vspecies();
3626         m.check(vsp);
3627         ScopedMemoryAccess.storeIntoMemorySegmentMasked(
3628                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3629                 this, m,
3630                 (MemorySegmentProxy) ms, offset,
3631                 (msp, off, v, vm) -> {
3632                     v.stLongOp((MemorySegment) msp, off, vm, IntVector::memorySegmentSet);
3633                 });
3634     }
3635 
3636 
3637     // End of low-level memory operations.
3638 
3639     private static
3640     void checkMaskFromIndexSize(int offset,
3641                                 IntSpecies vsp,
3642                                 VectorMask<Integer> m,
3643                                 int scale,
3644                                 int limit) {
3645         ((AbstractMask<Integer>)m)
3646             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3647     }
3648 
3649     private static
3650     void checkMaskFromIndexSize(long offset,
3651                                 IntSpecies vsp,
3652                                 VectorMask<Integer> m,
3653                                 int scale,
3654                                 long limit) {
3655         ((AbstractMask<Integer>)m)
3656             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3657     }
3658 
3659     @ForceInline
3660     private void conditionalStoreNYI(int offset,
3661                                      IntSpecies vsp,
3662                                      VectorMask<Integer> m,
3663                                      int scale,
3664                                      int limit) {
3665         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3666             String msg =
3667                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3668                               offset, limit, m, vsp);
3669             throw new AssertionError(msg);
3670         }
3671     }
3672 
3673     /*package-private*/
3674     @Override
3675     @ForceInline
3676     final
3677     IntVector maybeSwap(ByteOrder bo) {
3678         if (bo != NATIVE_ENDIAN) {
3679             return this.reinterpretAsBytes()
3680                 .rearrange(swapBytesShuffle())
3681                 .reinterpretAsInts();
3682         }
3683         return this;
3684     }
3685 
3686     static final int ARRAY_SHIFT =
3687         31 - Integer.numberOfLeadingZeros(Unsafe.ARRAY_INT_INDEX_SCALE);
3688     static final long ARRAY_BASE =
3689         Unsafe.ARRAY_INT_BASE_OFFSET;
3690 
3691     @ForceInline
3692     static long arrayAddress(int[] a, int index) {
3693         return ARRAY_BASE + (((long)index) << ARRAY_SHIFT);
3694     }
3695 
3696 
3697 
3698     @ForceInline
3699     static long byteArrayAddress(byte[] a, int index) {
3700         return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
3701     }
3702 
3703     // ================================================
3704 
3705     /// Reinterpreting view methods:
3706     //   lanewise reinterpret: viewAsXVector()
3707     //   keep shape, redraw lanes: reinterpretAsEs()
3708 
3709     /**
3710      * {@inheritDoc} <!--workaround-->
3711      */
3712     @ForceInline
3713     @Override
3714     public final ByteVector reinterpretAsBytes() {
3715          // Going to ByteVector, pay close attention to byte order.
3716          assert(REGISTER_ENDIAN == ByteOrder.LITTLE_ENDIAN);
3717          return asByteVectorRaw();
3718          //return asByteVectorRaw().rearrange(swapBytesShuffle());
3719     }
3720 
3721     /**
3722      * {@inheritDoc} <!--workaround-->
3723      */
3724     @ForceInline
3725     @Override
3726     public final IntVector viewAsIntegralLanes() {
3727         return this;
3728     }
3729 
3730     /**
3731      * {@inheritDoc} <!--workaround-->
3732      */
3733     @ForceInline
3734     @Override
3735     public final
3736     FloatVector
3737     viewAsFloatingLanes() {
3738         LaneType flt = LaneType.INT.asFloating();
3739         return (FloatVector) asVectorRaw(flt);
3740     }
3741 
3742     // ================================================
3743 
3744     /// Object methods: toString, equals, hashCode
3745     //
3746     // Object methods are defined as if via Arrays.toString, etc.,
3747     // is applied to the array of elements.  Two equal vectors
3748     // are required to have equal species and equal lane values.
3749 
3750     /**
3751      * Returns a string representation of this vector, of the form
3752      * {@code "[0,1,2...]"}, reporting the lane values of this vector,
3753      * in lane order.
3754      *
3755      * The string is produced as if by a call to {@link
3756      * java.util.Arrays#toString(int[]) Arrays.toString()},
3757      * as appropriate to the {@code int} array returned by
3758      * {@link #toArray this.toArray()}.
3759      *
3760      * @return a string of the form {@code "[0,1,2...]"}
3761      * reporting the lane values of this vector
3762      */
3763     @Override
3764     @ForceInline
3765     public final
3766     String toString() {
3767         // now that toArray is strongly typed, we can define this
3768         return Arrays.toString(toArray());
3769     }
3770 
3771     /**
3772      * {@inheritDoc} <!--workaround-->
3773      */
3774     @Override
3775     @ForceInline
3776     public final
3777     boolean equals(Object obj) {
3778         if (obj instanceof Vector) {
3779             Vector<?> that = (Vector<?>) obj;
3780             if (this.species().equals(that.species())) {
3781                 return this.eq(that.check(this.species())).allTrue();
3782             }
3783         }
3784         return false;
3785     }
3786 
3787     /**
3788      * {@inheritDoc} <!--workaround-->
3789      */
3790     @Override
3791     @ForceInline
3792     public final
3793     int hashCode() {
3794         // now that toArray is strongly typed, we can define this
3795         return Objects.hash(species(), Arrays.hashCode(toArray()));
3796     }
3797 
3798     // ================================================
3799 
3800     // Species
3801 
3802     /**
3803      * Class representing {@link IntVector}'s of the same {@link VectorShape VectorShape}.
3804      */
3805     /*package-private*/
3806     static final class IntSpecies extends AbstractSpecies<Integer> {
3807         private IntSpecies(VectorShape shape,
3808                 Class<? extends IntVector> vectorType,
3809                 Class<? extends AbstractMask<Integer>> maskType,
3810                 Function<Object, IntVector> vectorFactory) {
3811             super(shape, LaneType.of(int.class),
3812                   vectorType, maskType,
3813                   vectorFactory);
3814             assert(this.elementSize() == Integer.SIZE);
3815         }
3816 
3817         // Specializing overrides:
3818 
3819         @Override
3820         @ForceInline
3821         public final Class<Integer> elementType() {
3822             return int.class;
3823         }
3824 
3825         @Override
3826         @ForceInline
3827         final Class<Integer> genericElementType() {
3828             return Integer.class;
3829         }
3830 
3831         @SuppressWarnings("unchecked")
3832         @Override
3833         @ForceInline
3834         public final Class<? extends IntVector> vectorType() {
3835             return (Class<? extends IntVector>) vectorType;
3836         }
3837 
3838         @Override
3839         @ForceInline
3840         public final long checkValue(long e) {
3841             longToElementBits(e);  // only for exception
3842             return e;
3843         }
3844 
3845         /*package-private*/
3846         @Override
3847         @ForceInline
3848         final IntVector broadcastBits(long bits) {
3849             return (IntVector)
3850                 VectorSupport.fromBitsCoerced(
3851                     vectorType, int.class, laneCount,
3852                     bits, MODE_BROADCAST, this,
3853                     (bits_, s_) -> s_.rvOp(i -> bits_));
3854         }
3855 
3856         /*package-private*/
3857         @ForceInline
3858         final IntVector broadcast(int e) {
3859             return broadcastBits(toBits(e));
3860         }
3861 
3862         @Override
3863         @ForceInline
3864         public final IntVector broadcast(long e) {
3865             return broadcastBits(longToElementBits(e));
3866         }
3867 
3868         /*package-private*/
3869         final @Override
3870         @ForceInline
3871         long longToElementBits(long value) {
3872             // Do the conversion, and then test it for failure.
3873             int e = (int) value;
3874             if ((long) e != value) {
3875                 throw badElementBits(value, e);
3876             }
3877             return toBits(e);
3878         }
3879 
3880         /*package-private*/
3881         @ForceInline
3882         static long toIntegralChecked(int e, boolean convertToInt) {
3883             long value = convertToInt ? (int) e : (long) e;
3884             if ((int) value != e) {
3885                 throw badArrayBits(e, convertToInt, value);
3886             }
3887             return value;
3888         }
3889 
3890         /* this non-public one is for internal conversions */
3891         @Override
3892         @ForceInline
3893         final IntVector fromIntValues(int[] values) {
3894             VectorIntrinsics.requireLength(values.length, laneCount);
3895             int[] va = new int[laneCount()];
3896             for (int i = 0; i < va.length; i++) {
3897                 int lv = values[i];
3898                 int v = (int) lv;
3899                 va[i] = v;
3900                 if ((int)v != lv) {
3901                     throw badElementBits(lv, v);
3902                 }
3903             }
3904             return dummyVector().fromArray0(va, 0);
3905         }
3906 
3907         // Virtual constructors
3908 
3909         @ForceInline
3910         @Override final
3911         public IntVector fromArray(Object a, int offset) {
3912             // User entry point:  Be careful with inputs.
3913             return IntVector
3914                 .fromArray(this, (int[]) a, offset);
3915         }
3916 
3917         @ForceInline
3918         @Override final
3919         IntVector dummyVector() {
3920             return (IntVector) super.dummyVector();
3921         }
3922 
3923         /*package-private*/
3924         final @Override
3925         @ForceInline
3926         IntVector rvOp(RVOp f) {
3927             int[] res = new int[laneCount()];
3928             for (int i = 0; i < res.length; i++) {
3929                 int bits = (int) f.apply(i);
3930                 res[i] = fromBits(bits);
3931             }
3932             return dummyVector().vectorFactory(res);
3933         }
3934 
3935         IntVector vOp(FVOp f) {
3936             int[] res = new int[laneCount()];
3937             for (int i = 0; i < res.length; i++) {
3938                 res[i] = f.apply(i);
3939             }
3940             return dummyVector().vectorFactory(res);
3941         }
3942 
3943         IntVector vOp(VectorMask<Integer> m, FVOp f) {
3944             int[] res = new int[laneCount()];
3945             boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
3946             for (int i = 0; i < res.length; i++) {
3947                 if (mbits[i]) {
3948                     res[i] = f.apply(i);
3949                 }
3950             }
3951             return dummyVector().vectorFactory(res);
3952         }
3953 
3954         /*package-private*/
3955         @ForceInline
3956         <M> IntVector ldOp(M memory, int offset,
3957                                       FLdOp<M> f) {
3958             return dummyVector().ldOp(memory, offset, f);
3959         }
3960 
3961         /*package-private*/
3962         @ForceInline
3963         <M> IntVector ldOp(M memory, int offset,
3964                                       VectorMask<Integer> m,
3965                                       FLdOp<M> f) {
3966             return dummyVector().ldOp(memory, offset, m, f);
3967         }
3968 
3969         /*package-private*/
3970         @ForceInline
3971         IntVector ldLongOp(MemorySegment memory, long offset,
3972                                       FLdLongOp f) {
3973             return dummyVector().ldLongOp(memory, offset, f);
3974         }
3975 
3976         /*package-private*/
3977         @ForceInline
3978         IntVector ldLongOp(MemorySegment memory, long offset,
3979                                       VectorMask<Integer> m,
3980                                       FLdLongOp f) {
3981             return dummyVector().ldLongOp(memory, offset, m, f);
3982         }
3983 
3984         /*package-private*/
3985         @ForceInline
3986         <M> void stOp(M memory, int offset, FStOp<M> f) {
3987             dummyVector().stOp(memory, offset, f);
3988         }
3989 
3990         /*package-private*/
3991         @ForceInline
3992         <M> void stOp(M memory, int offset,
3993                       AbstractMask<Integer> m,
3994                       FStOp<M> f) {
3995             dummyVector().stOp(memory, offset, m, f);
3996         }
3997 
3998         /*package-private*/
3999         @ForceInline
4000         void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
4001             dummyVector().stLongOp(memory, offset, f);
4002         }
4003 
4004         /*package-private*/
4005         @ForceInline
4006         void stLongOp(MemorySegment memory, long offset,
4007                       AbstractMask<Integer> m,
4008                       FStLongOp f) {
4009             dummyVector().stLongOp(memory, offset, m, f);
4010         }
4011 
4012         // N.B. Make sure these constant vectors and
4013         // masks load up correctly into registers.
4014         //
4015         // Also, see if we can avoid all that switching.
4016         // Could we cache both vectors and both masks in
4017         // this species object?
4018 
4019         // Zero and iota vector access
4020         @Override
4021         @ForceInline
4022         public final IntVector zero() {
4023             if ((Class<?>) vectorType() == IntMaxVector.class)
4024                 return IntMaxVector.ZERO;
4025             switch (vectorBitSize()) {
4026                 case 64: return Int64Vector.ZERO;
4027                 case 128: return Int128Vector.ZERO;
4028                 case 256: return Int256Vector.ZERO;
4029                 case 512: return Int512Vector.ZERO;
4030             }
4031             throw new AssertionError();
4032         }
4033 
4034         @Override
4035         @ForceInline
4036         public final IntVector iota() {
4037             if ((Class<?>) vectorType() == IntMaxVector.class)
4038                 return IntMaxVector.IOTA;
4039             switch (vectorBitSize()) {
4040                 case 64: return Int64Vector.IOTA;
4041                 case 128: return Int128Vector.IOTA;
4042                 case 256: return Int256Vector.IOTA;
4043                 case 512: return Int512Vector.IOTA;
4044             }
4045             throw new AssertionError();
4046         }
4047 
4048         // Mask access
4049         @Override
4050         @ForceInline
4051         public final VectorMask<Integer> maskAll(boolean bit) {
4052             if ((Class<?>) vectorType() == IntMaxVector.class)
4053                 return IntMaxVector.IntMaxMask.maskAll(bit);
4054             switch (vectorBitSize()) {
4055                 case 64: return Int64Vector.Int64Mask.maskAll(bit);
4056                 case 128: return Int128Vector.Int128Mask.maskAll(bit);
4057                 case 256: return Int256Vector.Int256Mask.maskAll(bit);
4058                 case 512: return Int512Vector.Int512Mask.maskAll(bit);
4059             }
4060             throw new AssertionError();
4061         }
4062     }
4063 
4064     /**
4065      * Finds a species for an element type of {@code int} and shape.
4066      *
4067      * @param s the shape
4068      * @return a species for an element type of {@code int} and shape
4069      * @throws IllegalArgumentException if no such species exists for the shape
4070      */
4071     static IntSpecies species(VectorShape s) {
4072         Objects.requireNonNull(s);
4073         switch (s.switchKey) {
4074             case VectorShape.SK_64_BIT: return (IntSpecies) SPECIES_64;
4075             case VectorShape.SK_128_BIT: return (IntSpecies) SPECIES_128;
4076             case VectorShape.SK_256_BIT: return (IntSpecies) SPECIES_256;
4077             case VectorShape.SK_512_BIT: return (IntSpecies) SPECIES_512;
4078             case VectorShape.SK_Max_BIT: return (IntSpecies) SPECIES_MAX;
4079             default: throw new IllegalArgumentException("Bad shape: " + s);
4080         }
4081     }
4082 
4083     /** Species representing {@link IntVector}s of {@link VectorShape#S_64_BIT VectorShape.S_64_BIT}. */
4084     public static final VectorSpecies<Integer> SPECIES_64
4085         = new IntSpecies(VectorShape.S_64_BIT,
4086                             Int64Vector.class,
4087                             Int64Vector.Int64Mask.class,
4088                             Int64Vector::new);
4089 
4090     /** Species representing {@link IntVector}s of {@link VectorShape#S_128_BIT VectorShape.S_128_BIT}. */
4091     public static final VectorSpecies<Integer> SPECIES_128
4092         = new IntSpecies(VectorShape.S_128_BIT,
4093                             Int128Vector.class,
4094                             Int128Vector.Int128Mask.class,
4095                             Int128Vector::new);
4096 
4097     /** Species representing {@link IntVector}s of {@link VectorShape#S_256_BIT VectorShape.S_256_BIT}. */
4098     public static final VectorSpecies<Integer> SPECIES_256
4099         = new IntSpecies(VectorShape.S_256_BIT,
4100                             Int256Vector.class,
4101                             Int256Vector.Int256Mask.class,
4102                             Int256Vector::new);
4103 
4104     /** Species representing {@link IntVector}s of {@link VectorShape#S_512_BIT VectorShape.S_512_BIT}. */
4105     public static final VectorSpecies<Integer> SPECIES_512
4106         = new IntSpecies(VectorShape.S_512_BIT,
4107                             Int512Vector.class,
4108                             Int512Vector.Int512Mask.class,
4109                             Int512Vector::new);
4110 
4111     /** Species representing {@link IntVector}s of {@link VectorShape#S_Max_BIT VectorShape.S_Max_BIT}. */
4112     public static final VectorSpecies<Integer> SPECIES_MAX
4113         = new IntSpecies(VectorShape.S_Max_BIT,
4114                             IntMaxVector.class,
4115                             IntMaxVector.IntMaxMask.class,
4116                             IntMaxVector::new);
4117 
4118     /**
4119      * Preferred species for {@link IntVector}s.
4120      * A preferred species is a species of maximal bit-size for the platform.
4121      */
4122     public static final VectorSpecies<Integer> SPECIES_PREFERRED
4123         = (IntSpecies) VectorSpecies.ofPreferred(int.class);
4124 }