1 /*
   2  * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.  Oracle designates this
   8  * particular file as subject to the "Classpath" exception as provided
   9  * by Oracle in the LICENSE file that accompanied this code.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  */
  25 package jdk.incubator.vector;
  26 
  27 import java.nio.ByteBuffer;
  28 import java.nio.ByteOrder;
  29 import java.nio.ReadOnlyBufferException;
  30 import java.util.Arrays;
  31 import java.util.Objects;
  32 import java.util.function.Function;
  33 import java.util.function.UnaryOperator;
  34 
  35 import jdk.internal.misc.ScopedMemoryAccess;
  36 import jdk.internal.misc.Unsafe;
  37 import jdk.internal.vm.annotation.ForceInline;
  38 import jdk.internal.vm.vector.VectorSupport;
  39 
  40 import static jdk.internal.vm.vector.VectorSupport.*;
  41 import static jdk.incubator.vector.VectorIntrinsics.*;
  42 
  43 import static jdk.incubator.vector.VectorOperators.*;
  44 
  45 // -- This file was mechanically generated: Do not edit! -- //
  46 
  47 /**
  48  * A specialized {@link Vector} representing an ordered immutable sequence of
  49  * {@code int} values.
  50  */
  51 @SuppressWarnings("cast")  // warning: redundant cast
  52 public abstract class IntVector extends AbstractVector<Integer> {
  53 
  54     IntVector(int[] vec) {
  55         super(vec);
  56     }
  57 
  58     static final int FORBID_OPCODE_KIND = VO_ONLYFP;
  59 
  60     @ForceInline
  61     static int opCode(Operator op) {
  62         return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
  63     }
  64     @ForceInline
  65     static int opCode(Operator op, int requireKind) {
  66         requireKind |= VO_OPCODE_VALID;
  67         return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
  68     }
  69     @ForceInline
  70     static boolean opKind(Operator op, int bit) {
  71         return VectorOperators.opKind(op, bit);
  72     }
  73 
  74     // Virtualized factories and operators,
  75     // coded with portable definitions.
  76     // These are all @ForceInline in case
  77     // they need to be used performantly.
  78     // The various shape-specific subclasses
  79     // also specialize them by wrapping
  80     // them in a call like this:
  81     //    return (Byte128Vector)
  82     //       super.bOp((Byte128Vector) o);
  83     // The purpose of that is to forcibly inline
  84     // the generic definition from this file
  85     // into a sharply type- and size-specific
  86     // wrapper in the subclass file, so that
  87     // the JIT can specialize the code.
  88     // The code is only inlined and expanded
  89     // if it gets hot.  Think of it as a cheap
  90     // and lazy version of C++ templates.
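
    // For example, the int-specialized shape classes wrap the unary template
    // in the same way (a sketch of the generated wrapper; the exact code lives
    // in the per-shape files such as Int128Vector.java):
    //
    //   @ForceInline
    //   final @Override
    //   Int128Vector uOp(FUnOp f) {
    //       return (Int128Vector) super.uOpTemplate(f);
    //   }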
  91 
  92     // Virtualized getter
  93 
  94     /*package-private*/
  95     abstract int[] vec();
  96 
  97     // Virtualized constructors
  98 
  99     /**
 100      * Build a vector directly using my own constructor.
 101      * It is an error if the array is aliased elsewhere.
 102      */
 103     /*package-private*/
 104     abstract IntVector vectorFactory(int[] vec);
 105 
 106     /**
 107      * Build a mask directly using my species.
 108      * It is an error if the array is aliased elsewhere.
 109      */
 110     /*package-private*/
 111     @ForceInline
 112     final
 113     AbstractMask<Integer> maskFactory(boolean[] bits) {
 114         return vspecies().maskFactory(bits);
 115     }
 116 
 117     // Constant loader (takes dummy as vector arg)
 118     interface FVOp {
 119         int apply(int i);
 120     }
 121 
 122     /*package-private*/
 123     @ForceInline
 124     final
 125     IntVector vOp(FVOp f) {
 126         int[] res = new int[length()];
 127         for (int i = 0; i < res.length; i++) {
 128             res[i] = f.apply(i);
 129         }
 130         return vectorFactory(res);
 131     }
 132 
 133     @ForceInline
 134     final
 135     IntVector vOp(VectorMask<Integer> m, FVOp f) {
 136         int[] res = new int[length()];
 137         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 138         for (int i = 0; i < res.length; i++) {
 139             if (mbits[i]) {
 140                 res[i] = f.apply(i);
 141             }
 142         }
 143         return vectorFactory(res);
 144     }
 145 
 146     // Unary operator
 147 
 148     /*package-private*/
 149     interface FUnOp {
 150         int apply(int i, int a);
 151     }
 152 
 153     /*package-private*/
 154     abstract
 155     IntVector uOp(FUnOp f);
 156     @ForceInline
 157     final
 158     IntVector uOpTemplate(FUnOp f) {
 159         int[] vec = vec();
 160         int[] res = new int[length()];
 161         for (int i = 0; i < res.length; i++) {
 162             res[i] = f.apply(i, vec[i]);
 163         }
 164         return vectorFactory(res);
 165     }
 166 
 167     /*package-private*/
 168     abstract
 169     IntVector uOp(VectorMask<Integer> m,
 170                              FUnOp f);
 171     @ForceInline
 172     final
 173     IntVector uOpTemplate(VectorMask<Integer> m,
 174                                      FUnOp f) {
 175         if (m == null) {
 176             return uOpTemplate(f);
 177         }
 178         int[] vec = vec();
 179         int[] res = new int[length()];
 180         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 181         for (int i = 0; i < res.length; i++) {
 182             res[i] = mbits[i] ? f.apply(i, vec[i]) : vec[i];
 183         }
 184         return vectorFactory(res);
 185     }
 186 
 187     // Binary operator
 188 
 189     /*package-private*/
 190     interface FBinOp {
 191         int apply(int i, int a, int b);
 192     }
 193 
 194     /*package-private*/
 195     abstract
 196     IntVector bOp(Vector<Integer> o,
 197                              FBinOp f);
 198     @ForceInline
 199     final
 200     IntVector bOpTemplate(Vector<Integer> o,
 201                                      FBinOp f) {
 202         int[] res = new int[length()];
 203         int[] vec1 = this.vec();
 204         int[] vec2 = ((IntVector)o).vec();
 205         for (int i = 0; i < res.length; i++) {
 206             res[i] = f.apply(i, vec1[i], vec2[i]);
 207         }
 208         return vectorFactory(res);
 209     }
 210 
 211     /*package-private*/
 212     abstract
 213     IntVector bOp(Vector<Integer> o,
 214                              VectorMask<Integer> m,
 215                              FBinOp f);
 216     @ForceInline
 217     final
 218     IntVector bOpTemplate(Vector<Integer> o,
 219                                      VectorMask<Integer> m,
 220                                      FBinOp f) {
 221         if (m == null) {
 222             return bOpTemplate(o, f);
 223         }
 224         int[] res = new int[length()];
 225         int[] vec1 = this.vec();
 226         int[] vec2 = ((IntVector)o).vec();
 227         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 228         for (int i = 0; i < res.length; i++) {
 229             res[i] = mbits[i] ? f.apply(i, vec1[i], vec2[i]) : vec1[i];
 230         }
 231         return vectorFactory(res);
 232     }
 233 
 234     // Ternary operator
 235 
 236     /*package-private*/
 237     interface FTriOp {
 238         int apply(int i, int a, int b, int c);
 239     }
 240 
 241     /*package-private*/
 242     abstract
 243     IntVector tOp(Vector<Integer> o1,
 244                              Vector<Integer> o2,
 245                              FTriOp f);
 246     @ForceInline
 247     final
 248     IntVector tOpTemplate(Vector<Integer> o1,
 249                                      Vector<Integer> o2,
 250                                      FTriOp f) {
 251         int[] res = new int[length()];
 252         int[] vec1 = this.vec();
 253         int[] vec2 = ((IntVector)o1).vec();
 254         int[] vec3 = ((IntVector)o2).vec();
 255         for (int i = 0; i < res.length; i++) {
 256             res[i] = f.apply(i, vec1[i], vec2[i], vec3[i]);
 257         }
 258         return vectorFactory(res);
 259     }
 260 
 261     /*package-private*/
 262     abstract
 263     IntVector tOp(Vector<Integer> o1,
 264                              Vector<Integer> o2,
 265                              VectorMask<Integer> m,
 266                              FTriOp f);
 267     @ForceInline
 268     final
 269     IntVector tOpTemplate(Vector<Integer> o1,
 270                                      Vector<Integer> o2,
 271                                      VectorMask<Integer> m,
 272                                      FTriOp f) {
 273         if (m == null) {
 274             return tOpTemplate(o1, o2, f);
 275         }
 276         int[] res = new int[length()];
 277         int[] vec1 = this.vec();
 278         int[] vec2 = ((IntVector)o1).vec();
 279         int[] vec3 = ((IntVector)o2).vec();
 280         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 281         for (int i = 0; i < res.length; i++) {
 282             res[i] = mbits[i] ? f.apply(i, vec1[i], vec2[i], vec3[i]) : vec1[i];
 283         }
 284         return vectorFactory(res);
 285     }
 286 
 287     // Reduction operator
 288 
 289     /*package-private*/
 290     abstract
 291     int rOp(int v, VectorMask<Integer> m, FBinOp f);
 292 
 293     @ForceInline
 294     final
 295     int rOpTemplate(int v, VectorMask<Integer> m, FBinOp f) {
 296         if (m == null) {
 297             return rOpTemplate(v, f);
 298         }
 299         int[] vec = vec();
 300         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 301         for (int i = 0; i < vec.length; i++) {
 302             v = mbits[i] ? f.apply(i, v, vec[i]) : v;
 303         }
 304         return v;
 305     }
 306 
 307     @ForceInline
 308     final
 309     int rOpTemplate(int v, FBinOp f) {
 310         int[] vec = vec();
 311         for (int i = 0; i < vec.length; i++) {
 312             v = f.apply(i, v, vec[i]);
 313         }
 314         return v;
 315     }
 316 
 317     // Memory reference
 318 
 319     /*package-private*/
 320     interface FLdOp<M> {
 321         int apply(M memory, int offset, int i);
 322     }
 323 
 324     /*package-private*/
 325     @ForceInline
 326     final
 327     <M> IntVector ldOp(M memory, int offset,
 328                                   FLdOp<M> f) {
 329         //dummy; no vec = vec();
 330         int[] res = new int[length()];
 331         for (int i = 0; i < res.length; i++) {
 332             res[i] = f.apply(memory, offset, i);
 333         }
 334         return vectorFactory(res);
 335     }
 336 
 337     /*package-private*/
 338     @ForceInline
 339     final
 340     <M> IntVector ldOp(M memory, int offset,
 341                                   VectorMask<Integer> m,
 342                                   FLdOp<M> f) {
 343         //int[] vec = vec();
 344         int[] res = new int[length()];
 345         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 346         for (int i = 0; i < res.length; i++) {
 347             if (mbits[i]) {
 348                 res[i] = f.apply(memory, offset, i);
 349             }
 350         }
 351         return vectorFactory(res);
 352     }
 353 
 354     interface FStOp<M> {
 355         void apply(M memory, int offset, int i, int a);
 356     }
 357 
 358     /*package-private*/
 359     @ForceInline
 360     final
 361     <M> void stOp(M memory, int offset,
 362                   FStOp<M> f) {
 363         int[] vec = vec();
 364         for (int i = 0; i < vec.length; i++) {
 365             f.apply(memory, offset, i, vec[i]);
 366         }
 367     }
 368 
 369     /*package-private*/
 370     @ForceInline
 371     final
 372     <M> void stOp(M memory, int offset,
 373                   VectorMask<Integer> m,
 374                   FStOp<M> f) {
 375         int[] vec = vec();
 376         boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
 377         for (int i = 0; i < vec.length; i++) {
 378             if (mbits[i]) {
 379                 f.apply(memory, offset, i, vec[i]);
 380             }
 381         }
 382     }
 383 
 384     // Binary test
 385 
 386     /*package-private*/
 387     interface FBinTest {
 388         boolean apply(int cond, int i, int a, int b);
 389     }
 390 
 391     /*package-private*/
 392     @ForceInline
 393     final
 394     AbstractMask<Integer> bTest(int cond,
 395                                   Vector<Integer> o,
 396                                   FBinTest f) {
 397         int[] vec1 = vec();
 398         int[] vec2 = ((IntVector)o).vec();
 399         boolean[] bits = new boolean[length()];
 400         for (int i = 0; i < length(); i++){
 401             bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
 402         }
 403         return maskFactory(bits);
 404     }
 405 
 406     /*package-private*/
 407     @ForceInline
 408     static int rotateLeft(int a, int n) {
 409         return Integer.rotateLeft(a, n);
 410     }
 411 
 412     /*package-private*/
 413     @ForceInline
 414     static int rotateRight(int a, int n) {
 415         return Integer.rotateRight(a, n);
 416     }
 417 
 418     /*package-private*/
 419     @Override
 420     abstract IntSpecies vspecies();
 421 
 422     /*package-private*/
 423     @ForceInline
 424     static long toBits(int e) {
 425         return  e;
 426     }
 427 
 428     /*package-private*/
 429     @ForceInline
 430     static int fromBits(long bits) {
 431         return ((int)bits);
 432     }
 433 
 434     // Static factories (other than memory operations)
 435 
 436     // Note: A surprising behavior in javadoc
 437     // sometimes makes a lone /** {@inheritDoc} */
 438     // comment drop the method altogether,
 439     // apparently if the method mentions a
 440     // parameter or return type of Vector<Integer>
 441     // instead of Vector<E> as originally specified.
 442     // Adding an empty HTML fragment appears to
 443     // nudge javadoc into providing the desired
 444     // inherited documentation.  We use the HTML
 445     // comment <!--workaround--> for this.
 446 
 447     /**
 448      * Returns a vector of the given species
 449      * where all lane elements are set to
 450      * zero, the default primitive value.
 451      *
 452      * @param species species of the desired zero vector
 453      * @return a zero vector
 454      */
 455     @ForceInline
 456     public static IntVector zero(VectorSpecies<Integer> species) {
 457         IntSpecies vsp = (IntSpecies) species;
 458         return VectorSupport.broadcastCoerced(vsp.vectorType(), int.class, species.length(),
 459                                 0, vsp,
 460                                 ((bits_, s_) -> s_.rvOp(i -> bits_)));
 461     }
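
    // A minimal usage sketch (illustrative only; assumes a concrete species
    // such as SPECIES_256 suits the target hardware):
    //
    //   IntVector z = IntVector.zero(IntVector.SPECIES_256);
    //   assert z.lane(0) == 0;   // every lane holds the default value 0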
 462 
 463     /**
 464      * Returns a vector of the same species as this one
 465      * where all lane elements are set to
 466      * the primitive value {@code e}.
 467      *
 468      * The contents of the current vector are discarded;
 469      * only the species is relevant to this operation.
 470      *
 471      * <p> This method returns the value of this expression:
 472      * {@code IntVector.broadcast(this.species(), e)}.
 473      *
 474      * @apiNote
 475      * Unlike the similar method named {@code broadcast()}
 476      * in the supertype {@code Vector}, this method does not
 477      * need to validate its argument, and cannot throw
 478      * {@code IllegalArgumentException}.  This method is
 479      * therefore preferable to the supertype method.
 480      *
 481      * @param e the value to broadcast
 482      * @return a vector where all lane elements are set to
 483      *         the primitive value {@code e}
 484      * @see #broadcast(VectorSpecies,long)
 485      * @see Vector#broadcast(long)
 486      * @see VectorSpecies#broadcast(long)
 487      */
 488     public abstract IntVector broadcast(int e);
 489 
 490     /**
 491      * Returns a vector of the given species
 492      * where all lane elements are set to
 493      * the primitive value {@code e}.
 494      *
 495      * @param species species of the desired vector
 496      * @param e the value to broadcast
 497      * @return a vector where all lane elements are set to
 498      *         the primitive value {@code e}
 499      * @see #broadcast(long)
 500      * @see Vector#broadcast(long)
 501      * @see VectorSpecies#broadcast(long)
 502      */
 503     @ForceInline
 504     public static IntVector broadcast(VectorSpecies<Integer> species, int e) {
 505         IntSpecies vsp = (IntSpecies) species;
 506         return vsp.broadcast(e);
 507     }
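
    // Usage sketch (illustrative; any int species works the same way):
    //
    //   VectorSpecies<Integer> SP = IntVector.SPECIES_PREFERRED;
    //   IntVector ones = IntVector.broadcast(SP, 1);   // static form
    //   IntVector twos = ones.broadcast(2);            // instance form, same species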
 508 
 509     /*package-private*/
 510     @ForceInline
 511     final IntVector broadcastTemplate(int e) {
 512         IntSpecies vsp = vspecies();
 513         return vsp.broadcast(e);
 514     }
 515 
 516     /**
 517      * {@inheritDoc} <!--workaround-->
 518      * @apiNote
 519      * When working with vector subtypes like {@code IntVector},
 520      * {@linkplain #broadcast(int) the more strongly typed method}
 521      * is typically selected.  It can be explicitly selected
 522      * using a cast: {@code v.broadcast((int)e)}.
 523      * The two expressions will produce numerically identical results.
 524      */
 525     @Override
 526     public abstract IntVector broadcast(long e);
 527 
 528     /**
 529      * Returns a vector of the given species
 530      * where all lane elements are set to
 531      * the primitive value {@code e}.
 532      *
 533      * The {@code long} value must be accurately representable
 534      * by the {@code ETYPE} of the vector species, so that
 535      * {@code e==(long)(ETYPE)e}.
 536      *
 537      * @param species species of the desired vector
 538      * @param e the value to broadcast
 539      * @return a vector where all lane elements are set to
 540      *         the primitive value {@code e}
 541      * @throws IllegalArgumentException
 542      *         if the given {@code long} value cannot
 543      *         be represented by the vector's {@code ETYPE}
 544      * @see #broadcast(VectorSpecies,int)
 545      * @see VectorSpecies#checkValue(long)
 546      */
 547     @ForceInline
 548     public static IntVector broadcast(VectorSpecies<Integer> species, long e) {
 549         IntSpecies vsp = (IntSpecies) species;
 550         return vsp.broadcast(e);
 551     }
 552 
 553     /*package-private*/
 554     @ForceInline
 555     final IntVector broadcastTemplate(long e) {
 556         return vspecies().broadcast(e);
 557     }
 558 
 559     // Unary lanewise support
 560 
 561     /**
 562      * {@inheritDoc} <!--workaround-->
 563      */
 564     public abstract
 565     IntVector lanewise(VectorOperators.Unary op);
 566 
 567     @ForceInline
 568     final
 569     IntVector lanewiseTemplate(VectorOperators.Unary op) {
 570         if (opKind(op, VO_SPECIAL)) {
 571             if (op == ZOMO) {
 572                 return blend(broadcast(-1), compare(NE, 0));
 573             }
 574             if (op == NOT) {
 575                 return broadcast(-1).lanewise(XOR, this);
 576             } else if (op == NEG) {
 577                 // FIXME: Support this in the JIT.
 578                 return broadcast(0).lanewise(SUB, this);
 579             }
 580         }
 581         int opc = opCode(op);
 582         return VectorSupport.unaryOp(
 583             opc, getClass(), null, int.class, length(),
 584             this, null,
 585             UN_IMPL.find(op, opc, IntVector::unaryOperations));
 586     }
 587 
 588     /**
 589      * {@inheritDoc} <!--workaround-->
 590      */
 591     @Override
 592     public abstract
 593     IntVector lanewise(VectorOperators.Unary op,
 594                                   VectorMask<Integer> m);
 595     @ForceInline
 596     final
 597     IntVector lanewiseTemplate(VectorOperators.Unary op,
 598                                           Class<? extends VectorMask<Integer>> maskClass,
 599                                           VectorMask<Integer> m) {
 600         m.check(maskClass, this);
 601         if (opKind(op, VO_SPECIAL)) {
 602             if (op == ZOMO) {
 603                 return blend(broadcast(-1), compare(NE, 0, m));
 604             }
 605             if (op == NOT) {
 606                 return lanewise(XOR, broadcast(-1), m);
 607             } else if (op == NEG) {
 608                 return lanewise(NOT, m).lanewise(ADD, broadcast(1), m);
 609             }
 610         }
 611         int opc = opCode(op);
 612         return VectorSupport.unaryOp(
 613             opc, getClass(), maskClass, int.class, length(),
 614             this, m,
 615             UN_IMPL.find(op, opc, IntVector::unaryOperations));
 616     }
 617 
 618     private static final
 619     ImplCache<Unary, UnaryOperation<IntVector, VectorMask<Integer>>>
 620         UN_IMPL = new ImplCache<>(Unary.class, IntVector.class);
 621 
 622     private static UnaryOperation<IntVector, VectorMask<Integer>> unaryOperations(int opc_) {
 623         switch (opc_) {
 624             case VECTOR_OP_NEG: return (v0, m) ->
 625                     v0.uOp(m, (i, a) -> (int) -a);
 626             case VECTOR_OP_ABS: return (v0, m) ->
 627                     v0.uOp(m, (i, a) -> (int) Math.abs(a));
 628             default: return null;
 629         }
 630     }
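
    // Usage sketch for the unary entry points (illustrative):
    //
    //   IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 7);
    //   IntVector n = v.lanewise(VectorOperators.NEG);  // -7 in every lane
    //   IntVector c = v.lanewise(VectorOperators.NOT);  // ~7 == -8, via the XOR(-1) rewrite above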
 631 
 632     // Binary lanewise support
 633 
 634     /**
 635      * {@inheritDoc} <!--workaround-->
 636      * @see #lanewise(VectorOperators.Binary,int)
 637      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
 638      */
 639     @Override
 640     public abstract
 641     IntVector lanewise(VectorOperators.Binary op,
 642                                   Vector<Integer> v);
 643     @ForceInline
 644     final
 645     IntVector lanewiseTemplate(VectorOperators.Binary op,
 646                                           Vector<Integer> v) {
 647         IntVector that = (IntVector) v;
 648         that.check(this);
 649 
 650         if (opKind(op, VO_SPECIAL | VO_SHIFT)) {
 651             if (op == FIRST_NONZERO) {
 652                 // FIXME: Support this in the JIT.
 653                 VectorMask<Integer> thisNZ
 654                     = this.viewAsIntegralLanes().compare(NE, (int) 0);
 655                 that = that.blend((int) 0, thisNZ.cast(vspecies()));
 656                 op = OR_UNCHECKED;
 657             }
 658             if (opKind(op, VO_SHIFT)) {
 659                 // As per shift specification for Java, mask the shift count.
 660                 // This allows the JIT to ignore some ISA details.
 661                 that = that.lanewise(AND, SHIFT_MASK);
 662             }
 663             if (op == AND_NOT) {
 664                 // FIXME: Support this in the JIT.
 665                 that = that.lanewise(NOT);
 666                 op = AND;
 667             } else if (op == DIV) {
 668                 VectorMask<Integer> eqz = that.eq((int) 0);
 669                 if (eqz.anyTrue()) {
 670                     throw that.divZeroException();
 671                 }
 672             }
 673         }
 674 
 675         int opc = opCode(op);
 676         return VectorSupport.binaryOp(
 677             opc, getClass(), null, int.class, length(),
 678             this, that, null,
 679             BIN_IMPL.find(op, opc, IntVector::binaryOperations));
 680     }
 681 
 682     /**
 683      * {@inheritDoc} <!--workaround-->
 684      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
 685      */
 686     @Override
 687     public abstract
 688     IntVector lanewise(VectorOperators.Binary op,
 689                                   Vector<Integer> v,
 690                                   VectorMask<Integer> m);
 691     @ForceInline
 692     final
 693     IntVector lanewiseTemplate(VectorOperators.Binary op,
 694                                           Class<? extends VectorMask<Integer>> maskClass,
 695                                           Vector<Integer> v, VectorMask<Integer> m) {
 696         IntVector that = (IntVector) v;
 697         that.check(this);
 698         m.check(maskClass, this);
 699 
 700         if (opKind(op, VO_SPECIAL | VO_SHIFT)) {
 701             if (op == FIRST_NONZERO) {
 702                 // FIXME: Support this in the JIT.
 703                 VectorMask<Integer> thisNZ
 704                     = this.viewAsIntegralLanes().compare(NE, (int) 0);
 705                 that = that.blend((int) 0, thisNZ.cast(vspecies()));
 706                 op = OR_UNCHECKED;
 707             }
 708             if (opKind(op, VO_SHIFT)) {
 709                 // As per shift specification for Java, mask the shift count.
 710                 // This allows the JIT to ignore some ISA details.
 711                 that = that.lanewise(AND, SHIFT_MASK);
 712             }
 713             if (op == AND_NOT) {
 714                 // FIXME: Support this in the JIT.
 715                 that = that.lanewise(NOT);
 716                 op = AND;
 717             } else if (op == DIV) {
 718                 VectorMask<Integer> eqz = that.eq((int)0);
 719                 if (eqz.and(m).anyTrue()) {
 720                     throw that.divZeroException();
 721                 }
 722                 // suppress div/0 exceptions in unset lanes
 723                 that = that.lanewise(NOT, eqz);
 724             }
 725         }
 726 
 727         int opc = opCode(op);
 728         return VectorSupport.binaryOp(
 729             opc, getClass(), maskClass, int.class, length(),
 730             this, that, m,
 731             BIN_IMPL.find(op, opc, IntVector::binaryOperations));
 732     }
 733 
 734     private static final
 735     ImplCache<Binary, BinaryOperation<IntVector, VectorMask<Integer>>>
 736         BIN_IMPL = new ImplCache<>(Binary.class, IntVector.class);
 737 
 738     private static BinaryOperation<IntVector, VectorMask<Integer>> binaryOperations(int opc_) {
 739         switch (opc_) {
 740             case VECTOR_OP_ADD: return (v0, v1, vm) ->
 741                     v0.bOp(v1, vm, (i, a, b) -> (int)(a + b));
 742             case VECTOR_OP_SUB: return (v0, v1, vm) ->
 743                     v0.bOp(v1, vm, (i, a, b) -> (int)(a - b));
 744             case VECTOR_OP_MUL: return (v0, v1, vm) ->
 745                     v0.bOp(v1, vm, (i, a, b) -> (int)(a * b));
 746             case VECTOR_OP_DIV: return (v0, v1, vm) ->
 747                     v0.bOp(v1, vm, (i, a, b) -> (int)(a / b));
 748             case VECTOR_OP_MAX: return (v0, v1, vm) ->
 749                     v0.bOp(v1, vm, (i, a, b) -> (int)Math.max(a, b));
 750             case VECTOR_OP_MIN: return (v0, v1, vm) ->
 751                     v0.bOp(v1, vm, (i, a, b) -> (int)Math.min(a, b));
 752             case VECTOR_OP_AND: return (v0, v1, vm) ->
 753                     v0.bOp(v1, vm, (i, a, b) -> (int)(a & b));
 754             case VECTOR_OP_OR: return (v0, v1, vm) ->
 755                     v0.bOp(v1, vm, (i, a, b) -> (int)(a | b));
 756             case VECTOR_OP_XOR: return (v0, v1, vm) ->
 757                     v0.bOp(v1, vm, (i, a, b) -> (int)(a ^ b));
 758             case VECTOR_OP_LSHIFT: return (v0, v1, vm) ->
 759                     v0.bOp(v1, vm, (i, a, n) -> (int)(a << n));
 760             case VECTOR_OP_RSHIFT: return (v0, v1, vm) ->
 761                     v0.bOp(v1, vm, (i, a, n) -> (int)(a >> n));
 762             case VECTOR_OP_URSHIFT: return (v0, v1, vm) ->
 763                     v0.bOp(v1, vm, (i, a, n) -> (int)((a & LSHR_SETUP_MASK) >>> n));
 764             case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
 765                     v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
 766             case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
 767                     v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
 768             default: return null;
 769         }
 770     }
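
    // Usage sketch for the masked binary path (illustrative).  Note that the
    // masked DIV handling above only rejects zero divisors in lanes that the
    // mask actually selects:
    //
    //   VectorSpecies<Integer> SP = IntVector.SPECIES_PREFERRED;
    //   IntVector num = IntVector.broadcast(SP, 10);
    //   IntVector den = IntVector.zero(SP).withLane(0, 2);        // nonzero only in lane 0
    //   VectorMask<Integer> first = VectorMask.fromLong(SP, 1L);  // only lane 0 set
    //   IntVector q = num.lanewise(VectorOperators.DIV, den, first);  // no ArithmeticException;
    //                                                                 // unset lanes keep num's values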
 771 
 772     // FIXME: Maybe all of the public final methods in this file (the
 773     // simple ones that just call lanewise) should be pushed down to
 774     // the X-VectorBits template.  They can't optimize properly at
 775     // this level, and must rely on inlining.  Does it work?
 776     // (If it works, of course keep the code here.)
 777 
 778     /**
 779      * Combines the lane values of this vector
 780      * with the value of a broadcast scalar.
 781      *
 782      * This is a lane-wise binary operation which applies
 783      * the selected operation to each lane.
 784      * The return value will be equal to this expression:
 785      * {@code this.lanewise(op, this.broadcast(e))}.
 786      *
 787      * @param op the operation used to process lane values
 788      * @param e the input scalar
 789      * @return the result of applying the operation lane-wise
 790      *         to the two input vectors
 791      * @throws UnsupportedOperationException if this vector does
 792      *         not support the requested operation
 793      * @see #lanewise(VectorOperators.Binary,Vector)
 794      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
 795      */
 796     @ForceInline
 797     public final
 798     IntVector lanewise(VectorOperators.Binary op,
 799                                   int e) {
 800         if (opKind(op, VO_SHIFT) && (int)(int)e == e) {
 801             return lanewiseShift(op, (int) e);
 802         }
 803         if (op == AND_NOT) {
 804             op = AND; e = (int) ~e;
 805         }
 806         return lanewise(op, broadcast(e));
 807     }
 808 
 809     /**
 810      * Combines the lane values of this vector
 811      * with the value of a broadcast scalar,
 812      * with selection of lane elements controlled by a mask.
 813      *
 814      * This is a masked lane-wise binary operation which applies
 815      * the selected operation to each lane.
 816      * The return value will be equal to this expression:
 817      * {@code this.lanewise(op, this.broadcast(e), m)}.
 818      *
 819      * @param op the operation used to process lane values
 820      * @param e the input scalar
 821      * @param m the mask controlling lane selection
 822      * @return the result of applying the operation lane-wise
 823      *         to the input vector and the scalar
 824      * @throws UnsupportedOperationException if this vector does
 825      *         not support the requested operation
 826      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
 827      * @see #lanewise(VectorOperators.Binary,int)
 828      */
 829     @ForceInline
 830     public final
 831     IntVector lanewise(VectorOperators.Binary op,
 832                                   int e,
 833                                   VectorMask<Integer> m) {
 834         if (opKind(op, VO_SHIFT) && (int)(int)e == e) {
 835             return lanewiseShift(op, (int) e, m);
 836         }
 837         if (op == AND_NOT) {
 838             op = AND; e = (int) ~e;
 839         }
 840         return lanewise(op, broadcast(e), m);
 841     }
 842 
 843     /**
 844      * {@inheritDoc} <!--workaround-->
 845      * @apiNote
 846      * When working with vector subtypes like {@code IntVector},
 847      * {@linkplain #lanewise(VectorOperators.Binary,int)
 848      * the more strongly typed method}
 849      * is typically selected.  It can be explicitly selected
 850      * using a cast: {@code v.lanewise(op,(int)e)}.
 851      * The two expressions will produce numerically identical results.
 852      */
 853     @ForceInline
 854     public final
 855     IntVector lanewise(VectorOperators.Binary op,
 856                                   long e) {
 857         int e1 = (int) e;
 858         if ((long)e1 != e
 859             // allow shift ops to clip down their int parameters
 860             && !(opKind(op, VO_SHIFT) && (int)e1 == e)) {
 861             vspecies().checkValue(e);  // for exception
 862         }
 863         return lanewise(op, e1);
 864     }
 865 
 866     /**
 867      * {@inheritDoc} <!--workaround-->
 868      * @apiNote
 869      * When working with vector subtypes like {@code IntVector},
 870      * {@linkplain #lanewise(VectorOperators.Binary,int,VectorMask)
 871      * the more strongly typed method}
 872      * is typically selected.  It can be explicitly selected
 873      * using a cast: {@code v.lanewise(op,(int)e,m)}.
 874      * The two expressions will produce numerically identical results.
 875      */
 876     @ForceInline
 877     public final
 878     IntVector lanewise(VectorOperators.Binary op,
 879                                   long e, VectorMask<Integer> m) {
 880         int e1 = (int) e;
 881         if ((long)e1 != e
 882             // allow shift ops to clip down their int parameters
 883             && !(opKind(op, VO_SHIFT) && (int)e1 == e)) {
 884             vspecies().checkValue(e);  // for exception
 885         }
 886         return lanewise(op, e1, m);
 887     }
 888 
 889     /*package-private*/
 890     abstract IntVector
 891     lanewiseShift(VectorOperators.Binary op, int e);
 892 
 893     /*package-private*/
 894     @ForceInline
 895     final IntVector
 896     lanewiseShiftTemplate(VectorOperators.Binary op, int e) {
 897         // Special handling for these.  FIXME: Refactor?
 898         assert(opKind(op, VO_SHIFT));
 899         // As per shift specification for Java, mask the shift count.
 900         e &= SHIFT_MASK;
 901         int opc = opCode(op);
 902         return VectorSupport.broadcastInt(
 903             opc, getClass(), null, int.class, length(),
 904             this, e, null,
 905             BIN_INT_IMPL.find(op, opc, IntVector::broadcastIntOperations));
 906     }
 907 
 908     /*package-private*/
 909     abstract IntVector
 910     lanewiseShift(VectorOperators.Binary op, int e, VectorMask<Integer> m);
 911 
 912     /*package-private*/
 913     @ForceInline
 914     final IntVector
 915     lanewiseShiftTemplate(VectorOperators.Binary op,
 916                           Class<? extends VectorMask<Integer>> maskClass,
 917                           int e, VectorMask<Integer> m) {
 918         m.check(maskClass, this);
 919         assert(opKind(op, VO_SHIFT));
 920         // As per shift specification for Java, mask the shift count.
 921         e &= SHIFT_MASK;
 922         int opc = opCode(op);
 923         return VectorSupport.broadcastInt(
 924             opc, getClass(), maskClass, int.class, length(),
 925             this, e, m,
 926             BIN_INT_IMPL.find(op, opc, IntVector::broadcastIntOperations));
 927     }
 928 
 929     private static final
 930     ImplCache<Binary,VectorBroadcastIntOp<IntVector, VectorMask<Integer>>> BIN_INT_IMPL
 931         = new ImplCache<>(Binary.class, IntVector.class);
 932 
 933     private static VectorBroadcastIntOp<IntVector, VectorMask<Integer>> broadcastIntOperations(int opc_) {
 934         switch (opc_) {
 935             case VECTOR_OP_LSHIFT: return (v, n, m) ->
 936                     v.uOp(m, (i, a) -> (int)(a << n));
 937             case VECTOR_OP_RSHIFT: return (v, n, m) ->
 938                     v.uOp(m, (i, a) -> (int)(a >> n));
 939             case VECTOR_OP_URSHIFT: return (v, n, m) ->
 940                     v.uOp(m, (i, a) -> (int)((a & LSHR_SETUP_MASK) >>> n));
 941             case VECTOR_OP_LROTATE: return (v, n, m) ->
 942                     v.uOp(m, (i, a) -> rotateLeft(a, (int)n));
 943             case VECTOR_OP_RROTATE: return (v, n, m) ->
 944                     v.uOp(m, (i, a) -> rotateRight(a, (int)n));
 945             default: return null;
 946         }
 947     }
 948 
 949     // As per shift specification for Java, mask the shift count.
 950     // We mask 0x3F (long), 0x1F (int), 0x0F (short), 0x07 (byte).
 951     // The latter two maskings go beyond the JLS, but seem reasonable
 952     // since our lane types are first-class types, not just dressed
 953     // up ints.
 954     private static final int SHIFT_MASK = (Integer.SIZE - 1);
 955     private static final int LSHR_SETUP_MASK = -1;
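
    // Illustrative consequence of the masking above: int shift counts are
    // taken mod 32, mirroring the JLS rule for scalar int shifts.
    //
    //   IntVector v = IntVector.broadcast(IntVector.SPECIES_PREFERRED, 1);
    //   IntVector a = v.lanewise(VectorOperators.LSHIFT, 33);  // same as shifting by 1
    //   IntVector b = v.lanewise(VectorOperators.LSHIFT, 1);
    //   // a and b hold identical lane values (2 in every lane)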
 956 
 957     // Ternary lanewise support
 958 
 959     // Ternary operators come in eight variations:
 960     //   lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2])
 961     //   lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2], mask)
 962 
 963     // It is annoying to support all of these variations of masking
 964     // and broadcast, but it would be more surprising not to continue
 965     // the obvious pattern started by unary and binary.
 966 
 967    /**
 968      * {@inheritDoc} <!--workaround-->
 969      * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
 970      * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
 971      * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
 972      * @see #lanewise(VectorOperators.Ternary,int,int)
 973      * @see #lanewise(VectorOperators.Ternary,Vector,int)
 974      * @see #lanewise(VectorOperators.Ternary,int,Vector)
 975      */
 976     @Override
 977     public abstract
 978     IntVector lanewise(VectorOperators.Ternary op,
 979                                                   Vector<Integer> v1,
 980                                                   Vector<Integer> v2);
 981     @ForceInline
 982     final
 983     IntVector lanewiseTemplate(VectorOperators.Ternary op,
 984                                           Vector<Integer> v1,
 985                                           Vector<Integer> v2) {
 986         IntVector that = (IntVector) v1;
 987         IntVector tother = (IntVector) v2;
 988         // It's a word: https://www.dictionary.com/browse/tother
 989         // See also Chapter 11 of Dickens, Our Mutual Friend:
 990         // "Totherest Governor," replied Mr Riderhood...
 991         that.check(this);
 992         tother.check(this);
 993         if (op == BITWISE_BLEND) {
 994             // FIXME: Support this in the JIT.
 995             that = this.lanewise(XOR, that).lanewise(AND, tother);
 996             return this.lanewise(XOR, that);
 997         }
 998         int opc = opCode(op);
 999         return VectorSupport.ternaryOp(
1000             opc, getClass(), null, int.class, length(),
1001             this, that, tother, null,
1002             TERN_IMPL.find(op, opc, IntVector::ternaryOperations));
1003     }
1004 
1005     /**
1006      * {@inheritDoc} <!--workaround-->
1007      * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
1008      * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
1009      * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
1010      */
1011     @Override
1012     public abstract
1013     IntVector lanewise(VectorOperators.Ternary op,
1014                                   Vector<Integer> v1,
1015                                   Vector<Integer> v2,
1016                                   VectorMask<Integer> m);
1017     @ForceInline
1018     final
1019     IntVector lanewiseTemplate(VectorOperators.Ternary op,
1020                                           Class<? extends VectorMask<Integer>> maskClass,
1021                                           Vector<Integer> v1,
1022                                           Vector<Integer> v2,
1023                                           VectorMask<Integer> m) {
1024         IntVector that = (IntVector) v1;
1025         IntVector tother = (IntVector) v2;
1026         // It's a word: https://www.dictionary.com/browse/tother
1027         // See also Chapter 11 of Dickens, Our Mutual Friend:
1028         // "Totherest Governor," replied Mr Riderhood...
1029         that.check(this);
1030         tother.check(this);
1031         m.check(maskClass, this);
1032 
1033         if (op == BITWISE_BLEND) {
1034             // FIXME: Support this in the JIT.
1035             that = this.lanewise(XOR, that).lanewise(AND, tother);
1036             return this.lanewise(XOR, that, m);
1037         }
1038         int opc = opCode(op);
1039         return VectorSupport.ternaryOp(
1040             opc, getClass(), maskClass, int.class, length(),
1041             this, that, tother, m,
1042             TERN_IMPL.find(op, opc, IntVector::ternaryOperations));
1043     }
1044 
1045     private static final
1046     ImplCache<Ternary, TernaryOperation<IntVector, VectorMask<Integer>>>
1047         TERN_IMPL = new ImplCache<>(Ternary.class, IntVector.class);
1048 
1049     private static TernaryOperation<IntVector, VectorMask<Integer>> ternaryOperations(int opc_) {
1050         switch (opc_) {
1051             default: return null;
1052         }
1053     }
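
    // Sketch of the BITWISE_BLEND expansion used above: for IntVectors a
    // (this), b, and c of one species (names illustrative), each result bit is
    // taken from b where the corresponding bit of c is 1, and from a where it
    // is 0:
    //
    //   // a ^ ((a ^ b) & c)  ==  (a & ~c) | (b & c)
    //   IntVector r = a.lanewise(VectorOperators.BITWISE_BLEND, b, c);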
1054 
1055     /**
1056      * Combines the lane values of this vector
1057      * with the values of two broadcast scalars.
1058      *
1059      * This is a lane-wise ternary operation which applies
1060      * the selected operation to each lane.
1061      * The return value will be equal to this expression:
1062      * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2))}.
1063      *
1064      * @param op the operation used to combine lane values
1065      * @param e1 the first input scalar
1066      * @param e2 the second input scalar
1067      * @return the result of applying the operation lane-wise
1068      *         to the input vector and the scalars
1069      * @throws UnsupportedOperationException if this vector does
1070      *         not support the requested operation
1071      * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
1072      * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
1073      */
1074     @ForceInline
1075     public final
1076     IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2)
1077                                   int e1,
1078                                   int e2) {
1079         return lanewise(op, broadcast(e1), broadcast(e2));
1080     }
1081 
1082     /**
1083      * Combines the lane values of this vector
1084      * with the values of two broadcast scalars,
1085      * with selection of lane elements controlled by a mask.
1086      *
1087      * This is a masked lane-wise ternary operation which applies
1088      * the selected operation to each lane.
1089      * The return value will be equal to this expression:
1090      * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2), m)}.
1091      *
1092      * @param op the operation used to combine lane values
1093      * @param e1 the first input scalar
1094      * @param e2 the second input scalar
1095      * @param m the mask controlling lane selection
1096      * @return the result of applying the operation lane-wise
1097      *         to the input vector and the scalars
1098      * @throws UnsupportedOperationException if this vector does
1099      *         not support the requested operation
1100      * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
1101      * @see #lanewise(VectorOperators.Ternary,int,int)
1102      */
1103     @ForceInline
1104     public final
1105     IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2,m)
1106                                   int e1,
1107                                   int e2,
1108                                   VectorMask<Integer> m) {
1109         return lanewise(op, broadcast(e1), broadcast(e2), m);
1110     }
1111 
1112     /**
1113      * Combines the lane values of this vector
1114      * with the values of another vector and a broadcast scalar.
1115      *
1116      * This is a lane-wise ternary operation which applies
1117      * the selected operation to each lane.
1118      * The return value will be equal to this expression:
1119      * {@code this.lanewise(op, v1, this.broadcast(e2))}.
1120      *
1121      * @param op the operation used to combine lane values
1122      * @param v1 the other input vector
1123      * @param e2 the input scalar
1124      * @return the result of applying the operation lane-wise
1125      *         to the input vectors and the scalar
1126      * @throws UnsupportedOperationException if this vector does
1127      *         not support the requested operation
1128      * @see #lanewise(VectorOperators.Ternary,int,int)
1129      * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
1130      */
1131     @ForceInline
1132     public final
1133     IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2)
1134                                   Vector<Integer> v1,
1135                                   int e2) {
1136         return lanewise(op, v1, broadcast(e2));
1137     }
1138 
1139     /**
1140      * Combines the lane values of this vector
1141      * with the values of another vector and a broadcast scalar,
1142      * with selection of lane elements controlled by a mask.
1143      *
1144      * This is a masked lane-wise ternary operation which applies
1145      * the selected operation to each lane.
1146      * The return value will be equal to this expression:
1147      * {@code this.lanewise(op, v1, this.broadcast(e2), m)}.
1148      *
1149      * @param op the operation used to combine lane values
1150      * @param v1 the other input vector
1151      * @param e2 the input scalar
1152      * @param m the mask controlling lane selection
1153      * @return the result of applying the operation lane-wise
1154      *         to the input vectors and the scalar
1155      * @throws UnsupportedOperationException if this vector does
1156      *         not support the requested operation
1157      * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
1158      * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
1159      * @see #lanewise(VectorOperators.Ternary,Vector,int)
1160      */
1161     @ForceInline
1162     public final
1163     IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2,m)
1164                                   Vector<Integer> v1,
1165                                   int e2,
1166                                   VectorMask<Integer> m) {
1167         return lanewise(op, v1, broadcast(e2), m);
1168     }
1169 
1170     /**
1171      * Combines the lane values of this vector
1172      * with the values of another vector and a broadcast scalar.
1173      *
1174      * This is a lane-wise ternary operation which applies
1175      * the selected operation to each lane.
1176      * The return value will be equal to this expression:
1177      * {@code this.lanewise(op, this.broadcast(e1), v2)}.
1178      *
1179      * @param op the operation used to combine lane values
1180      * @param e1 the input scalar
1181      * @param v2 the other input vector
1182      * @return the result of applying the operation lane-wise
1183      *         to the input vectors and the scalar
1184      * @throws UnsupportedOperationException if this vector does
1185      *         not support the requested operation
1186      * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
1187      * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
1188      */
1189     @ForceInline
1190     public final
1191     IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2)
1192                                   int e1,
1193                                   Vector<Integer> v2) {
1194         return lanewise(op, broadcast(e1), v2);
1195     }
1196 
1197     /**
1198      * Combines the lane values of this vector
1199      * with the values of another vector and a broadcast scalar,
1200      * with selection of lane elements controlled by a mask.
1201      *
1202      * This is a masked lane-wise ternary operation which applies
1203      * the selected operation to each lane.
1204      * The return value will be equal to this expression:
1205      * {@code this.lanewise(op, this.broadcast(e1), v2, m)}.
1206      *
1207      * @param op the operation used to combine lane values
1208      * @param e1 the input scalar
1209      * @param v2 the other input vector
1210      * @param m the mask controlling lane selection
1211      * @return the result of applying the operation lane-wise
1212      *         to the input vectors and the scalar
1213      * @throws UnsupportedOperationException if this vector does
1214      *         not support the requested operation
1215      * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
1216      * @see #lanewise(VectorOperators.Ternary,int,Vector)
1217      */
1218     @ForceInline
1219     public final
1220     IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2,m)
1221                                   int e1,
1222                                   Vector<Integer> v2,
1223                                   VectorMask<Integer> m) {
1224         return lanewise(op, broadcast(e1), v2, m);
1225     }
1226 
1227     // (Thus endeth the Great and Mighty Ternary Ogdoad.)
1228     // https://en.wikipedia.org/wiki/Ogdoad
1229 
1230     /// FULL-SERVICE BINARY METHODS: ADD, SUB, MUL, DIV
1231     //
1232     // These include masked and non-masked versions.
1233     // This subclass adds broadcast (masked or not).
1234 
1235     /**
1236      * {@inheritDoc} <!--workaround-->
1237      * @see #add(int)
1238      */
1239     @Override
1240     @ForceInline
1241     public final IntVector add(Vector<Integer> v) {
1242         return lanewise(ADD, v);
1243     }
1244 
1245     /**
1246      * Adds this vector to the broadcast of an input scalar.
1247      *
1248      * This is a lane-wise binary operation which applies
1249      * the primitive addition operation ({@code +}) to each lane.
1250      *
1251      * This method is also equivalent to the expression
1252      * {@link #lanewise(VectorOperators.Binary,int)
1253      *    lanewise}{@code (}{@link VectorOperators#ADD
1254      *    ADD}{@code , e)}.
1255      *
1256      * @param e the input scalar
1257      * @return the result of adding each lane of this vector to the scalar
1258      * @see #add(Vector)
1259      * @see #broadcast(int)
1260      * @see #add(int,VectorMask)
1261      * @see VectorOperators#ADD
1262      * @see #lanewise(VectorOperators.Binary,Vector)
1263      * @see #lanewise(VectorOperators.Binary,int)
1264      */
1265     @ForceInline
1266     public final
1267     IntVector add(int e) {
1268         return lanewise(ADD, e);
1269     }
1270 
1271     /**
1272      * {@inheritDoc} <!--workaround-->
1273      * @see #add(int,VectorMask)
1274      */
1275     @Override
1276     @ForceInline
1277     public final IntVector add(Vector<Integer> v,
1278                                           VectorMask<Integer> m) {
1279         return lanewise(ADD, v, m);
1280     }
1281 
1282     /**
1283      * Adds this vector to the broadcast of an input scalar,
1284      * selecting lane elements controlled by a mask.
1285      *
1286      * This is a masked lane-wise binary operation which applies
1287      * the primitive addition operation ({@code +}) to each lane.
1288      *
1289      * This method is also equivalent to the expression
1290      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1291      *    lanewise}{@code (}{@link VectorOperators#ADD
1292      *    ADD}{@code , e, m)}.
1293      *
1294      * @param e the input scalar
1295      * @param m the mask controlling lane selection
1296      * @return the result of adding each lane of this vector to the scalar
1297      * @see #add(Vector,VectorMask)
1298      * @see #broadcast(int)
1299      * @see #add(int)
1300      * @see VectorOperators#ADD
1301      * @see #lanewise(VectorOperators.Binary,Vector)
1302      * @see #lanewise(VectorOperators.Binary,int)
1303      */
1304     @ForceInline
1305     public final IntVector add(int e,
1306                                           VectorMask<Integer> m) {
1307         return lanewise(ADD, e, m);
1308     }
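
    // Usage sketch for the scalar add forms (illustrative):
    //
    //   VectorSpecies<Integer> SP = IntVector.SPECIES_PREFERRED;
    //   IntVector v = IntVector.fromArray(SP, new int[SP.length()], 0);   // all zeros
    //   VectorMask<Integer> even = VectorMask.fromLong(SP, 0x5555_5555_5555_5555L);
    //   IntVector r1 = v.add(3);         // every lane becomes 3
    //   IntVector r2 = v.add(3, even);   // only even-numbered lanes become 3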
1309 
1310     /**
1311      * {@inheritDoc} <!--workaround-->
1312      * @see #sub(int)
1313      */
1314     @Override
1315     @ForceInline
1316     public final IntVector sub(Vector<Integer> v) {
1317         return lanewise(SUB, v);
1318     }
1319 
1320     /**
1321      * Subtracts an input scalar from this vector.
1322      *
1323      * This is a lane-wise binary operation which applies
1324      * the primitive subtraction operation ({@code -}) to each lane.
1325      *
1326      * This method is also equivalent to the expression
1327      * {@link #lanewise(VectorOperators.Binary,int)
1328      *    lanewise}{@code (}{@link VectorOperators#SUB
1329      *    SUB}{@code , e)}.
1330      *
1331      * @param e the input scalar
1332      * @return the result of subtracting the scalar from each lane of this vector
1333      * @see #sub(Vector)
1334      * @see #broadcast(int)
1335      * @see #sub(int,VectorMask)
1336      * @see VectorOperators#SUB
1337      * @see #lanewise(VectorOperators.Binary,Vector)
1338      * @see #lanewise(VectorOperators.Binary,int)
1339      */
1340     @ForceInline
1341     public final IntVector sub(int e) {
1342         return lanewise(SUB, e);
1343     }
1344 
1345     /**
1346      * {@inheritDoc} <!--workaround-->
1347      * @see #sub(int,VectorMask)
1348      */
1349     @Override
1350     @ForceInline
1351     public final IntVector sub(Vector<Integer> v,
1352                                           VectorMask<Integer> m) {
1353         return lanewise(SUB, v, m);
1354     }
1355 
1356     /**
1357      * Subtracts an input scalar from this vector
1358      * under the control of a mask.
1359      *
1360      * This is a masked lane-wise binary operation which applies
1361      * the primitive subtraction operation ({@code -}) to each lane.
1362      *
1363      * This method is also equivalent to the expression
1364      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1365      *    lanewise}{@code (}{@link VectorOperators#SUB
1366      *    SUB}{@code , e, m)}.
1367      *
1368      * @param e the input scalar
1369      * @param m the mask controlling lane selection
1370      * @return the result of subtracting the scalar from each lane of this vector
1371      * @see #sub(Vector,VectorMask)
1372      * @see #broadcast(int)
1373      * @see #sub(int)
1374      * @see VectorOperators#SUB
1375      * @see #lanewise(VectorOperators.Binary,Vector)
1376      * @see #lanewise(VectorOperators.Binary,int)
1377      */
1378     @ForceInline
1379     public final IntVector sub(int e,
1380                                           VectorMask<Integer> m) {
1381         return lanewise(SUB, e, m);
1382     }
1383 
1384     /**
1385      * {@inheritDoc} <!--workaround-->
1386      * @see #mul(int)
1387      */
1388     @Override
1389     @ForceInline
1390     public final IntVector mul(Vector<Integer> v) {
1391         return lanewise(MUL, v);
1392     }
1393 
1394     /**
1395      * Multiplies this vector by the broadcast of an input scalar.
1396      *
1397      * This is a lane-wise binary operation which applies
1398      * the primitive multiplication operation ({@code *}) to each lane.
1399      *
1400      * This method is also equivalent to the expression
1401      * {@link #lanewise(VectorOperators.Binary,int)
1402      *    lanewise}{@code (}{@link VectorOperators#MUL
1403      *    MUL}{@code , e)}.
1404      *
1405      * @param e the input scalar
1406      * @return the result of multiplying this vector by the given scalar
1407      * @see #mul(Vector)
1408      * @see #broadcast(int)
1409      * @see #mul(int,VectorMask)
1410      * @see VectorOperators#MUL
1411      * @see #lanewise(VectorOperators.Binary,Vector)
1412      * @see #lanewise(VectorOperators.Binary,int)
1413      */
1414     @ForceInline
1415     public final IntVector mul(int e) {
1416         return lanewise(MUL, e);
1417     }
1418 
1419     /**
1420      * {@inheritDoc} <!--workaround-->
1421      * @see #mul(int,VectorMask)
1422      */
1423     @Override
1424     @ForceInline
1425     public final IntVector mul(Vector<Integer> v,
1426                                           VectorMask<Integer> m) {
1427         return lanewise(MUL, v, m);
1428     }
1429 
1430     /**
1431      * Multiplies this vector by the broadcast of an input scalar,
1432      * selecting lane elements controlled by a mask.
1433      *
1434      * This is a masked lane-wise binary operation which applies
1435      * the primitive multiplication operation ({@code *}) to each lane.
1436      *
1437      * This method is also equivalent to the expression
1438      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1439      *    lanewise}{@code (}{@link VectorOperators#MUL
1440      *    MUL}{@code , e, m)}.
1441      *
1442      * @param e the input scalar
1443      * @param m the mask controlling lane selection
1444      * @return the result of multiplying this vector by the given scalar
1445      * @see #mul(Vector,VectorMask)
1446      * @see #broadcast(int)
1447      * @see #mul(int)
1448      * @see VectorOperators#MUL
1449      * @see #lanewise(VectorOperators.Binary,Vector)
1450      * @see #lanewise(VectorOperators.Binary,int)
1451      */
1452     @ForceInline
1453     public final IntVector mul(int e,
1454                                           VectorMask<Integer> m) {
1455         return lanewise(MUL, e, m);
1456     }
1457 
1458     /**
1459      * {@inheritDoc} <!--workaround-->
1460      * @apiNote If there is a zero divisor, {@code
1461      * ArithmeticException} will be thrown.
1462      */
1463     @Override
1464     @ForceInline
1465     public final IntVector div(Vector<Integer> v) {
1466         return lanewise(DIV, v);
1467     }
1468 
1469     /**
1470      * Divides this vector by the broadcast of an input scalar.
1471      *
1472      * This is a lane-wise binary operation which applies
1473      * the primitive division operation ({@code /}) to each lane.
1474      *
1475      * This method is also equivalent to the expression
1476      * {@link #lanewise(VectorOperators.Binary,int)
1477      *    lanewise}{@code (}{@link VectorOperators#DIV
1478      *    DIV}{@code , e)}.
1479      *
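          * For example, assuming the preferred species and an arbitrary
          * non-zero divisor:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector v = IntVector.broadcast(sp, 100);
          * IntVector q = v.div(4);  // every lane is 25
          * // v.div(0) would throw ArithmeticException
          * }</pre>
          *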
1480      * @apiNote If there is a zero divisor, {@code
1481      * ArithmeticException} will be thrown.
1482      *
1483      * @param e the input scalar
1484      * @return the result of dividing each lane of this vector by the scalar
1485      * @see #div(Vector)
1486      * @see #broadcast(int)
1487      * @see #div(int,VectorMask)
1488      * @see VectorOperators#DIV
1489      * @see #lanewise(VectorOperators.Binary,Vector)
1490      * @see #lanewise(VectorOperators.Binary,int)
1491      */
1492     @ForceInline
1493     public final IntVector div(int e) {
1494         return lanewise(DIV, e);
1495     }
1496 
1497     /**
1498      * {@inheritDoc} <!--workaround-->
1499      * @see #div(int,VectorMask)
1500      * @apiNote If there is a zero divisor, {@code
1501      * ArithmeticException} will be thrown.
1502      */
1503     @Override
1504     @ForceInline
1505     public final IntVector div(Vector<Integer> v,
1506                                           VectorMask<Integer> m) {
1507         return lanewise(DIV, v, m);
1508     }
1509 
1510     /**
1511      * Divides this vector by the broadcast of an input scalar,
1512      * selecting lane elements controlled by a mask.
1513      *
1514      * This is a masked lane-wise binary operation which applies
1515      * the primitive division operation ({@code /}) to each lane.
1516      *
1517      * This method is also equivalent to the expression
1518      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1519      *    lanewise}{@code (}{@link VectorOperators#DIV
1520      *    DIV}{@code , e, m)}.
1521      *
1522      * @apiNote If there is a zero divisor, {@code
1523      * ArithmeticException} will be thrown.
1524      *
1525      * @param e the input scalar
1526      * @param m the mask controlling lane selection
1527      * @return the result of dividing each lane of this vector by the scalar
1528      * @see #div(Vector,VectorMask)
1529      * @see #broadcast(int)
1530      * @see #div(int)
1531      * @see VectorOperators#DIV
1532      * @see #lanewise(VectorOperators.Binary,Vector)
1533      * @see #lanewise(VectorOperators.Binary,int)
1534      */
1535     @ForceInline
1536     public final IntVector div(int e,
1537                                           VectorMask<Integer> m) {
1538         return lanewise(DIV, e, m);
1539     }
1540 
1541     /// END OF FULL-SERVICE BINARY METHODS
1542 
1543     /// SECOND-TIER BINARY METHODS
1544     //
1545     // There are no masked versions.
1546 
1547     /**
1548      * {@inheritDoc} <!--workaround-->
1549      */
1550     @Override
1551     @ForceInline
1552     public final IntVector min(Vector<Integer> v) {
1553         return lanewise(MIN, v);
1554     }
1555 
1556     // FIXME:  "broadcast of an input scalar" is really wordy.  Reduce?
1557     /**
1558      * Computes the smaller of this vector and the broadcast of an input scalar.
1559      *
1560      * This is a lane-wise binary operation which applies the
1561      * operation {@code Math.min()} to each pair of
1562      * corresponding lane values.
1563      *
1564      * This method is also equivalent to the expression
1565      * {@link #lanewise(VectorOperators.Binary,int)
1566      *    lanewise}{@code (}{@link VectorOperators#MIN
1567      *    MIN}{@code , e)}.
1568      *
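          * For example, {@code min} can be combined with {@link #max(int) max}
          * to clamp lane values; the species and sample values below are
          * illustrative assumptions:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector v = IntVector.broadcast(sp, 300).withLane(0, -20);
          * IntVector clamped = v.max(0).min(255);  // lane 0 is 0, other lanes are 255
          * }</pre>
          *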
1569      * @param e the input scalar
1570      * @return the result of computing the smaller of this vector and the given scalar
1571      * @see #min(Vector)
1572      * @see #broadcast(int)
1573      * @see VectorOperators#MIN
1574      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
1575      */
1576     @ForceInline
1577     public final IntVector min(int e) {
1578         return lanewise(MIN, e);
1579     }
1580 
1581     /**
1582      * {@inheritDoc} <!--workaround-->
1583      */
1584     @Override
1585     @ForceInline
1586     public final IntVector max(Vector<Integer> v) {
1587         return lanewise(MAX, v);
1588     }
1589 
1590     /**
1591      * Computes the larger of this vector and the broadcast of an input scalar.
1592      *
1593      * This is a lane-wise binary operation which applies the
1594      * operation {@code Math.max()} to each pair of
1595      * corresponding lane values.
1596      *
1597      * This method is also equivalent to the expression
1598      * {@link #lanewise(VectorOperators.Binary,int)
1599      *    lanewise}{@code (}{@link VectorOperators#MAX
1600      *    MAX}{@code , e)}.
1601      *
1602      * @param e the input scalar
1603      * @return the result of computing the larger of this vector and the given scalar
1604      * @see #max(Vector)
1605      * @see #broadcast(int)
1606      * @see VectorOperators#MAX
1607      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
1608      */
1609     @ForceInline
1610     public final IntVector max(int e) {
1611         return lanewise(MAX, e);
1612     }
1613 
1614     // common bitwise operators: and, or, not (with scalar versions)
1615     /**
1616      * Computes the bitwise logical conjunction ({@code &})
1617      * of this vector and a second input vector.
1618      *
1619      * This is a lane-wise binary operation which applies
1620      * the primitive bitwise "and" operation ({@code &})
1621      * to each pair of corresponding lane values.
1622      *
1623      * This method is also equivalent to the expression
1624      * {@link #lanewise(VectorOperators.Binary,Vector)
1625      *    lanewise}{@code (}{@link VectorOperators#AND
1626      *    AND}{@code , v)}.
1627      *
1628      * <p>
1629      * This is not a full-service named operation like
1630      * {@link #add(Vector) add}.  A masked version of
1631      * this operation is not directly available
1632      * but may be obtained via the masked version of
1633      * {@code lanewise}.
1634      *
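          * For example, a masked form can be obtained as sketched below,
          * assuming the preferred species and arbitrary bit patterns:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector v = IntVector.broadcast(sp, 0b1100);
          * IntVector w = IntVector.broadcast(sp, 0b1010);
          * IntVector x = v.and(w);  // every lane is 0b1000
          * VectorMask<Integer> m = sp.indexInRange(0, 1);
          * IntVector y = v.lanewise(VectorOperators.AND, w, m);  // lane 0 is 0b1000, unset lanes keep 0b1100
          * }</pre>
          *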
1635      * @param v a second input vector
1636      * @return the bitwise {@code &} of this vector and the second input vector
1637      * @see #and(int)
1638      * @see #or(Vector)
1639      * @see #not()
1640      * @see VectorOperators#AND
1641      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1642      */
1643     @ForceInline
1644     public final IntVector and(Vector<Integer> v) {
1645         return lanewise(AND, v);
1646     }
1647 
1648     /**
1649      * Computes the bitwise logical conjunction ({@code &})
1650      * of this vector and a scalar.
1651      *
1652      * This is a lane-wise binary operation which applies
1653      * the primitive bitwise "and" operation ({@code &})
1654      * to each pair of corresponding lane values.
1655      *
1656      * This method is also equivalent to the expression
1657      * {@link #lanewise(VectorOperators.Binary,Vector)
1658      *    lanewise}{@code (}{@link VectorOperators#AND
1659      *    AND}{@code , e)}.
1660      *
1661      * @param e an input scalar
1662      * @return the bitwise {@code &} of this vector and the scalar
1663      * @see #and(Vector)
1664      * @see VectorOperators#AND
1665      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1666      */
1667     @ForceInline
1668     public final IntVector and(int e) {
1669         return lanewise(AND, e);
1670     }
1671 
1672     /**
1673      * Computes the bitwise logical disjunction ({@code |})
1674      * of this vector and a second input vector.
1675      *
1676      * This is a lane-wise binary operation which applies
1677      * the primitive bitwise "or" operation ({@code |})
1678      * to each pair of corresponding lane values.
1679      *
1680      * This method is also equivalent to the expression
1681      * {@link #lanewise(VectorOperators.Binary,Vector)
1682      *    lanewise}{@code (}{@link VectorOperators#OR
1683      *    OR}{@code , v)}.
1684      *
1685      * <p>
1686      * This is not a full-service named operation like
1687      * {@link #add(Vector) add}.  A masked version of
1688      * this operation is not directly available
1689      * but may be obtained via the masked version of
1690      * {@code lanewise}.
1691      *
1692      * @param v a second input vector
1693      * @return the bitwise {@code |} of this vector and the second input vector
1694      * @see #or(int)
1695      * @see #and(Vector)
1696      * @see #not()
1697      * @see VectorOperators#OR
1698      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1699      */
1700     @ForceInline
1701     public final IntVector or(Vector<Integer> v) {
1702         return lanewise(OR, v);
1703     }
1704 
1705     /**
1706      * Computes the bitwise logical disjunction ({@code |})
1707      * of this vector and a scalar.
1708      *
1709      * This is a lane-wise binary operation which applies
1710      * the primitive bitwise "or" operation ({@code |})
1711      * to each pair of corresponding lane values.
1712      *
1713      * This method is also equivalent to the expression
1714      * {@link #lanewise(VectorOperators.Binary,Vector)
1715      *    lanewise}{@code (}{@link VectorOperators#OR
1716      *    OR}{@code , e)}.
1717      *
1718      * @param e an input scalar
1719      * @return the bitwise {@code |} of this vector and the scalar
1720      * @see #or(Vector)
1721      * @see VectorOperators#OR
1722      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1723      */
1724     @ForceInline
1725     public final IntVector or(int e) {
1726         return lanewise(OR, e);
1727     }
1728 
1729 
1730 
1731     /// UNARY METHODS
1732 
1733     /**
1734      * {@inheritDoc} <!--workaround-->
1735      */
1736     @Override
1737     @ForceInline
1738     public final
1739     IntVector neg() {
1740         return lanewise(NEG);
1741     }
1742 
1743     /**
1744      * {@inheritDoc} <!--workaround-->
1745      */
1746     @Override
1747     @ForceInline
1748     public final
1749     IntVector abs() {
1750         return lanewise(ABS);
1751     }
1752 
1753     // not (~)
1754     /**
1755      * Computes the bitwise logical complement ({@code ~})
1756      * of this vector.
1757      *
1758      * This is a lane-wise unary operation which applies
1759      * the primitive bitwise "not" operation ({@code ~})
1760      * to each lane value.
1761      *
1762      * This method is also equivalent to the expression
1763      * {@link #lanewise(VectorOperators.Unary)
1764      *    lanewise}{@code (}{@link VectorOperators#NOT
1765      *    NOT}{@code )}.
1766      *
1767      * <p>
1768      * This is not a full-service named operation like
1769      * {@link #add(Vector) add}.  A masked version of
1770      * this operation is not directly available
1771      * but may be obtained via the masked version of
1772      * {@code lanewise}.
1773      *
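          * For example, the masked form may be obtained through {@code lanewise},
          * as sketched below with the preferred species assumed:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector v = IntVector.broadcast(sp, 0);
          * IntVector w = v.not();  // every lane is -1 (all bits set)
          * VectorMask<Integer> m = sp.indexInRange(0, 1);
          * IntVector x = v.lanewise(VectorOperators.NOT, m);  // lane 0 is -1, unset lanes keep 0
          * }</pre>
          *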
1774      * @return the bitwise complement {@code ~} of this vector
1775      * @see #and(Vector)
1776      * @see VectorOperators#NOT
1777      * @see #lanewise(VectorOperators.Unary,VectorMask)
1778      */
1779     @ForceInline
1780     public final IntVector not() {
1781         return lanewise(NOT);
1782     }
1783 
1784 
1785     /// COMPARISONS
1786 
1787     /**
1788      * {@inheritDoc} <!--workaround-->
1789      */
1790     @Override
1791     @ForceInline
1792     public final
1793     VectorMask<Integer> eq(Vector<Integer> v) {
1794         return compare(EQ, v);
1795     }
1796 
1797     /**
1798      * Tests if this vector is equal to an input scalar.
1799      *
1800      * This is a lane-wise binary test operation which applies
1801      * the primitive equals operation ({@code ==}) to each lane.
1802      * The result is the same as {@code compare(VectorOperators.EQ, e)}.
1803      *
1804      * @param e the input scalar
1805      * @return the result mask of testing if this vector
1806      *         is equal to {@code e}
1807      * @see #compare(VectorOperators.Comparison,int)
1808      */
1809     @ForceInline
1810     public final
1811     VectorMask<Integer> eq(int e) {
1812         return compare(EQ, e);
1813     }
1814 
1815     /**
1816      * {@inheritDoc} <!--workaround-->
1817      */
1818     @Override
1819     @ForceInline
1820     public final
1821     VectorMask<Integer> lt(Vector<Integer> v) {
1822         return compare(LT, v);
1823     }
1824 
1825     /**
1826      * Tests if this vector is less than an input scalar.
1827      *
1828      * This is a lane-wise binary test operation which applies
1829      * the primitive less than operation ({@code <}) to each lane.
1830      * The result is the same as {@code compare(VectorOperators.LT, e)}.
1831      *
1832      * @param e the input scalar
1833      * @return the mask result of testing if this vector
1834      *         is less than the input scalar
1835      * @see #compare(VectorOperators.Comparison,int)
1836      */
1837     @ForceInline
1838     public final
1839     VectorMask<Integer> lt(int e) {
1840         return compare(LT, e);
1841     }
1842 
1843     /**
1844      * {@inheritDoc} <!--workaround-->
1845      */
1846     @Override
1847     public abstract
1848     VectorMask<Integer> test(VectorOperators.Test op);
1849 
1850     /*package-private*/
1851     @ForceInline
1852     final
1853     <M extends VectorMask<Integer>>
1854     M testTemplate(Class<M> maskType, Test op) {
1855         IntSpecies vsp = vspecies();
1856         if (opKind(op, VO_SPECIAL)) {
1857             IntVector bits = this.viewAsIntegralLanes();
1858             VectorMask<Integer> m;
1859             if (op == IS_DEFAULT) {
1860                 m = bits.compare(EQ, (int) 0);
1861             } else if (op == IS_NEGATIVE) {
1862                 m = bits.compare(LT, (int) 0);
1863             }
1864             else {
1865                 throw new AssertionError(op);
1866             }
1867             return maskType.cast(m);
1868         }
1869         int opc = opCode(op);
1870         throw new AssertionError(op);
1871     }
1872 
1873     /**
1874      * {@inheritDoc} <!--workaround-->
1875      */
1876     @Override
1877     @ForceInline
1878     public final
1879     VectorMask<Integer> test(VectorOperators.Test op,
1880                                   VectorMask<Integer> m) {
1881         return test(op).and(m);
1882     }
1883 
1884     /**
1885      * {@inheritDoc} <!--workaround-->
1886      */
1887     @Override
1888     public abstract
1889     VectorMask<Integer> compare(VectorOperators.Comparison op, Vector<Integer> v);
1890 
1891     /*package-private*/
1892     @ForceInline
1893     final
1894     <M extends VectorMask<Integer>>
1895     M compareTemplate(Class<M> maskType, Comparison op, Vector<Integer> v) {
1896         IntVector that = (IntVector) v;
1897         that.check(this);
1898         int opc = opCode(op);
1899         return VectorSupport.compare(
1900             opc, getClass(), maskType, int.class, length(),
1901             this, that, null,
1902             (cond, v0, v1, m1) -> {
1903                 AbstractMask<Integer> m
1904                     = v0.bTest(cond, v1, (cond_, i, a, b)
1905                                -> compareWithOp(cond, a, b));
1906                 @SuppressWarnings("unchecked")
1907                 M m2 = (M) m;
1908                 return m2;
1909             });
1910     }
1911 
1912     /*package-private*/
1913     @ForceInline
1914     final
1915     <M extends VectorMask<Integer>>
1916     M compareTemplate(Class<M> maskType, Comparison op, Vector<Integer> v, M m) {
1917         IntVector that = (IntVector) v;
1918         that.check(this);
1919         m.check(maskType, this);
1920         int opc = opCode(op);
1921         return VectorSupport.compare(
1922             opc, getClass(), maskType, int.class, length(),
1923             this, that, m,
1924             (cond, v0, v1, m1) -> {
1925                 AbstractMask<Integer> cmpM
1926                     = v0.bTest(cond, v1, (cond_, i, a, b)
1927                                -> compareWithOp(cond, a, b));
1928                 @SuppressWarnings("unchecked")
1929                 M m2 = (M) cmpM.and(m1);
1930                 return m2;
1931             });
1932     }
1933 
1934     @ForceInline
1935     private static boolean compareWithOp(int cond, int a, int b) {
1936         return switch (cond) {
1937             case BT_eq -> a == b;
1938             case BT_ne -> a != b;
1939             case BT_lt -> a < b;
1940             case BT_le -> a <= b;
1941             case BT_gt -> a > b;
1942             case BT_ge -> a >= b;
1943             case BT_ult -> Integer.compareUnsigned(a, b) < 0;
1944             case BT_ule -> Integer.compareUnsigned(a, b) <= 0;
1945             case BT_ugt -> Integer.compareUnsigned(a, b) > 0;
1946             case BT_uge -> Integer.compareUnsigned(a, b) >= 0;
1947             default -> throw new AssertionError();
1948         };
1949     }
1950 
1951     /**
1952      * Tests this vector by comparing it with an input scalar,
1953      * according to the given comparison operation.
1954      *
1955      * This is a lane-wise binary test operation which applies
1956      * the comparison operation to each lane.
1957      * <p>
1958      * The result is the same as
1959      * {@code compare(op, broadcast(species(), e))}.
1960      * That is, the scalar may be regarded as broadcast to
1961      * a vector of the same species, and then compared
1962      * against the original vector, using the selected
1963      * comparison operation.
1964      *
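          * For example, the mask produced by a comparison can drive a blend;
          * the species and values below are illustrative assumptions:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector v = IntVector.broadcast(sp, -3).withLane(0, 7);
          * VectorMask<Integer> neg = v.compare(VectorOperators.LT, 0);
          * IntVector clipped = v.blend(0, neg);  // negative lanes become 0; lane 0 stays 7
          * }</pre>
          *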
1965      * @param op the operation used to compare lane values
1966      * @param e the input scalar
1967      * @return the mask result of testing lane-wise if this vector
1968      *         compares to the input, according to the selected
1969      *         comparison operator
1970      * @see IntVector#compare(VectorOperators.Comparison,Vector)
1971      * @see #eq(int)
1972      * @see #lt(int)
1973      */
1974     public abstract
1975     VectorMask<Integer> compare(Comparison op, int e);
1976 
1977     /*package-private*/
1978     @ForceInline
1979     final
1980     <M extends VectorMask<Integer>>
1981     M compareTemplate(Class<M> maskType, Comparison op, int e) {
1982         return compareTemplate(maskType, op, broadcast(e));
1983     }
1984 
1985     /**
1986      * Tests this vector by comparing it with an input scalar,
1987      * according to the given comparison operation,
1988      * in lanes selected by a mask.
1989      *
1990      * This is a masked lane-wise binary test operation which applies
1991      * the comparison operation to each pair of corresponding lane values.
1992      *
1993      * The returned result is equal to the expression
1994      * {@code compare(op,s).and(m)}.
1995      *
1996      * @param op the operation used to compare lane values
1997      * @param e the input scalar
1998      * @param m the mask controlling lane selection
1999      * @return the mask result of testing lane-wise if this vector
2000      *         compares to the input, according to the selected
2001      *         comparison operator,
2002      *         and only in the lanes selected by the mask
2003      * @see IntVector#compare(VectorOperators.Comparison,Vector,VectorMask)
2004      */
2005     @ForceInline
2006     public final VectorMask<Integer> compare(VectorOperators.Comparison op,
2007                                                int e,
2008                                                VectorMask<Integer> m) {
2009         return compare(op, broadcast(e), m);
2010     }
2011 
2012     /**
2013      * {@inheritDoc} <!--workaround-->
2014      */
2015     @Override
2016     public abstract
2017     VectorMask<Integer> compare(Comparison op, long e);
2018 
2019     /*package-private*/
2020     @ForceInline
2021     final
2022     <M extends VectorMask<Integer>>
2023     M compareTemplate(Class<M> maskType, Comparison op, long e) {
2024         return compareTemplate(maskType, op, broadcast(e));
2025     }
2026 
2027     /**
2028      * {@inheritDoc} <!--workaround-->
2029      */
2030     @Override
2031     @ForceInline
2032     public final
2033     VectorMask<Integer> compare(Comparison op, long e, VectorMask<Integer> m) {
2034         return compare(op, broadcast(e), m);
2035     }
2036 
2037 
2038 
2039     /**
2040      * {@inheritDoc} <!--workaround-->
2041      */
2042     @Override public abstract
2043     IntVector blend(Vector<Integer> v, VectorMask<Integer> m);
2044 
2045     /*package-private*/
2046     @ForceInline
2047     final
2048     <M extends VectorMask<Integer>>
2049     IntVector
2050     blendTemplate(Class<M> maskType, IntVector v, M m) {
2051         v.check(this);
2052         return VectorSupport.blend(
2053             getClass(), maskType, int.class, length(),
2054             this, v, m,
2055             (v0, v1, m_) -> v0.bOp(v1, m_, (i, a, b) -> b));
2056     }
2057 
2058     /**
2059      * {@inheritDoc} <!--workaround-->
2060      */
2061     @Override public abstract IntVector addIndex(int scale);
2062 
2063     /*package-private*/
2064     @ForceInline
2065     final IntVector addIndexTemplate(int scale) {
2066         IntSpecies vsp = vspecies();
2067         // make sure VLENGTH*scale doesn't overflow:
2068         vsp.checkScale(scale);
2069         return VectorSupport.indexVector(
2070             getClass(), int.class, length(),
2071             this, scale, vsp,
2072             (v, scale_, s)
2073             -> {
2074                 // If the platform doesn't support an INDEX
2075                 // instruction directly, load IOTA from memory
2076                 // and multiply.
2077                 IntVector iota = s.iota();
2078                 int sc = (int) scale_;
2079                 return v.add(sc == 1 ? iota : iota.mul(sc));
2080             });
2081     }
2082 
2083     /**
2084      * Replaces selected lanes of this vector with
2085      * a scalar value
2086      * under the control of a mask.
2087      *
2088      * This is a masked lane-wise binary operation which
2089      * selects each lane value from one or the other input.
2090      *
2091      * The returned result is equal to the expression
2092      * {@code blend(broadcast(e),m)}.
2093      *
2094      * @param e the input scalar, containing the replacement lane value
2095      * @param m the mask controlling lane selection of the scalar
2096      * @return the result of blending the lane elements of this vector with
2097      *         the scalar value
2098      */
2099     @ForceInline
2100     public final IntVector blend(int e,
2101                                             VectorMask<Integer> m) {
2102         return blend(broadcast(e), m);
2103     }
2104 
2105     /**
2106      * Replaces selected lanes of this vector with
2107      * a scalar value
2108      * under the control of a mask.
2109      *
2110      * This is a masked lane-wise binary operation which
2111      * selects each lane value from one or the other input.
2112      *
2113      * The returned result is equal to the expression
2114      * {@code blend(broadcast(e),m)}.
2115      *
2116      * @param e the input scalar, containing the replacement lane value
2117      * @param m the mask controlling lane selection of the scalar
2118      * @return the result of blending the lane elements of this vector with
2119      *         the scalar value
2120      */
2121     @ForceInline
2122     public final IntVector blend(long e,
2123                                             VectorMask<Integer> m) {
2124         return blend(broadcast(e), m);
2125     }
2126 
2127     /**
2128      * {@inheritDoc} <!--workaround-->
2129      */
2130     @Override
2131     public abstract
2132     IntVector slice(int origin, Vector<Integer> v1);
2133 
2134     /*package-private*/
2135     final
2136     @ForceInline
2137     IntVector sliceTemplate(int origin, Vector<Integer> v1) {
2138         IntVector that = (IntVector) v1;
2139         that.check(this);
2140         Objects.checkIndex(origin, length() + 1);
2141         VectorShuffle<Integer> iota = iotaShuffle();
2142         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.LT, (broadcast((int)(length() - origin))));
2143         iota = iotaShuffle(origin, 1, true);
2144         return that.rearrange(iota).blend(this.rearrange(iota), blendMask);
2145     }
2146 
2147     /**
2148      * {@inheritDoc} <!--workaround-->
2149      */
2150     @Override
2151     @ForceInline
2152     public final
2153     IntVector slice(int origin,
2154                                Vector<Integer> w,
2155                                VectorMask<Integer> m) {
2156         return broadcast(0).blend(slice(origin, w), m);
2157     }
2158 
2159     /**
2160      * {@inheritDoc} <!--workaround-->
2161      */
2162     @Override
2163     public abstract
2164     IntVector slice(int origin);
2165 
2166     /*package-private*/
2167     final
2168     @ForceInline
2169     IntVector sliceTemplate(int origin) {
2170         Objects.checkIndex(origin, length() + 1);
2171         VectorShuffle<Integer> iota = iotaShuffle();
2172         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.LT, (broadcast((int)(length() - origin))));
2173         iota = iotaShuffle(origin, 1, true);
2174         return vspecies().zero().blend(this.rearrange(iota), blendMask);
2175     }
2176 
2177     /**
2178      * {@inheritDoc} <!--workaround-->
2179      */
2180     @Override
2181     public abstract
2182     IntVector unslice(int origin, Vector<Integer> w, int part);
2183 
2184     /*package-private*/
2185     final
2186     @ForceInline
2187     IntVector
2188     unsliceTemplate(int origin, Vector<Integer> w, int part) {
2189         IntVector that = (IntVector) w;
2190         that.check(this);
2191         Objects.checkIndex(origin, length() + 1);
2192         VectorShuffle<Integer> iota = iotaShuffle();
2193         VectorMask<Integer> blendMask = iota.toVector().compare((part == 0) ? VectorOperators.GE : VectorOperators.LT,
2194                                                                   (broadcast((int)(origin))));
2195         iota = iotaShuffle(-origin, 1, true);
2196         return that.blend(this.rearrange(iota), blendMask);
2197     }
2198 
2199     /*package-private*/
2200     final
2201     @ForceInline
2202     <M extends VectorMask<Integer>>
2203     IntVector
2204     unsliceTemplate(Class<M> maskType, int origin, Vector<Integer> w, int part, M m) {
2205         IntVector that = (IntVector) w;
2206         that.check(this);
2207         IntVector slice = that.sliceTemplate(origin, that);
2208         slice = slice.blendTemplate(maskType, this, m);
2209         return slice.unsliceTemplate(origin, w, part);
2210     }
2211 
2212     /**
2213      * {@inheritDoc} <!--workaround-->
2214      */
2215     @Override
2216     public abstract
2217     IntVector unslice(int origin, Vector<Integer> w, int part, VectorMask<Integer> m);
2218 
2219     /**
2220      * {@inheritDoc} <!--workaround-->
2221      */
2222     @Override
2223     public abstract
2224     IntVector unslice(int origin);
2225 
2226     /*package-private*/
2227     final
2228     @ForceInline
2229     IntVector
2230     unsliceTemplate(int origin) {
2231         Objects.checkIndex(origin, length() + 1);
2232         VectorShuffle<Integer> iota = iotaShuffle();
2233         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.GE,
2234                                                                   (broadcast((int)(origin))));
2235         iota = iotaShuffle(-origin, 1, true);
2236         return vspecies().zero().blend(this.rearrange(iota), blendMask);
2237     }
2238 
2239     private ArrayIndexOutOfBoundsException
2240     wrongPartForSlice(int part) {
2241         String msg = String.format("bad part number %d for slice operation",
2242                                    part);
2243         return new ArrayIndexOutOfBoundsException(msg);
2244     }
2245 
2246     /**
2247      * {@inheritDoc} <!--workaround-->
2248      */
2249     @Override
2250     public abstract
2251     IntVector rearrange(VectorShuffle<Integer> m);
2252 
2253     /*package-private*/
2254     @ForceInline
2255     final
2256     <S extends VectorShuffle<Integer>>
2257     IntVector rearrangeTemplate(Class<S> shuffletype, S shuffle) {
2258         shuffle.checkIndexes();
2259         return VectorSupport.rearrangeOp(
2260             getClass(), shuffletype, null, int.class, length(),
2261             this, shuffle, null,
2262             (v1, s_, m_) -> v1.uOp((i, a) -> {
2263                 int ei = s_.laneSource(i);
2264                 return v1.lane(ei);
2265             }));
2266     }
2267 
2268     /**
2269      * {@inheritDoc} <!--workaround-->
2270      */
2271     @Override
2272     public abstract
2273     IntVector rearrange(VectorShuffle<Integer> s,
2274                                    VectorMask<Integer> m);
2275 
2276     /*package-private*/
2277     @ForceInline
2278     final
2279     <S extends VectorShuffle<Integer>, M extends VectorMask<Integer>>
2280     IntVector rearrangeTemplate(Class<S> shuffletype,
2281                                            Class<M> masktype,
2282                                            S shuffle,
2283                                            M m) {
2284 
2285         m.check(masktype, this);
2286         VectorMask<Integer> valid = shuffle.laneIsValid();
2287         if (m.andNot(valid).anyTrue()) {
2288             shuffle.checkIndexes();
2289             throw new AssertionError();
2290         }
2291         return VectorSupport.rearrangeOp(
2292                    getClass(), shuffletype, masktype, int.class, length(),
2293                    this, shuffle, m,
2294                    (v1, s_, m_) -> v1.uOp((i, a) -> {
2295                         int ei = s_.laneSource(i);
2296                         return ei < 0  || !m_.laneIsSet(i) ? 0 : v1.lane(ei);
2297                    }));
2298     }
2299 
2300     /**
2301      * {@inheritDoc} <!--workaround-->
2302      */
2303     @Override
2304     public abstract
2305     IntVector rearrange(VectorShuffle<Integer> s,
2306                                    Vector<Integer> v);
2307 
2308     /*package-private*/
2309     @ForceInline
2310     final
2311     <S extends VectorShuffle<Integer>>
2312     IntVector rearrangeTemplate(Class<S> shuffletype,
2313                                            S shuffle,
2314                                            IntVector v) {
2315         VectorMask<Integer> valid = shuffle.laneIsValid();
2316         @SuppressWarnings("unchecked")
2317         S ws = (S) shuffle.wrapIndexes();
2318         IntVector r0 =
2319             VectorSupport.rearrangeOp(
2320                 getClass(), shuffletype, null, int.class, length(),
2321                 this, ws, null,
2322                 (v0, s_, m_) -> v0.uOp((i, a) -> {
2323                     int ei = s_.laneSource(i);
2324                     return v0.lane(ei);
2325                 }));
2326         IntVector r1 =
2327             VectorSupport.rearrangeOp(
2328                 getClass(), shuffletype, null, int.class, length(),
2329                 v, ws, null,
2330                 (v1, s_, m_) -> v1.uOp((i, a) -> {
2331                     int ei = s_.laneSource(i);
2332                     return v1.lane(ei);
2333                 }));
2334         return r1.blend(r0, valid);
2335     }
2336 
2337     @ForceInline
2338     private final
2339     VectorShuffle<Integer> toShuffle0(IntSpecies dsp) {
2340         int[] a = toArray();
2341         int[] sa = new int[a.length];
2342         for (int i = 0; i < a.length; i++) {
2343             sa[i] = (int) a[i];
2344         }
2345         return VectorShuffle.fromArray(dsp, sa, 0);
2346     }
2347 
2348     /*package-private*/
2349     @ForceInline
2350     final
2351     VectorShuffle<Integer> toShuffleTemplate(Class<?> shuffleType) {
2352         IntSpecies vsp = vspecies();
2353         return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
2354                                      getClass(), int.class, length(),
2355                                      shuffleType, byte.class, length(),
2356                                      this, vsp,
2357                                      IntVector::toShuffle0);
2358     }
2359 
2360     /**
2361      * {@inheritDoc} <!--workaround-->
2362      */
2363     @Override
2364     public abstract
2365     IntVector selectFrom(Vector<Integer> v);
2366 
2367     /*package-private*/
2368     @ForceInline
2369     final IntVector selectFromTemplate(IntVector v) {
2370         return v.rearrange(this.toShuffle());
2371     }
2372 
2373     /**
2374      * {@inheritDoc} <!--workaround-->
2375      */
2376     @Override
2377     public abstract
2378     IntVector selectFrom(Vector<Integer> s, VectorMask<Integer> m);
2379 
2380     /*package-private*/
2381     @ForceInline
2382     final IntVector selectFromTemplate(IntVector v,
2383                                                   AbstractMask<Integer> m) {
2384         return v.rearrange(this.toShuffle(), m);
2385     }
2386 
2387     /// Ternary operations
2388 
2389     /**
2390      * Blends together the bits of two vectors under
2391      * the control of a third, which supplies mask bits.
2392      *
2393      * This is a lane-wise ternary operation which performs
2394      * a bitwise blending operation {@code (a&~c)|(b&c)}
2395      * to each lane.
2396      *
2397      * This method is also equivalent to the expression
2398      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2399      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2400      *    BITWISE_BLEND}{@code , bits, mask)}.
2401      *
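          * For example, selecting the low 8 bits from one vector and the
          * remaining bits from another (the species and bit patterns are
          * illustrative assumptions):
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector a = IntVector.broadcast(sp, 0xF0F0);
          * IntVector b = IntVector.broadcast(sp, 0x0F0F);
          * IntVector c = IntVector.broadcast(sp, 0x00FF);  // set bits select from b
          * IntVector r = a.bitwiseBlend(b, c);  // every lane is 0xF00F
          * }</pre>
          *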
2402      * @param bits input bits to blend into the current vector
2403      * @param mask a bitwise mask to enable blending of the input bits
2404      * @return the bitwise blend of the given bits into the current vector,
2405      *         under control of the bitwise mask
2406      * @see #bitwiseBlend(int,int)
2407      * @see #bitwiseBlend(int,Vector)
2408      * @see #bitwiseBlend(Vector,int)
2409      * @see VectorOperators#BITWISE_BLEND
2410      * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
2411      */
2412     @ForceInline
2413     public final
2414     IntVector bitwiseBlend(Vector<Integer> bits, Vector<Integer> mask) {
2415         return lanewise(BITWISE_BLEND, bits, mask);
2416     }
2417 
2418     /**
2419      * Blends together the bits of a vector and a scalar under
2420      * the control of another scalar, which supplies mask bits.
2421      *
2422      * This is a lane-wise ternary operation which performs
2423      * a bitwise blending operation {@code (a&~c)|(b&c)}
2424      * to each lane.
2425      *
2426      * This method is also equivalent to the expression
2427      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2428      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2429      *    BITWISE_BLEND}{@code , bits, mask)}.
2430      *
2431      * @param bits input bits to blend into the current vector
2432      * @param mask a bitwise mask to enable blending of the input bits
2433      * @return the bitwise blend of the given bits into the current vector,
2434      *         under control of the bitwise mask
2435      * @see #bitwiseBlend(Vector,Vector)
2436      * @see VectorOperators#BITWISE_BLEND
2437      * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
2438      */
2439     @ForceInline
2440     public final
2441     IntVector bitwiseBlend(int bits, int mask) {
2442         return lanewise(BITWISE_BLEND, bits, mask);
2443     }
2444 
2445     /**
2446      * Blends together the bits of a vector and a scalar under
2447      * the control of another vector, which supplies mask bits.
2448      *
2449      * This is a lane-wise ternary operation which performs
2450      * a bitwise blending operation {@code (a&~c)|(b&c)}
2451      * to each lane.
2452      *
2453      * This method is also equivalent to the expression
2454      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2455      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2456      *    BITWISE_BLEND}{@code , bits, mask)}.
2457      *
2458      * @param bits input bits to blend into the current vector
2459      * @param mask a bitwise mask to enable blending of the input bits
2460      * @return the bitwise blend of the given bits into the current vector,
2461      *         under control of the bitwise mask
2462      * @see #bitwiseBlend(Vector,Vector)
2463      * @see VectorOperators#BITWISE_BLEND
2464      * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
2465      */
2466     @ForceInline
2467     public final
2468     IntVector bitwiseBlend(int bits, Vector<Integer> mask) {
2469         return lanewise(BITWISE_BLEND, bits, mask);
2470     }
2471 
2472     /**
2473      * Blends together the bits of two vectors under
2474      * the control of a scalar, which supplies mask bits.
2475      *
2476      * This is a lane-wise ternary operation which performs
2477      * a bitwise blending operation {@code (a&~c)|(b&c)}
2478      * to each lane.
2479      *
2480      * This method is also equivalent to the expression
2481      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2482      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2483      *    BITWISE_BLEND}{@code , bits, mask)}.
2484      *
2485      * @param bits input bits to blend into the current vector
2486      * @param mask a bitwise mask to enable blending of the input bits
2487      * @return the bitwise blend of the given bits into the current vector,
2488      *         under control of the bitwise mask
2489      * @see #bitwiseBlend(Vector,Vector)
2490      * @see VectorOperators#BITWISE_BLEND
2491      * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
2492      */
2493     @ForceInline
2494     public final
2495     IntVector bitwiseBlend(Vector<Integer> bits, int mask) {
2496         return lanewise(BITWISE_BLEND, bits, mask);
2497     }
2498 
2499 
2500     // Type specific horizontal reductions
2501 
2502     /**
2503      * Returns a value accumulated from all the lanes of this vector.
2504      *
2505      * This is an associative cross-lane reduction operation which
2506      * applies the specified operation to all the lane elements.
2507      * <p>
2508      * A few reduction operations do not support arbitrary reordering
2509      * of their operands, yet are included here because of their
2510      * usefulness.
2511      * <ul>
2512      * <li>
2513      * In the case of {@code FIRST_NONZERO}, the reduction returns
2514      * the value from the lowest-numbered non-zero lane.
2515      * <li>
2516      * All other reduction operations are fully commutative and
2517      * associative.  The implementation can choose any order of
2518      * processing, yet it will always produce the same result.
2519      * </ul>
2520      *
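          * For example, assuming the preferred species and arbitrary lane values:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * int sum = IntVector.broadcast(sp, 2).reduceLanes(VectorOperators.ADD);  // 2 * sp.length()
          * int max = IntVector.broadcast(sp, 2).withLane(0, 9).reduceLanes(VectorOperators.MAX);  // 9
          * }</pre>
          *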
2521      * @param op the operation used to combine lane values
2522      * @return the accumulated result
2523      * @throws UnsupportedOperationException if this vector does
2524      *         not support the requested operation
2525      * @see #reduceLanes(VectorOperators.Associative,VectorMask)
2526      * @see #add(Vector)
2527      * @see #mul(Vector)
2528      * @see #min(Vector)
2529      * @see #max(Vector)
2530      * @see #and(Vector)
2531      * @see #or(Vector)
2532      * @see VectorOperators#XOR
2533      * @see VectorOperators#FIRST_NONZERO
2534      */
2535     public abstract int reduceLanes(VectorOperators.Associative op);
2536 
2537     /**
2538      * Returns a value accumulated from selected lanes of this vector,
2539      * controlled by a mask.
2540      *
2541      * This is an associative cross-lane reduction operation which
2542      * applies the specified operation to the selected lane elements.
2543      * <p>
2544      * If no elements are selected, an operation-specific identity
2545      * value is returned.
2546      * <ul>
2547      * <li>
2548      * If the operation is
2549      *  {@code ADD}, {@code XOR}, {@code OR},
2550      * or {@code FIRST_NONZERO},
2551      * then the identity value is zero, the default {@code int} value.
2552      * <li>
2553      * If the operation is {@code MUL},
2554      * then the identity value is one.
2555      * <li>
2556      * If the operation is {@code AND},
2557      * then the identity value is minus one (all bits set).
2558      * <li>
2559      * If the operation is {@code MAX},
2560      * then the identity value is {@code Integer.MIN_VALUE}.
2561      * <li>
2562      * If the operation is {@code MIN},
2563      * then the identity value is {@code Integer.MAX_VALUE}.
2564      * </ul>
2565      * <p>
2566      * A few reduction operations do not support arbitrary reordering
2567      * of their operands, yet are included here because of their
2568      * usefulness.
2569      * <ul>
2570      * <li>
2571      * In the case of {@code FIRST_NONZERO}, the reduction returns
2572      * the value from the lowest-numbered non-zero lane.
2573      * <li>
2574      * All other reduction operations are fully commutative and
2575      * associative.  The implementation can choose any order of
2576      * processing, yet it will always produce the same result.
2577      * </ul>
2578      *
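          * For example, an all-unset mask yields the identity value of the
          * operation; the species below is an illustrative assumption:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector v = IntVector.broadcast(sp, 7);
          * VectorMask<Integer> none = sp.maskAll(false);
          * int s = v.reduceLanes(VectorOperators.ADD, none);  // 0, the ADD identity
          * int p = v.reduceLanes(VectorOperators.MUL, none);  // 1, the MUL identity
          * }</pre>
          *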
2579      * @param op the operation used to combine lane values
2580      * @param m the mask controlling lane selection
2581      * @return the reduced result accumulated from the selected lane values
2582      * @throws UnsupportedOperationException if this vector does
2583      *         not support the requested operation
2584      * @see #reduceLanes(VectorOperators.Associative)
2585      */
2586     public abstract int reduceLanes(VectorOperators.Associative op,
2587                                        VectorMask<Integer> m);
2588 
2589     /*package-private*/
2590     @ForceInline
2591     final
2592     int reduceLanesTemplate(VectorOperators.Associative op,
2593                                Class<? extends VectorMask<Integer>> maskClass,
2594                                VectorMask<Integer> m) {
2595         m.check(maskClass, this);
2596         if (op == FIRST_NONZERO) {
2597             IntVector v = reduceIdentityVector(op).blend(this, m);
2598             return v.reduceLanesTemplate(op);
2599         }
2600         int opc = opCode(op);
2601         return fromBits(VectorSupport.reductionCoerced(
2602             opc, getClass(), maskClass, int.class, length(),
2603             this, m,
2604             REDUCE_IMPL.find(op, opc, IntVector::reductionOperations)));
2605     }
2606 
2607     /*package-private*/
2608     @ForceInline
2609     final
2610     int reduceLanesTemplate(VectorOperators.Associative op) {
2611         if (op == FIRST_NONZERO) {
2612             // FIXME:  The JIT should handle this, and other scan ops also.
2613             VectorMask<Integer> thisNZ
2614                 = this.viewAsIntegralLanes().compare(NE, (int) 0);
2615             return this.lane(thisNZ.firstTrue());
2616         }
2617         int opc = opCode(op);
2618         return fromBits(VectorSupport.reductionCoerced(
2619             opc, getClass(), null, int.class, length(),
2620             this, null,
2621             REDUCE_IMPL.find(op, opc, IntVector::reductionOperations)));
2622     }
2623 
2624     private static final
2625     ImplCache<Associative, ReductionOperation<IntVector, VectorMask<Integer>>>
2626         REDUCE_IMPL = new ImplCache<>(Associative.class, IntVector.class);
2627 
2628     private static ReductionOperation<IntVector, VectorMask<Integer>> reductionOperations(int opc_) {
2629         switch (opc_) {
2630             case VECTOR_OP_ADD: return (v, m) ->
2631                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a + b)));
2632             case VECTOR_OP_MUL: return (v, m) ->
2633                     toBits(v.rOp((int)1, m, (i, a, b) -> (int)(a * b)));
2634             case VECTOR_OP_MIN: return (v, m) ->
2635                     toBits(v.rOp(MAX_OR_INF, m, (i, a, b) -> (int) Math.min(a, b)));
2636             case VECTOR_OP_MAX: return (v, m) ->
2637                     toBits(v.rOp(MIN_OR_INF, m, (i, a, b) -> (int) Math.max(a, b)));
2638             case VECTOR_OP_AND: return (v, m) ->
2639                     toBits(v.rOp((int)-1, m, (i, a, b) -> (int)(a & b)));
2640             case VECTOR_OP_OR: return (v, m) ->
2641                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a | b)));
2642             case VECTOR_OP_XOR: return (v, m) ->
2643                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a ^ b)));
2644             default: return null;
2645         }
2646     }
2647 
2648     private
2649     @ForceInline
2650     IntVector reduceIdentityVector(VectorOperators.Associative op) {
2651         int opc = opCode(op);
2652         UnaryOperator<IntVector> fn
2653             = REDUCE_ID_IMPL.find(op, opc, (opc_) -> {
2654                 switch (opc_) {
2655                 case VECTOR_OP_ADD:
2656                 case VECTOR_OP_OR:
2657                 case VECTOR_OP_XOR:
2658                     return v -> v.broadcast(0);
2659                 case VECTOR_OP_MUL:
2660                     return v -> v.broadcast(1);
2661                 case VECTOR_OP_AND:
2662                     return v -> v.broadcast(-1);
2663                 case VECTOR_OP_MIN:
2664                     return v -> v.broadcast(MAX_OR_INF);
2665                 case VECTOR_OP_MAX:
2666                     return v -> v.broadcast(MIN_OR_INF);
2667                 default: return null;
2668                 }
2669             });
2670         return fn.apply(this);
2671     }
2672     private static final
2673     ImplCache<Associative,UnaryOperator<IntVector>> REDUCE_ID_IMPL
2674         = new ImplCache<>(Associative.class, IntVector.class);
2675 
2676     private static final int MIN_OR_INF = Integer.MIN_VALUE;
2677     private static final int MAX_OR_INF = Integer.MAX_VALUE;
2678 
2679     public @Override abstract long reduceLanesToLong(VectorOperators.Associative op);
2680     public @Override abstract long reduceLanesToLong(VectorOperators.Associative op,
2681                                                      VectorMask<Integer> m);
2682 
2683     // Type specific accessors
2684 
2685     /**
2686      * Gets the lane element at lane index {@code i}.
2687      *
2688      * @param i the lane index
2689      * @return the lane element at lane index {@code i}
2690      * @throws IllegalArgumentException if the index is out of range
2691      * ({@code < 0 || >= length()})
2692      */
2693     public abstract int lane(int i);
2694 
2695     /**
2696      * Replaces the lane element of this vector at lane index {@code i} with
2697      * value {@code e}.
2698      *
2699      * This is a cross-lane operation and behaves as if it returns the result
2700      * of blending this vector with an input vector that is the result of
2701      * broadcasting {@code e} and a mask that has only one lane set at lane
2702      * index {@code i}.
2703      *
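          * For example, assuming the preferred species:
          * <pre>{@code
          * var sp = IntVector.SPECIES_PREFERRED;
          * IntVector v = IntVector.broadcast(sp, 0).withLane(0, 42);  // lane 0 is 42, other lanes are 0
          * int e0 = v.lane(0);  // 42
          * }</pre>
          *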
2704      * @param i the lane index of the lane element to be replaced
2705      * @param e the value to be placed
2706      * @return the result of replacing the lane element of this vector at lane
2707      * index {@code i} with value {@code e}.
2708      * @throws IllegalArgumentException if the index is out of range
2709      * ({@code < 0 || >= length()})
2710      */
2711     public abstract IntVector withLane(int i, int e);
2712 
2713     // Memory load operations
2714 
2715     /**
2716      * Returns an array of type {@code int[]}
2717      * containing all the lane values.
2718      * The array length is the same as the vector length.
2719      * The array elements are stored in lane order.
2720      * <p>
2721      * This method behaves as if it stores
2722      * this vector into an allocated array
2723      * (using {@link #intoArray(int[], int) intoArray})
2724      * and returns the array as follows:
2725      * <pre>{@code
2726      *   int[] a = new int[this.length()];
2727      *   this.intoArray(a, 0);
2728      *   return a;
2729      * }</pre>
2730      *
2731      * @return an array containing the lane values of this vector
2732      */
2733     @ForceInline
2734     @Override
2735     public final int[] toArray() {
2736         int[] a = new int[vspecies().laneCount()];
2737         intoArray(a, 0);
2738         return a;
2739     }
2740 
2741     /**
2742      * {@inheritDoc} <!--workaround-->
2743      * This is an alias for {@link #toArray()}.
2744      * When this method is used on vectors
2745      * of type {@code IntVector},
2746      * there will be no loss of range or precision.
2747      */
2748     @ForceInline
2749     @Override
2750     public final int[] toIntArray() {
2751         return toArray();
2752     }
2753 
2754     /** {@inheritDoc} <!--workaround-->
2755      * @implNote
2756      * When this method is used on vectors
2757      * of type {@code IntVector},
2758      * there will be no loss of precision or range,
2759      * and so no {@code UnsupportedOperationException} will
2760      * be thrown.
2761      */
2762     @ForceInline
2763     @Override
2764     public final long[] toLongArray() {
2765         int[] a = toArray();
2766         long[] res = new long[a.length];
2767         for (int i = 0; i < a.length; i++) {
2768             int e = a[i];
2769             res[i] = IntSpecies.toIntegralChecked(e, false);
2770         }
2771         return res;
2772     }
2773 
2774     /** {@inheritDoc} <!--workaround-->
2775      * @implNote
2776      * When this method is used on vectors
2777      * of type {@code IntVector},
2778      * there will be no loss of precision.
2779      */
2780     @ForceInline
2781     @Override
2782     public final double[] toDoubleArray() {
2783         int[] a = toArray();
2784         double[] res = new double[a.length];
2785         for (int i = 0; i < a.length; i++) {
2786             res[i] = (double) a[i];
2787         }
2788         return res;
2789     }
2790 
2791     /**
2792      * Loads a vector from a byte array starting at an offset.
2793      * Bytes are composed into primitive lane elements according
2794      * to the specified byte order.
2795      * The vector is arranged into lanes according to
2796      * <a href="Vector.html#lane-order">memory ordering</a>.
2797      * <p>
2798      * This method behaves as if it returns the result of calling
2799      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2800      * fromByteBuffer()} as follows:
2801      * <pre>{@code
2802      * var bb = ByteBuffer.wrap(a);
2803      * var m = species.maskAll(true);
2804      * return fromByteBuffer(species, bb, offset, bo, m);
2805      * }</pre>
2806      *
2807      * @param species species of desired vector
2808      * @param a the byte array
2809      * @param offset the offset into the array
2810      * @param bo the intended byte order
2811      * @return a vector loaded from a byte array
2812      * @throws IndexOutOfBoundsException
2813      *         if {@code offset+N*ESIZE < 0}
2814      *         or {@code offset+(N+1)*ESIZE > a.length}
2815      *         for any lane {@code N} in the vector
2816      */
2817     @ForceInline
2818     public static
2819     IntVector fromByteArray(VectorSpecies<Integer> species,
2820                                        byte[] a, int offset,
2821                                        ByteOrder bo) {
2822         offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
2823         IntSpecies vsp = (IntSpecies) species;
2824         return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
2825     }
2826 
2827     /**
2828      * Loads a vector from a byte array starting at an offset
2829      * and using a mask.
2830      * Lanes where the mask is unset are filled with the default
2831      * value of {@code int} (zero).
2832      * Bytes are composed into primitive lane elements according
2833      * to the specified byte order.
2834      * The vector is arranged into lanes according to
2835      * <a href="Vector.html#lane-order">memory ordering</a>.
2836      * <p>
2837      * This method behaves as if it returns the result of calling
2838      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2839      * fromByteBuffer()} as follows:
2840      * <pre>{@code
2841      * var bb = ByteBuffer.wrap(a);
2842      * return fromByteBuffer(species, bb, offset, bo, m);
2843      * }</pre>
2844      *
2845      * @param species species of desired vector
2846      * @param a the byte array
2847      * @param offset the offset into the array
2848      * @param bo the intended byte order
2849      * @param m the mask controlling lane selection
2850      * @return a vector loaded from a byte array
2851      * @throws IndexOutOfBoundsException
2852      *         if {@code offset+N*ESIZE < 0}
2853      *         or {@code offset+(N+1)*ESIZE > a.length}
2854      *         for any lane {@code N} in the vector
2855      *         where the mask is set
2856      */
2857     @ForceInline
2858     public static
2859     IntVector fromByteArray(VectorSpecies<Integer> species,
2860                                        byte[] a, int offset,
2861                                        ByteOrder bo,
2862                                        VectorMask<Integer> m) {
2863         IntSpecies vsp = (IntSpecies) species;
2864         if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
2865             return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
2866         }
2867 
2868         // FIXME: optimize
2869         checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
2870         ByteBuffer wb = wrapper(a, bo);
2871         return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
2872                    (wb_, o, i)  -> wb_.getInt(o + i * 4));
2873     }
2874 
2875     /**
2876      * Loads a vector from an array of type {@code int[]}
2877      * starting at an offset.
2878      * For each vector lane, where {@code N} is the vector lane index, the
2879      * array element at index {@code offset + N} is placed into the
2880      * resulting vector at lane index {@code N}.
2881      *
2882      * @param species species of desired vector
2883      * @param a the array
2884      * @param offset the offset into the array
2885      * @return the vector loaded from an array
2886      * @throws IndexOutOfBoundsException
2887      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2888      *         for any lane {@code N} in the vector
2889      */
2890     @ForceInline
2891     public static
2892     IntVector fromArray(VectorSpecies<Integer> species,
2893                                    int[] a, int offset) {
2894         offset = checkFromIndexSize(offset, species.length(), a.length);
2895         IntSpecies vsp = (IntSpecies) species;
2896         return vsp.dummyVector().fromArray0(a, offset);
2897     }
2898 
2899     /**
2900      * Loads a vector from an array of type {@code int[]}
2901      * starting at an offset and using a mask.
2902      * Lanes where the mask is unset are filled with the default
2903      * value of {@code int} (zero).
2904      * For each vector lane, where {@code N} is the vector lane index,
2905      * if the mask lane at index {@code N} is set then the array element at
2906      * index {@code offset + N} is placed into the resulting vector at lane index
2907      * {@code N}, otherwise the default element value is placed into the
2908      * resulting vector at lane index {@code N}.
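          * <p>
          * A sketch of a typical masked tail load; the species and data are
          * illustrative only:
          * <pre>{@code
          * int[] data = {10, 20, 30, 40, 50};       // length 5
          * var species = IntVector.SPECIES_128;     // 4 int lanes
          * int i = 4;                               // one element remains
          * var m = species.indexInRange(i, data.length);
          * IntVector v = IntVector.fromArray(species, data, i, m);
          * // v holds {50, 0, 0, 0}; unset lanes default to zero
          * }</pre>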
2909      *
2910      * @param species species of desired vector
2911      * @param a the array
2912      * @param offset the offset into the array
2913      * @param m the mask controlling lane selection
2914      * @return the vector loaded from an array
2915      * @throws IndexOutOfBoundsException
2916      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2917      *         for any lane {@code N} in the vector
2918      *         where the mask is set
2919      */
2920     @ForceInline
2921     public static
2922     IntVector fromArray(VectorSpecies<Integer> species,
2923                                    int[] a, int offset,
2924                                    VectorMask<Integer> m) {
2925         IntSpecies vsp = (IntSpecies) species;
2926         if (offset >= 0 && offset <= (a.length - species.length())) {
2927             return vsp.dummyVector().fromArray0(a, offset, m);
2928         }
2929 
2930         // FIXME: optimize
2931         checkMaskFromIndexSize(offset, vsp, m, 1, a.length);
2932         return vsp.vOp(m, i -> a[offset + i]);
2933     }
2934 
2935     /**
2936      * Gathers a new vector composed of elements from an array of type
2937      * {@code int[]},
2938      * using indexes obtained by adding a fixed {@code offset} to a
2939      * series of secondary offsets from an <em>index map</em>.
2940      * The index map is a contiguous sequence of {@code VLENGTH}
2941      * elements in a second array of {@code int}s, starting at a given
2942      * {@code mapOffset}.
2943      * <p>
2944      * For each vector lane, where {@code N} is the vector lane index,
2945      * the lane is loaded from the array
2946      * element {@code a[f(N)]}, where {@code f(N)} is the
2947      * index mapping expression
2948      * {@code offset + indexMap[mapOffset + N]}.
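          * <p>
          * A small gather sketch; the species, array, and index map are
          * illustrative only:
          * <pre>{@code
          * int[] data = {100, 101, 102, 103, 104, 105, 106, 107};
          * int[] indexMap = {7, 0, 3, 5};
          * var species = IntVector.SPECIES_128;     // 4 int lanes
          * IntVector v = IntVector.fromArray(species, data, 0, indexMap, 0);
          * // v holds {data[7], data[0], data[3], data[5]} = {107, 100, 103, 105}
          * }</pre>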
2949      *
2950      * @param species species of desired vector
2951      * @param a the array
2952      * @param offset the offset into the array, which may be negative if the
2953      * relative indexes in the index map compensate to produce a value within
2954      * the array bounds
2955      * @param indexMap the index map
2956      * @param mapOffset the offset into the index map
2957      * @return the vector loaded from the indexed elements of the array
2958      * @throws IndexOutOfBoundsException
2959      *         if {@code mapOffset+N < 0}
2960      *         or if {@code mapOffset+N >= indexMap.length},
2961      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
2962      *         is an invalid index into {@code a},
2963      *         for any lane {@code N} in the vector
2964      * @see IntVector#toIntArray()
2965      */
2966     @ForceInline
2967     public static
2968     IntVector fromArray(VectorSpecies<Integer> species,
2969                                    int[] a, int offset,
2970                                    int[] indexMap, int mapOffset) {
2971         IntSpecies vsp = (IntSpecies) species;
2972         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
2973         Objects.requireNonNull(a);
2974         Objects.requireNonNull(indexMap);
2975         Class<? extends IntVector> vectorType = vsp.vectorType();
2976 
2977         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
2978         IntVector vix = IntVector
2979             .fromArray(isp, indexMap, mapOffset)
2980             .add(offset);
2981 
2982         vix = VectorIntrinsics.checkIndex(vix, a.length);
2983 
2984         return VectorSupport.loadWithMap(
2985             vectorType, null, int.class, vsp.laneCount(),
2986             isp.vectorType(),
2987             a, ARRAY_BASE, vix, null,
2988             a, offset, indexMap, mapOffset, vsp,
2989             (c, idx, iMap, idy, s, vm) ->
2990             s.vOp(n -> c[idx + iMap[idy+n]]));
2991     }
2992 
2993     /**
2994      * Gathers a new vector composed of elements from an array of type
2995      * {@code int[]},
2996      * under the control of a mask, and
2997      * using indexes obtained by adding a fixed {@code offset} to a
2998      * series of secondary offsets from an <em>index map</em>.
2999      * The index map is a contiguous sequence of {@code VLENGTH}
3000      * elements in a second array of {@code int}s, starting at a given
3001      * {@code mapOffset}.
3002      * <p>
3003      * For each vector lane, where {@code N} is the vector lane index,
3004      * if the lane is set in the mask,
3005      * the lane is loaded from the array
3006      * element {@code a[f(N)]}, where {@code f(N)} is the
3007      * index mapping expression
3008      * {@code offset + indexMap[mapOffset + N]}.
3009      * Unset lanes in the resulting vector are set to zero.
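          * <p>
          * A masked gather sketch; all names and values are illustrative only:
          * <pre>{@code
          * int[] data = {100, 101, 102, 103, 104, 105, 106, 107};
          * int[] indexMap = {7, 0, 3, 5};
          * var species = IntVector.SPECIES_128;     // 4 int lanes
          * var m = VectorMask.fromValues(species, true, false, true, false);
          * IntVector v = IntVector.fromArray(species, data, 0, indexMap, 0, m);
          * // v holds {data[7], 0, data[3], 0} = {107, 0, 103, 0}
          * }</pre>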
3010      *
3011      * @param species species of desired vector
3012      * @param a the array
3013      * @param offset the offset into the array, which may be negative if the
3014      * relative indexes in the index map compensate to produce a value within
3015      * the array bounds
3016      * @param indexMap the index map
3017      * @param mapOffset the offset into the index map
3018      * @param m the mask controlling lane selection
3019      * @return the vector loaded from the indexed elements of the array
3020      * @throws IndexOutOfBoundsException
3021      *         if {@code mapOffset+N < 0}
3022      *         or if {@code mapOffset+N >= indexMap.length},
3023      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3024      *         is an invalid index into {@code a},
3025      *         for any lane {@code N} in the vector
3026      *         where the mask is set
3027      * @see IntVector#toIntArray()
3028      */
3029     @ForceInline
3030     public static
3031     IntVector fromArray(VectorSpecies<Integer> species,
3032                                    int[] a, int offset,
3033                                    int[] indexMap, int mapOffset,
3034                                    VectorMask<Integer> m) {
3035         if (m.allTrue()) {
3036             return fromArray(species, a, offset, indexMap, mapOffset);
3037         }
3038         else {
3039             IntSpecies vsp = (IntSpecies) species;
3040             return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
3041         }
3042     }
3043 
3044 
3045 
3046     /**
3047      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
3048      * starting at an offset into the byte buffer.
3049      * Bytes are composed into primitive lane elements according
3050      * to the specified byte order.
3051      * The vector is arranged into lanes according to
3052      * <a href="Vector.html#lane-order">memory ordering</a>.
3053      * <p>
3054      * This method behaves as if it returns the result of calling
3055      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
3056      * fromByteBuffer()} as follows:
3057      * <pre>{@code
3058      * var m = species.maskAll(true);
3059      * return fromByteBuffer(species, bb, offset, bo, m);
3060      * }</pre>
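          * <p>
          * For example (the buffer contents and species are illustrative only):
          * <pre>{@code
          * ByteBuffer bb = ByteBuffer.allocate(16).order(ByteOrder.nativeOrder());
          * for (int i = 1; i <= 4; i++) bb.putInt(i);
          * IntVector v = IntVector.fromByteBuffer(IntVector.SPECIES_128, bb, 0,
          *                                        ByteOrder.nativeOrder());
          * // v holds {1, 2, 3, 4}
          * }</pre>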
3061      *
3062      * @param species species of desired vector
3063      * @param bb the byte buffer
3064      * @param offset the offset into the byte buffer
3065      * @param bo the intended byte order
3066      * @return a vector loaded from a byte buffer
3067      * @throws IndexOutOfBoundsException
3068      *         if {@code offset+N*4 < 0}
3069      *         or {@code offset+N*4 >= bb.limit()}
3070      *         for any lane {@code N} in the vector
3071      */
3072     @ForceInline
3073     public static
3074     IntVector fromByteBuffer(VectorSpecies<Integer> species,
3075                                         ByteBuffer bb, int offset,
3076                                         ByteOrder bo) {
3077         offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
3078         IntSpecies vsp = (IntSpecies) species;
3079         return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
3080     }
3081 
3082     /**
3083      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
3084      * starting at an offset into the byte buffer
3085      * and using a mask.
3086      * Lanes where the mask is unset are filled with the default
3087      * value of {@code int} (zero).
3088      * Bytes are composed into primitive lane elements according
3089      * to the specified byte order.
3090      * The vector is arranged into lanes according to
3091      * <a href="Vector.html#lane-order">memory ordering</a>.
3092      * <p>
3093      * The following pseudocode illustrates the behavior:
3094      * <pre>{@code
3095      * IntBuffer eb = bb.duplicate()
3096      *     .position(offset)
3097      *     .order(bo).asIntBuffer();
3098      * int[] ar = new int[species.length()];
3099      * for (int n = 0; n < ar.length; n++) {
3100      *     if (m.laneIsSet(n)) {
3101      *         ar[n] = eb.get(n);
3102      *     }
3103      * }
3104      * IntVector r = IntVector.fromArray(species, ar, 0);
3105      * }</pre>
3106      * @implNote
3107      * This operation is likely to be more efficient if
3108      * the specified byte order is the same as
3109      * {@linkplain ByteOrder#nativeOrder()
3110      * the platform native order},
3111      * since this method will not need to reorder
3112      * the bytes of lane values.
3113      *
3114      * @param species species of desired vector
3115      * @param bb the byte buffer
3116      * @param offset the offset into the byte buffer
3117      * @param bo the intended byte order
3118      * @param m the mask controlling lane selection
3119      * @return a vector loaded from a byte buffer
3120      * @throws IndexOutOfBoundsException
3121      *         if {@code offset+N*4 < 0}
3122      *         or {@code offset+N*4 >= bb.limit()}
3123      *         for any lane {@code N} in the vector
3124      *         where the mask is set
3125      */
3126     @ForceInline
3127     public static
3128     IntVector fromByteBuffer(VectorSpecies<Integer> species,
3129                                         ByteBuffer bb, int offset,
3130                                         ByteOrder bo,
3131                                         VectorMask<Integer> m) {
3132         IntSpecies vsp = (IntSpecies) species;
3133         if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
3134             return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
3135         }
3136 
3137         // FIXME: optimize
3138         checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
3139         ByteBuffer wb = wrapper(bb, bo);
3140         return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
3141                    (wb_, o, i)  -> wb_.getInt(o + i * 4));
3142     }
3143 
3144     // Memory store operations
3145 
3146     /**
3147      * Stores this vector into an array of type {@code int[]}
3148      * starting at an offset.
3149      * <p>
3150      * For each vector lane, where {@code N} is the vector lane index,
3151      * the lane element at index {@code N} is stored into the array
3152      * element {@code a[offset+N]}.
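          * <p>
          * A minimal usage sketch; the species and destination are
          * illustrative only:
          * <pre>{@code
          * var species = IntVector.SPECIES_128;     // 4 int lanes
          * int[] dest = new int[8];
          * IntVector.broadcast(species, 7).intoArray(dest, 2);
          * // dest is now {0, 0, 7, 7, 7, 7, 0, 0}
          * }</pre>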
3153      *
3154      * @param a the array, of type {@code int[]}
3155      * @param offset the offset into the array
3156      * @throws IndexOutOfBoundsException
3157      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3158      *         for any lane {@code N} in the vector
3159      */
3160     @ForceInline
3161     public final
3162     void intoArray(int[] a, int offset) {
3163         offset = checkFromIndexSize(offset, length(), a.length);
3164         IntSpecies vsp = vspecies();
3165         VectorSupport.store(
3166             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3167             a, arrayAddress(a, offset),
3168             this,
3169             a, offset,
3170             (arr, off, v)
3171             -> v.stOp(arr, off,
3172                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3173     }
3174 
3175     /**
3176      * Stores this vector into an array of type {@code int[]}
3177      * starting at an offset and using a mask.
3178      * <p>
3179      * For each vector lane, where {@code N} is the vector lane index,
3180      * the lane element at index {@code N} is stored into the array
3181      * element {@code a[offset+N]}.
3182      * If the mask lane at {@code N} is unset then the corresponding
3183      * array element {@code a[offset+N]} is left unchanged.
3184      * <p>
3185      * Array range checking is done for lanes where the mask is set.
3186      * Lanes where the mask is unset are not stored and do not need
3187      * to correspond to legitimate elements of {@code a}.
3188      * That is, unset lanes may correspond to array indexes less than
3189      * zero or beyond the end of the array.
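          * <p>
          * A sketch of a masked tail store; the species and values are
          * illustrative only:
          * <pre>{@code
          * var species = IntVector.SPECIES_128;     // 4 int lanes
          * int[] dest = new int[6];
          * int i = 4;                               // two elements remain
          * var m = species.indexInRange(i, dest.length);
          * IntVector.broadcast(species, 9).intoArray(dest, i, m);
          * // dest is now {0, 0, 0, 0, 9, 9}
          * }</pre>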
3190      *
3191      * @param a the array, of type {@code int[]}
3192      * @param offset the offset into the array
3193      * @param m the mask controlling lane storage
3194      * @throws IndexOutOfBoundsException
3195      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3196      *         for any lane {@code N} in the vector
3197      *         where the mask is set
3198      */
3199     @ForceInline
3200     public final
3201     void intoArray(int[] a, int offset,
3202                    VectorMask<Integer> m) {
3203         if (m.allTrue()) {
3204             intoArray(a, offset);
3205         } else {
3206             IntSpecies vsp = vspecies();
3207             checkMaskFromIndexSize(offset, vsp, m, 1, a.length);
3208             intoArray0(a, offset, m);
3209         }
3210     }
3211 
3212     /**
3213      * Scatters this vector into an array of type {@code int[]}
3214      * using indexes obtained by adding a fixed {@code offset} to a
3215      * series of secondary offsets from an <em>index map</em>.
3216      * The index map is a contiguous sequence of {@code VLENGTH}
3217      * elements in a second array of {@code int}s, starting at a given
3218      * {@code mapOffset}.
3219      * <p>
3220      * For each vector lane, where {@code N} is the vector lane index,
3221      * the lane element at index {@code N} is stored into the array
3222      * element {@code a[f(N)]}, where {@code f(N)} is the
3223      * index mapping expression
3224      * {@code offset + indexMap[mapOffset + N]}.
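          * <p>
          * A small scatter sketch; the species, destination, and index map are
          * illustrative only:
          * <pre>{@code
          * var species = IntVector.SPECIES_128;     // 4 int lanes
          * int[] dest = new int[8];
          * int[] indexMap = {6, 0, 3, 1};
          * IntVector.fromArray(species, new int[] {10, 20, 30, 40}, 0)
          *          .intoArray(dest, 0, indexMap, 0);
          * // dest is now {20, 40, 0, 30, 0, 0, 10, 0}
          * }</pre>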
3225      *
3226      * @param a the array
3227      * @param offset an offset to combine with the index map offsets
3228      * @param indexMap the index map
3229      * @param mapOffset the offset into the index map
3230      * @throws IndexOutOfBoundsException
3231      *         if {@code mapOffset+N < 0}
3232      *         or if {@code mapOffset+N >= indexMap.length},
3233      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3234      *         is an invalid index into {@code a},
3235      *         for any lane {@code N} in the vector
3236      * @see IntVector#toIntArray()
3237      */
3238     @ForceInline
3239     public final
3240     void intoArray(int[] a, int offset,
3241                    int[] indexMap, int mapOffset) {
3242         IntSpecies vsp = vspecies();
3243         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3244         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3245         IntVector vix = IntVector
3246             .fromArray(isp, indexMap, mapOffset)
3247             .add(offset);
3248 
3249         vix = VectorIntrinsics.checkIndex(vix, a.length);
3250 
3251         VectorSupport.storeWithMap(
3252             vsp.vectorType(), null, vsp.elementType(), vsp.laneCount(),
3253             isp.vectorType(),
3254             a, arrayAddress(a, 0), vix,
3255             this, null,
3256             a, offset, indexMap, mapOffset,
3257             (arr, off, v, map, mo, vm)
3258             -> v.stOp(arr, off,
3259                       (arr_, off_, i, e) -> {
3260                           int j = map[mo + i];
3261                           arr[off + j] = e;
3262                       }));
3263     }
3264 
3265     /**
3266      * Scatters this vector into an array of type {@code int[]},
3267      * under the control of a mask, and
3268      * using indexes obtained by adding a fixed {@code offset} to a
3269      * series of secondary offsets from an <em>index map</em>.
3270      * The index map is a contiguous sequence of {@code VLENGTH}
3271      * elements in a second array of {@code int}s, starting at a given
3272      * {@code mapOffset}.
3273      * <p>
3274      * For each vector lane, where {@code N} is the vector lane index,
3275      * if the mask lane at index {@code N} is set then
3276      * the lane element at index {@code N} is stored into the array
3277      * element {@code a[f(N)]}, where {@code f(N)} is the
3278      * index mapping expression
3279      * {@code offset + indexMap[mapOffset + N]}.
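          * <p>
          * A masked scatter sketch; all names and values are illustrative only:
          * <pre>{@code
          * var species = IntVector.SPECIES_128;     // 4 int lanes
          * int[] dest = new int[8];
          * int[] indexMap = {6, 0, 3, 1};
          * var m = VectorMask.fromValues(species, true, false, true, false);
          * IntVector.fromArray(species, new int[] {10, 20, 30, 40}, 0)
          *          .intoArray(dest, 0, indexMap, 0, m);
          * // only lanes 0 and 2 are stored: dest is {0, 0, 0, 30, 0, 0, 10, 0}
          * }</pre>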
3280      *
3281      * @param a the array
3282      * @param offset an offset to combine with the index map offsets
3283      * @param indexMap the index map
3284      * @param mapOffset the offset into the index map
3285      * @param m the mask
3286      * @throws IndexOutOfBoundsException
3287      *         if {@code mapOffset+N < 0}
3288      *         or if {@code mapOffset+N >= indexMap.length},
3289      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3290      *         is an invalid index into {@code a},
3291      *         for any lane {@code N} in the vector
3292      *         where the mask is set
3293      * @see IntVector#toIntArray()
3294      */
3295     @ForceInline
3296     public final
3297     void intoArray(int[] a, int offset,
3298                    int[] indexMap, int mapOffset,
3299                    VectorMask<Integer> m) {
3300         if (m.allTrue()) {
3301             intoArray(a, offset, indexMap, mapOffset);
3302         }
3303         else {
3304             intoArray0(a, offset, indexMap, mapOffset, m);
3305         }
3306     }
3307 
3308 
3309 
3310     /**
3311      * {@inheritDoc} <!--workaround-->
3312      */
3313     @Override
3314     @ForceInline
3315     public final
3316     void intoByteArray(byte[] a, int offset,
3317                        ByteOrder bo) {
3318         offset = checkFromIndexSize(offset, byteSize(), a.length);
3319         maybeSwap(bo).intoByteArray0(a, offset);
3320     }
3321 
3322     /**
3323      * {@inheritDoc} <!--workaround-->
3324      */
3325     @Override
3326     @ForceInline
3327     public final
3328     void intoByteArray(byte[] a, int offset,
3329                        ByteOrder bo,
3330                        VectorMask<Integer> m) {
3331         if (m.allTrue()) {
3332             intoByteArray(a, offset, bo);
3333         } else {
3334             IntSpecies vsp = vspecies();
3335             checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
3336             maybeSwap(bo).intoByteArray0(a, offset, m);
3337         }
3338     }
3339 
3340     /**
3341      * {@inheritDoc} <!--workaround-->
3342      */
3343     @Override
3344     @ForceInline
3345     public final
3346     void intoByteBuffer(ByteBuffer bb, int offset,
3347                         ByteOrder bo) {
3348         if (ScopedMemoryAccess.isReadOnly(bb)) {
3349             throw new ReadOnlyBufferException();
3350         }
3351         offset = checkFromIndexSize(offset, byteSize(), bb.limit());
3352         maybeSwap(bo).intoByteBuffer0(bb, offset);
3353     }
3354 
3355     /**
3356      * {@inheritDoc} <!--workaround-->
3357      */
3358     @Override
3359     @ForceInline
3360     public final
3361     void intoByteBuffer(ByteBuffer bb, int offset,
3362                         ByteOrder bo,
3363                         VectorMask<Integer> m) {
3364         if (m.allTrue()) {
3365             intoByteBuffer(bb, offset, bo);
3366         } else {
3367             if (bb.isReadOnly()) {
3368                 throw new ReadOnlyBufferException();
3369             }
3370             IntSpecies vsp = vspecies();
3371             checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
3372             maybeSwap(bo).intoByteBuffer0(bb, offset, m);
3373         }
3374     }
3375 
3376     // ================================================
3377 
3378     // Low-level memory operations.
3379     //
3380     // Note that all of these operations *must* inline into a context
3381     // where the exact species of the involved vector is a
3382     // compile-time constant.  Otherwise, the intrinsic generation
3383     // will fail and performance will suffer.
3384     //
3385     // In many cases this is achieved by re-deriving a version of the
3386     // method in each concrete subclass (per species).  The re-derived
3387     // method simply calls one of these generic methods, with exact
3388     // parameters for the controlling metadata, which is either a
3389     // typed vector or constant species instance.
3390 
3391     // Unchecked loading operations in native byte order.
3392     // Caller is responsible for applying index checks, masking, and
3393     // byte swapping.
3394 
3395     /*package-private*/
3396     abstract
3397     IntVector fromArray0(int[] a, int offset);
3398     @ForceInline
3399     final
3400     IntVector fromArray0Template(int[] a, int offset) {
3401         IntSpecies vsp = vspecies();
3402         return VectorSupport.load(
3403             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3404             a, arrayAddress(a, offset),
3405             a, offset, vsp,
3406             (arr, off, s) -> s.ldOp(arr, off,
3407                                     (arr_, off_, i) -> arr_[off_ + i]));
3408     }
3409 
3410     /*package-private*/
3411     abstract
3412     IntVector fromArray0(int[] a, int offset, VectorMask<Integer> m);
3413     @ForceInline
3414     final
3415     <M extends VectorMask<Integer>>
3416     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3417         m.check(species());
3418         IntSpecies vsp = vspecies();
3419         return VectorSupport.loadMasked(
3420             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3421             a, arrayAddress(a, offset), m,
3422             a, offset, vsp,
3423             (arr, off, s, vm) -> s.ldOp(arr, off, vm,
3424                                         (arr_, off_, i) -> arr_[off_ + i]));
3425     }
3426 
3427     /*package-private*/
3428     abstract
3429     IntVector fromArray0(int[] a, int offset,
3430                                     int[] indexMap, int mapOffset,
3431                                     VectorMask<Integer> m);
3432     @ForceInline
3433     final
3434     <M extends VectorMask<Integer>>
3435     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset,
3436                                             int[] indexMap, int mapOffset, M m) {
3437         IntSpecies vsp = vspecies();
3438         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3439         Objects.requireNonNull(a);
3440         Objects.requireNonNull(indexMap);
3441         m.check(vsp);
3442         Class<? extends IntVector> vectorType = vsp.vectorType();
3443 
3444         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
3445         IntVector vix = IntVector
3446             .fromArray(isp, indexMap, mapOffset)
3447             .add(offset);
3448 
3449         // FIXME: Check the index under control of the mask.
3450         vix = VectorIntrinsics.checkIndex(vix, a.length);
3451 
3452         return VectorSupport.loadWithMap(
3453             vectorType, maskClass, int.class, vsp.laneCount(),
3454             isp.vectorType(),
3455             a, ARRAY_BASE, vix, m,
3456             a, offset, indexMap, mapOffset, vsp,
3457             (c, idx, iMap, idy, s, vm) ->
3458             s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3459     }
3460 
3461 
3462 
3463     @Override
3464     abstract
3465     IntVector fromByteArray0(byte[] a, int offset);
3466     @ForceInline
3467     final
3468     IntVector fromByteArray0Template(byte[] a, int offset) {
3469         IntSpecies vsp = vspecies();
3470         return VectorSupport.load(
3471             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3472             a, byteArrayAddress(a, offset),
3473             a, offset, vsp,
3474             (arr, off, s) -> {
3475                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3476                 return s.ldOp(wb, off,
3477                         (wb_, o, i) -> wb_.getInt(o + i * 4));
3478             });
3479     }
3480 
3481     abstract
3482     IntVector fromByteArray0(byte[] a, int offset, VectorMask<Integer> m);
3483     @ForceInline
3484     final
3485     <M extends VectorMask<Integer>>
3486     IntVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3487         IntSpecies vsp = vspecies();
3488         m.check(vsp);
3489         return VectorSupport.loadMasked(
3490             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3491             a, byteArrayAddress(a, offset), m,
3492             a, offset, vsp,
3493             (arr, off, s, vm) -> {
3494                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3495                 return s.ldOp(wb, off, vm,
3496                         (wb_, o, i) -> wb_.getInt(o + i * 4));
3497             });
3498     }
3499 
3500     abstract
3501     IntVector fromByteBuffer0(ByteBuffer bb, int offset);
3502     @ForceInline
3503     final
3504     IntVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
3505         IntSpecies vsp = vspecies();
3506         return ScopedMemoryAccess.loadFromByteBuffer(
3507                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3508                 bb, offset, vsp,
3509                 (buf, off, s) -> {
3510                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3511                     return s.ldOp(wb, off,
3512                             (wb_, o, i) -> wb_.getInt(o + i * 4));
3513                 });
3514     }
3515 
3516     abstract
3517     IntVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
3518     @ForceInline
3519     final
3520     <M extends VectorMask<Integer>>
3521     IntVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
3522         IntSpecies vsp = vspecies();
3523         m.check(vsp);
3524         return ScopedMemoryAccess.loadFromByteBufferMasked(
3525                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3526                 bb, offset, m, vsp,
3527                 (buf, off, s, vm) -> {
3528                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3529                     return s.ldOp(wb, off, vm,
3530                             (wb_, o, i) -> wb_.getInt(o + i * 4));
3531                 });
3532     }
3533 
3534     // Unchecked storing operations in native byte order.
3535     // Caller is responsible for applying index checks, masking, and
3536     // byte swapping.
3537 
3538     abstract
3539     void intoArray0(int[] a, int offset);
3540     @ForceInline
3541     final
3542     void intoArray0Template(int[] a, int offset) {
3543         IntSpecies vsp = vspecies();
3544         VectorSupport.store(
3545             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3546             a, arrayAddress(a, offset),
3547             this, a, offset,
3548             (arr, off, v)
3549             -> v.stOp(arr, off,
3550                       (arr_, off_, i, e) -> arr_[off_+i] = e));
3551     }
3552 
3553     abstract
3554     void intoArray0(int[] a, int offset, VectorMask<Integer> m);
3555     @ForceInline
3556     final
3557     <M extends VectorMask<Integer>>
3558     void intoArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3559         m.check(species());
3560         IntSpecies vsp = vspecies();
3561         VectorSupport.storeMasked(
3562             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3563             a, arrayAddress(a, offset),
3564             this, m, a, offset,
3565             (arr, off, v, vm)
3566             -> v.stOp(arr, off, vm,
3567                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3568     }
3569 
3570     abstract
3571     void intoArray0(int[] a, int offset,
3572                     int[] indexMap, int mapOffset,
3573                     VectorMask<Integer> m);
3574     @ForceInline
3575     final
3576     <M extends VectorMask<Integer>>
3577     void intoArray0Template(Class<M> maskClass, int[] a, int offset,
3578                             int[] indexMap, int mapOffset, M m) {
3579         m.check(species());
3580         IntSpecies vsp = vspecies();
3581         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3582         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3583         IntVector vix = IntVector
3584             .fromArray(isp, indexMap, mapOffset)
3585             .add(offset);
3586 
3587         // FIXME: Check the index under control of the mask.
3588         vix = VectorIntrinsics.checkIndex(vix, a.length);
3589 
3590         VectorSupport.storeWithMap(
3591             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3592             isp.vectorType(),
3593             a, arrayAddress(a, 0), vix,
3594             this, m,
3595             a, offset, indexMap, mapOffset,
3596             (arr, off, v, map, mo, vm)
3597             -> v.stOp(arr, off, vm,
3598                       (arr_, off_, i, e) -> {
3599                           int j = map[mo + i];
3600                           arr[off + j] = e;
3601                       }));
3602     }
3603 
3604 
3605     abstract
3606     void intoByteArray0(byte[] a, int offset);
3607     @ForceInline
3608     final
3609     void intoByteArray0Template(byte[] a, int offset) {
3610         IntSpecies vsp = vspecies();
3611         VectorSupport.store(
3612             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3613             a, byteArrayAddress(a, offset),
3614             this, a, offset,
3615             (arr, off, v) -> {
3616                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3617                 v.stOp(wb, off,
3618                         (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
3619             });
3620     }
3621 
3622     abstract
3623     void intoByteArray0(byte[] a, int offset, VectorMask<Integer> m);
3624     @ForceInline
3625     final
3626     <M extends VectorMask<Integer>>
3627     void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3628         IntSpecies vsp = vspecies();
3629         m.check(vsp);
3630         VectorSupport.storeMasked(
3631             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3632             a, byteArrayAddress(a, offset),
3633             this, m, a, offset,
3634             (arr, off, v, vm) -> {
3635                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3636                 v.stOp(wb, off, vm,
3637                         (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
3638             });
3639     }
3640 
3641     @ForceInline
3642     final
3643     void intoByteBuffer0(ByteBuffer bb, int offset) {
3644         IntSpecies vsp = vspecies();
3645         ScopedMemoryAccess.storeIntoByteBuffer(
3646                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3647                 this, bb, offset,
3648                 (buf, off, v) -> {
3649                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3650                     v.stOp(wb, off,
3651                             (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
3652                 });
3653     }
3654 
3655     abstract
3656     void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
3657     @ForceInline
3658     final
3659     <M extends VectorMask<Integer>>
3660     void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
3661         IntSpecies vsp = vspecies();
3662         m.check(vsp);
3663         ScopedMemoryAccess.storeIntoByteBufferMasked(
3664                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3665                 this, m, bb, offset,
3666                 (buf, off, v, vm) -> {
3667                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3668                     v.stOp(wb, off, vm,
3669                             (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
3670                 });
3671     }
3672 
3673 
3674     // End of low-level memory operations.
3675 
3676     private static
3677     void checkMaskFromIndexSize(int offset,
3678                                 IntSpecies vsp,
3679                                 VectorMask<Integer> m,
3680                                 int scale,
3681                                 int limit) {
3682         ((AbstractMask<Integer>)m)
3683             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3684     }
3685 
3686     @ForceInline
3687     private void conditionalStoreNYI(int offset,
3688                                      IntSpecies vsp,
3689                                      VectorMask<Integer> m,
3690                                      int scale,
3691                                      int limit) {
3692         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3693             String msg =
3694                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3695                               offset, limit, m, vsp);
3696             throw new AssertionError(msg);
3697         }
3698     }
3699 
3700     /*package-private*/
3701     @Override
3702     @ForceInline
3703     final
3704     IntVector maybeSwap(ByteOrder bo) {
3705         if (bo != NATIVE_ENDIAN) {
3706             return this.reinterpretAsBytes()
3707                 .rearrange(swapBytesShuffle())
3708                 .reinterpretAsInts();
3709         }
3710         return this;
3711     }
3712 
3713     static final int ARRAY_SHIFT =
3714         31 - Integer.numberOfLeadingZeros(Unsafe.ARRAY_INT_INDEX_SCALE);
3715     static final long ARRAY_BASE =
3716         Unsafe.ARRAY_INT_BASE_OFFSET;
3717 
3718     @ForceInline
3719     static long arrayAddress(int[] a, int index) {
3720         return ARRAY_BASE + (((long)index) << ARRAY_SHIFT);
3721     }
3722 
3723 
3724 
3725     @ForceInline
3726     static long byteArrayAddress(byte[] a, int index) {
3727         return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
3728     }
3729 
3730     // ================================================
3731 
3732     /// Reinterpreting view methods:
3733     //   lanewise reinterpret: viewAsXVector()
3734     //   keep shape, redraw lanes: reinterpretAsEs()
3735 
3736     /**
3737      * {@inheritDoc} <!--workaround-->
3738      */
3739     @ForceInline
3740     @Override
3741     public final ByteVector reinterpretAsBytes() {
3742          // Going to ByteVector, pay close attention to byte order.
3743          assert(REGISTER_ENDIAN == ByteOrder.LITTLE_ENDIAN);
3744          return asByteVectorRaw();
3745          //return asByteVectorRaw().rearrange(swapBytesShuffle());
3746     }
3747 
3748     /**
3749      * {@inheritDoc} <!--workaround-->
3750      */
3751     @ForceInline
3752     @Override
3753     public final IntVector viewAsIntegralLanes() {
3754         return this;
3755     }
3756 
3757     /**
3758      * {@inheritDoc} <!--workaround-->
3759      */
3760     @ForceInline
3761     @Override
3762     public final
3763     FloatVector
3764     viewAsFloatingLanes() {
3765         LaneType flt = LaneType.INT.asFloating();
3766         return (FloatVector) asVectorRaw(flt);
3767     }
3768 
3769     // ================================================
3770 
3771     /// Object methods: toString, equals, hashCode
3772     //
3773     // Object methods are defined as if via Arrays.toString, etc.,
3774     // is applied to the array of elements.  Two equal vectors
3775     // are required to have equal species and equal lane values.
3776 
3777     /**
3778      * Returns a string representation of this vector, of the form
3779      * {@code "[0,1,2...]"}, reporting the lane values of this vector,
3780      * in lane order.
3781      *
3782      * The string is produced as if by a call to {@link
3783      * java.util.Arrays#toString(int[]) Arrays.toString()},
3784      * as appropriate to the {@code int} array returned by
3785      * {@link #toArray this.toArray()}.
3786      *
3787      * @return a string of the form {@code "[0,1,2...]"}
3788      * reporting the lane values of this vector
3789      */
3790     @Override
3791     @ForceInline
3792     public final
3793     String toString() {
3794         // now that toArray is strongly typed, we can define this
3795         return Arrays.toString(toArray());
3796     }
3797 
3798     /**
3799      * {@inheritDoc} <!--workaround-->
3800      */
3801     @Override
3802     @ForceInline
3803     public final
3804     boolean equals(Object obj) {
3805         if (obj instanceof Vector) {
3806             Vector<?> that = (Vector<?>) obj;
3807             if (this.species().equals(that.species())) {
3808                 return this.eq(that.check(this.species())).allTrue();
3809             }
3810         }
3811         return false;
3812     }
3813 
3814     /**
3815      * {@inheritDoc} <!--workaround-->
3816      */
3817     @Override
3818     @ForceInline
3819     public final
3820     int hashCode() {
3821         // now that toArray is strongly typed, we can define this
3822         return Objects.hash(species(), Arrays.hashCode(toArray()));
3823     }
3824 
3825     // ================================================
3826 
3827     // Species
3828 
3829     /**
3830      * Class representing {@link IntVector}s of the same {@link VectorShape VectorShape}.
3831      */
3832     /*package-private*/
3833     static final class IntSpecies extends AbstractSpecies<Integer> {
3834         private IntSpecies(VectorShape shape,
3835                 Class<? extends IntVector> vectorType,
3836                 Class<? extends AbstractMask<Integer>> maskType,
3837                 Function<Object, IntVector> vectorFactory) {
3838             super(shape, LaneType.of(int.class),
3839                   vectorType, maskType,
3840                   vectorFactory);
3841             assert(this.elementSize() == Integer.SIZE);
3842         }
3843 
3844         // Specializing overrides:
3845 
3846         @Override
3847         @ForceInline
3848         public final Class<Integer> elementType() {
3849             return int.class;
3850         }
3851 
3852         @Override
3853         @ForceInline
3854         final Class<Integer> genericElementType() {
3855             return Integer.class;
3856         }
3857 
3858         @SuppressWarnings("unchecked")
3859         @Override
3860         @ForceInline
3861         public final Class<? extends IntVector> vectorType() {
3862             return (Class<? extends IntVector>) vectorType;
3863         }
3864 
3865         @Override
3866         @ForceInline
3867         public final long checkValue(long e) {
3868             longToElementBits(e);  // only for exception
3869             return e;
3870         }
3871 
3872         /*package-private*/
3873         @Override
3874         @ForceInline
3875         final IntVector broadcastBits(long bits) {
3876             return (IntVector)
3877                 VectorSupport.broadcastCoerced(
3878                     vectorType, int.class, laneCount,
3879                     bits, this,
3880                     (bits_, s_) -> s_.rvOp(i -> bits_));
3881         }
3882 
3883         /*package-private*/
3884         @ForceInline
3885         final IntVector broadcast(int e) {
3886             return broadcastBits(toBits(e));
3887         }
3888 
3889         @Override
3890         @ForceInline
3891         public final IntVector broadcast(long e) {
3892             return broadcastBits(longToElementBits(e));
3893         }
3894 
3895         /*package-private*/
3896         final @Override
3897         @ForceInline
3898         long longToElementBits(long value) {
3899             // Do the conversion, and then test it for failure.
3900             int e = (int) value;
3901             if ((long) e != value) {
3902                 throw badElementBits(value, e);
3903             }
3904             return toBits(e);
3905         }
3906 
3907         /*package-private*/
3908         @ForceInline
3909         static long toIntegralChecked(int e, boolean convertToInt) {
3910             long value = convertToInt ? (int) e : (long) e;
3911             if ((int) value != e) {
3912                 throw badArrayBits(e, convertToInt, value);
3913             }
3914             return value;
3915         }
3916 
3917         /* this non-public one is for internal conversions */
3918         @Override
3919         @ForceInline
3920         final IntVector fromIntValues(int[] values) {
3921             VectorIntrinsics.requireLength(values.length, laneCount);
3922             int[] va = new int[laneCount()];
3923             for (int i = 0; i < va.length; i++) {
3924                 int lv = values[i];
3925                 int v = (int) lv;
3926                 va[i] = v;
3927                 if ((int)v != lv) {
3928                     throw badElementBits(lv, v);
3929                 }
3930             }
3931             return dummyVector().fromArray0(va, 0);
3932         }
3933 
3934         // Virtual constructors
3935 
3936         @ForceInline
3937         @Override final
3938         public IntVector fromArray(Object a, int offset) {
3939             // User entry point:  Be careful with inputs.
3940             return IntVector
3941                 .fromArray(this, (int[]) a, offset);
3942         }
3943 
3944         @ForceInline
3945         @Override final
3946         IntVector dummyVector() {
3947             return (IntVector) super.dummyVector();
3948         }
3949 
3950         /*package-private*/
3951         final @Override
3952         @ForceInline
3953         IntVector rvOp(RVOp f) {
3954             int[] res = new int[laneCount()];
3955             for (int i = 0; i < res.length; i++) {
3956                 int bits = (int) f.apply(i);
3957                 res[i] = fromBits(bits);
3958             }
3959             return dummyVector().vectorFactory(res);
3960         }
3961 
3962         IntVector vOp(FVOp f) {
3963             int[] res = new int[laneCount()];
3964             for (int i = 0; i < res.length; i++) {
3965                 res[i] = f.apply(i);
3966             }
3967             return dummyVector().vectorFactory(res);
3968         }
3969 
3970         IntVector vOp(VectorMask<Integer> m, FVOp f) {
3971             int[] res = new int[laneCount()];
3972             boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
3973             for (int i = 0; i < res.length; i++) {
3974                 if (mbits[i]) {
3975                     res[i] = f.apply(i);
3976                 }
3977             }
3978             return dummyVector().vectorFactory(res);
3979         }
3980 
3981         /*package-private*/
3982         @ForceInline
3983         <M> IntVector ldOp(M memory, int offset,
3984                                       FLdOp<M> f) {
3985             return dummyVector().ldOp(memory, offset, f);
3986         }
3987 
3988         /*package-private*/
3989         @ForceInline
3990         <M> IntVector ldOp(M memory, int offset,
3991                                       VectorMask<Integer> m,
3992                                       FLdOp<M> f) {
3993             return dummyVector().ldOp(memory, offset, m, f);
3994         }
3995 
3996         /*package-private*/
3997         @ForceInline
3998         <M> void stOp(M memory, int offset, FStOp<M> f) {
3999             dummyVector().stOp(memory, offset, f);
4000         }
4001 
4002         /*package-private*/
4003         @ForceInline
4004         <M> void stOp(M memory, int offset,
4005                       AbstractMask<Integer> m,
4006                       FStOp<M> f) {
4007             dummyVector().stOp(memory, offset, m, f);
4008         }
4009 
4010         // N.B. Make sure these constant vectors and
4011         // masks load up correctly into registers.
4012         //
4013         // Also, see if we can avoid all that switching.
4014         // Could we cache both vectors and both masks in
4015         // this species object?
4016 
4017         // Zero and iota vector access
4018         @Override
4019         @ForceInline
4020         public final IntVector zero() {
4021             if ((Class<?>) vectorType() == IntMaxVector.class)
4022                 return IntMaxVector.ZERO;
4023             switch (vectorBitSize()) {
4024                 case 64: return Int64Vector.ZERO;
4025                 case 128: return Int128Vector.ZERO;
4026                 case 256: return Int256Vector.ZERO;
4027                 case 512: return Int512Vector.ZERO;
4028             }
4029             throw new AssertionError();
4030         }
4031 
4032         @Override
4033         @ForceInline
4034         public final IntVector iota() {
4035             if ((Class<?>) vectorType() == IntMaxVector.class)
4036                 return IntMaxVector.IOTA;
4037             switch (vectorBitSize()) {
4038                 case 64: return Int64Vector.IOTA;
4039                 case 128: return Int128Vector.IOTA;
4040                 case 256: return Int256Vector.IOTA;
4041                 case 512: return Int512Vector.IOTA;
4042             }
4043             throw new AssertionError();
4044         }
4045 
4046         // Mask access
4047         @Override
4048         @ForceInline
4049         public final VectorMask<Integer> maskAll(boolean bit) {
4050             if ((Class<?>) vectorType() == IntMaxVector.class)
4051                 return IntMaxVector.IntMaxMask.maskAll(bit);
4052             switch (vectorBitSize()) {
4053                 case 64: return Int64Vector.Int64Mask.maskAll(bit);
4054                 case 128: return Int128Vector.Int128Mask.maskAll(bit);
4055                 case 256: return Int256Vector.Int256Mask.maskAll(bit);
4056                 case 512: return Int512Vector.Int512Mask.maskAll(bit);
4057             }
4058             throw new AssertionError();
4059         }
4060     }
4061 
4062     /**
4063      * Finds a species for an element type of {@code int} and shape.
4064      *
4065      * @param s the shape
4066      * @return a species for an element type of {@code int} and shape
4067      * @throws IllegalArgumentException if no such species exists for the shape
4068      */
4069     static IntSpecies species(VectorShape s) {
4070         Objects.requireNonNull(s);
4071         switch (s) {
4072             case S_64_BIT: return (IntSpecies) SPECIES_64;
4073             case S_128_BIT: return (IntSpecies) SPECIES_128;
4074             case S_256_BIT: return (IntSpecies) SPECIES_256;
4075             case S_512_BIT: return (IntSpecies) SPECIES_512;
4076             case S_Max_BIT: return (IntSpecies) SPECIES_MAX;
4077             default: throw new IllegalArgumentException("Bad shape: " + s);
4078         }
4079     }
4080 
4081     /** Species representing {@link IntVector}s of {@link VectorShape#S_64_BIT VectorShape.S_64_BIT}. */
4082     public static final VectorSpecies<Integer> SPECIES_64
4083         = new IntSpecies(VectorShape.S_64_BIT,
4084                             Int64Vector.class,
4085                             Int64Vector.Int64Mask.class,
4086                             Int64Vector::new);
4087 
4088     /** Species representing {@link IntVector}s of {@link VectorShape#S_128_BIT VectorShape.S_128_BIT}. */
4089     public static final VectorSpecies<Integer> SPECIES_128
4090         = new IntSpecies(VectorShape.S_128_BIT,
4091                             Int128Vector.class,
4092                             Int128Vector.Int128Mask.class,
4093                             Int128Vector::new);
4094 
4095     /** Species representing {@link IntVector}s of {@link VectorShape#S_256_BIT VectorShape.S_256_BIT}. */
4096     public static final VectorSpecies<Integer> SPECIES_256
4097         = new IntSpecies(VectorShape.S_256_BIT,
4098                             Int256Vector.class,
4099                             Int256Vector.Int256Mask.class,
4100                             Int256Vector::new);
4101 
4102     /** Species representing {@link IntVector}s of {@link VectorShape#S_512_BIT VectorShape.S_512_BIT}. */
4103     public static final VectorSpecies<Integer> SPECIES_512
4104         = new IntSpecies(VectorShape.S_512_BIT,
4105                             Int512Vector.class,
4106                             Int512Vector.Int512Mask.class,
4107                             Int512Vector::new);
4108 
4109     /** Species representing {@link IntVector}s of {@link VectorShape#S_Max_BIT VectorShape.S_Max_BIT}. */
4110     public static final VectorSpecies<Integer> SPECIES_MAX
4111         = new IntSpecies(VectorShape.S_Max_BIT,
4112                             IntMaxVector.class,
4113                             IntMaxVector.IntMaxMask.class,
4114                             IntMaxVector::new);
4115 
4116     /**
4117      * Preferred species for {@link IntVector}s.
4118      * A preferred species is a species of maximal bit-size for the platform.
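          * <p>
          * A common loop shape using the preferred species; the input data and
          * the computation shown are illustrative only:
          * <pre>{@code
          * var species = IntVector.SPECIES_PREFERRED;
          * int[] a = new int[1000];
          * int[] r = new int[a.length];
          * int i = 0;
          * for (; i < species.loopBound(a.length); i += species.length()) {
          *     IntVector.fromArray(species, a, i).add(1).intoArray(r, i);
          * }
          * for (; i < a.length; i++) {
          *     r[i] = a[i] + 1;   // scalar tail
          * }
          * }</pre>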
4119      */
4120     public static final VectorSpecies<Integer> SPECIES_PREFERRED
4121         = (IntSpecies) VectorSpecies.ofPreferred(int.class);
4122 }