/*
 * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.incubator.vector;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.ReadOnlyBufferException;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.UnaryOperator;

import jdk.internal.misc.ScopedMemoryAccess;
import jdk.internal.misc.Unsafe;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;

import static jdk.internal.vm.vector.VectorSupport.*;
import static jdk.incubator.vector.VectorIntrinsics.*;

import static jdk.incubator.vector.VectorOperators.*;

// -- This file was mechanically generated: Do not edit! -- //

/**
 * A specialized {@link Vector} representing an ordered immutable sequence of
 * {@code int} values.
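 *
 * <p> For example, a vector can be loaded from an {@code int} array,
 * combined lane-wise with a scalar, and stored back (an illustrative
 * sketch only; the array {@code a} and its length are assumptions of the
 * example, not requirements of the API):
 *
 * <pre>{@code
 * var species = IntVector.SPECIES_PREFERRED;
 * IntVector v = IntVector.fromArray(species, a, 0);  // load one vector of lanes
 * IntVector r = v.add(1).mul(2);                     // lane-wise (a[i] + 1) * 2
 * r.intoArray(a, 0);                                 // store the lanes back
 * }</pre>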
 */
@SuppressWarnings("cast")  // warning: redundant cast
public abstract class IntVector extends AbstractVector<Integer> {

    IntVector(int[] vec) {
        super(vec);
    }

    static final int FORBID_OPCODE_KIND = VO_ONLYFP;

    @ForceInline
    static int opCode(Operator op) {
        return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
    }
    @ForceInline
    static int opCode(Operator op, int requireKind) {
        requireKind |= VO_OPCODE_VALID;
        return VectorOperators.opCode(op, requireKind, FORBID_OPCODE_KIND);
    }
    @ForceInline
    static boolean opKind(Operator op, int bit) {
        return VectorOperators.opKind(op, bit);
    }

    // Virtualized factories and operators,
    // coded with portable definitions.
    // These are all @ForceInline in case
    // they need to be used performantly.
    // The various shape-specific subclasses
    // also specialize them by wrapping
    // them in a call like this:
    //    return (Byte128Vector)
    //       super.bOp((Byte128Vector) o);
    // The purpose of that is to forcibly inline
    // the generic definition from this file
    // into a sharply type- and size-specific
    // wrapper in the subclass file, so that
    // the JIT can specialize the code.
    // The code is only inlined and expanded
    // if it gets hot.  Think of it as a cheap
    // and lazy version of C++ templates.

    // Virtualized getter

    /*package-private*/
    abstract int[] vec();

    // Virtualized constructors

    /**
     * Build a vector directly using my own constructor.
     * It is an error if the array is aliased elsewhere.
     */
    /*package-private*/
    abstract IntVector vectorFactory(int[] vec);

    /**
     * Build a mask directly using my species.
     * It is an error if the array is aliased elsewhere.
     */
    /*package-private*/
    @ForceInline
    final
    AbstractMask<Integer> maskFactory(boolean[] bits) {
        return vspecies().maskFactory(bits);
    }

    // Constant loader (takes dummy as vector arg)
    interface FVOp {
        int apply(int i);
    }

    /*package-private*/
    @ForceInline
    final
    IntVector vOp(FVOp f) {
        int[] res = new int[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i);
        }
        return vectorFactory(res);
    }

    @ForceInline
    final
    IntVector vOp(VectorMask<Integer> m, FVOp f) {
        int[] res = new int[length()];
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(i);
            }
        }
        return vectorFactory(res);
    }

    // Unary operator

    /*package-private*/
    interface FUnOp {
        int apply(int i, int a);
    }

    /*package-private*/
    abstract
    IntVector uOp(FUnOp f);
    @ForceInline
    final
    IntVector uOpTemplate(FUnOp f) {
        int[] vec = vec();
        int[] res = new int[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i, vec[i]);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    abstract
    IntVector uOp(VectorMask<Integer> m,
                             FUnOp f);
    @ForceInline
    final
    IntVector uOpTemplate(VectorMask<Integer> m,
                                     FUnOp f) {
        if (m == null) {
            return uOpTemplate(f);
        }
        int[] vec = vec();
        int[] res = new int[length()];
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            res[i] = mbits[i] ? f.apply(i, vec[i]) : vec[i];
        }
        return vectorFactory(res);
    }

    // Binary operator

    /*package-private*/
    interface FBinOp {
        int apply(int i, int a, int b);
    }

    /*package-private*/
    abstract
    IntVector bOp(Vector<Integer> o,
                             FBinOp f);
    @ForceInline
    final
    IntVector bOpTemplate(Vector<Integer> o,
                                     FBinOp f) {
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o).vec();
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i, vec1[i], vec2[i]);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    abstract
    IntVector bOp(Vector<Integer> o,
                             VectorMask<Integer> m,
                             FBinOp f);
    @ForceInline
    final
    IntVector bOpTemplate(Vector<Integer> o,
                                     VectorMask<Integer> m,
                                     FBinOp f) {
        if (m == null) {
            return bOpTemplate(o, f);
        }
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o).vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            res[i] = mbits[i] ? f.apply(i, vec1[i], vec2[i]) : vec1[i];
        }
        return vectorFactory(res);
    }

    // Ternary operator

    /*package-private*/
    interface FTriOp {
        int apply(int i, int a, int b, int c);
    }

    /*package-private*/
    abstract
    IntVector tOp(Vector<Integer> o1,
                             Vector<Integer> o2,
                             FTriOp f);
    @ForceInline
    final
    IntVector tOpTemplate(Vector<Integer> o1,
                                     Vector<Integer> o2,
                                     FTriOp f) {
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o1).vec();
        int[] vec3 = ((IntVector)o2).vec();
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(i, vec1[i], vec2[i], vec3[i]);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    abstract
    IntVector tOp(Vector<Integer> o1,
                             Vector<Integer> o2,
                             VectorMask<Integer> m,
                             FTriOp f);
    @ForceInline
    final
    IntVector tOpTemplate(Vector<Integer> o1,
                                     Vector<Integer> o2,
                                     VectorMask<Integer> m,
                                     FTriOp f) {
        if (m == null) {
            return tOpTemplate(o1, o2, f);
        }
        int[] res = new int[length()];
        int[] vec1 = this.vec();
        int[] vec2 = ((IntVector)o1).vec();
        int[] vec3 = ((IntVector)o2).vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            res[i] = mbits[i] ? f.apply(i, vec1[i], vec2[i], vec3[i]) : vec1[i];
        }
        return vectorFactory(res);
    }

    // Reduction operator
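    // (For example, rOp(0, null, (i, acc, x) -> acc + x) folds the lanes
    // into their sum; the identity value 0 seeds the accumulator, and a
    // non-null mask skips the unset lanes.)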

    /*package-private*/
    abstract
    int rOp(int v, VectorMask<Integer> m, FBinOp f);

    @ForceInline
    final
    int rOpTemplate(int v, VectorMask<Integer> m, FBinOp f) {
        if (m == null) {
            return rOpTemplate(v, f);
        }
        int[] vec = vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            v = mbits[i] ? f.apply(i, v, vec[i]) : v;
        }
        return v;
    }

    @ForceInline
    final
    int rOpTemplate(int v, FBinOp f) {
        int[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            v = f.apply(i, v, vec[i]);
        }
        return v;
    }

    // Memory reference

    /*package-private*/
    interface FLdOp<M> {
        int apply(M memory, int offset, int i);
    }
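    // (For example, a loader adapter of the form (arr, off, i) -> arr[off + i]
    // reads lane i of the result from an int[] starting at index off; ldOp
    // applies it once per lane to build the vector.)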

    /*package-private*/
    @ForceInline
    final
    <M> IntVector ldOp(M memory, int offset,
                                  FLdOp<M> f) {
        //dummy; no vec = vec();
        int[] res = new int[length()];
        for (int i = 0; i < res.length; i++) {
            res[i] = f.apply(memory, offset, i);
        }
        return vectorFactory(res);
    }

    /*package-private*/
    @ForceInline
    final
    <M> IntVector ldOp(M memory, int offset,
                                  VectorMask<Integer> m,
                                  FLdOp<M> f) {
        //int[] vec = vec();
        int[] res = new int[length()];
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < res.length; i++) {
            if (mbits[i]) {
                res[i] = f.apply(memory, offset, i);
            }
        }
        return vectorFactory(res);
    }

    interface FStOp<M> {
        void apply(M memory, int offset, int i, int a);
    }

    /*package-private*/
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  FStOp<M> f) {
        int[] vec = vec();
        for (int i = 0; i < vec.length; i++) {
            f.apply(memory, offset, i, vec[i]);
        }
    }

    /*package-private*/
    @ForceInline
    final
    <M> void stOp(M memory, int offset,
                  VectorMask<Integer> m,
                  FStOp<M> f) {
        int[] vec = vec();
        boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
        for (int i = 0; i < vec.length; i++) {
            if (mbits[i]) {
                f.apply(memory, offset, i, vec[i]);
            }
        }
    }

    // Binary test

    /*package-private*/
    interface FBinTest {
        boolean apply(int cond, int i, int a, int b);
    }

    /*package-private*/
    @ForceInline
    final
    AbstractMask<Integer> bTest(int cond,
                                  Vector<Integer> o,
                                  FBinTest f) {
        int[] vec1 = vec();
        int[] vec2 = ((IntVector)o).vec();
        boolean[] bits = new boolean[length()];
        for (int i = 0; i < length(); i++){
            bits[i] = f.apply(cond, i, vec1[i], vec2[i]);
        }
        return maskFactory(bits);
    }

    /*package-private*/
    @ForceInline
    static int rotateLeft(int a, int n) {
        return Integer.rotateLeft(a, n);
    }

    /*package-private*/
    @ForceInline
    static int rotateRight(int a, int n) {
        return Integer.rotateRight(a, n);
    }

    /*package-private*/
    @Override
    abstract IntSpecies vspecies();

    /*package-private*/
    @ForceInline
    static long toBits(int e) {
        return  e;
    }

    /*package-private*/
    @ForceInline
    static int fromBits(long bits) {
        return ((int)bits);
    }

    // Static factories (other than memory operations)

    // Note: A surprising behavior in javadoc
    // sometimes makes a lone /** {@inheritDoc} */
    // comment drop the method altogether,
    // apparently if the method mentions a
    // parameter or return type of Vector<Integer>
    // instead of Vector<E> as originally specified.
    // Adding an empty HTML fragment appears to
    // nudge javadoc into providing the desired
    // inherited documentation.  We use the HTML
    // comment <!--workaround--> for this.

    /**
     * Returns a vector of the given species
     * where all lane elements are set to
     * zero, the default primitive value.
     *
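     * <p> For example (an illustrative sketch; the species choice is an
     * assumption of the example), the following yields a vector whose
     * lanes are all {@code 0}, just as
     * {@code IntVector.broadcast(species, 0)} would:
     *
     * <pre>{@code
     * var species = IntVector.SPECIES_PREFERRED;
     * IntVector z = IntVector.zero(species);
     * }</pre>
     *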
     * @param species species of the desired zero vector
     * @return a zero vector
     */
    @ForceInline
    public static IntVector zero(VectorSpecies<Integer> species) {
        IntSpecies vsp = (IntSpecies) species;
        return VectorSupport.fromBitsCoerced(vsp.vectorType(), int.class, species.length(),
                                0, MODE_BROADCAST, vsp,
                                ((bits_, s_) -> s_.rvOp(i -> bits_)));
    }

    /**
     * Returns a vector of the same species as this one
     * where all lane elements are set to
     * the primitive value {@code e}.
     *
     * The contents of the current vector are discarded;
     * only the species is relevant to this operation.
     *
     * <p> This method returns the value of this expression:
     * {@code IntVector.broadcast(this.species(), e)}.
     *
     * @apiNote
     * Unlike the similar method named {@code broadcast()}
     * in the supertype {@code Vector}, this method does not
     * need to validate its argument, and cannot throw
     * {@code IllegalArgumentException}.  This method is
     * therefore preferable to the supertype method.
     *
     * @param e the value to broadcast
     * @return a vector where all lane elements are set to
     *         the primitive value {@code e}
     * @see #broadcast(VectorSpecies,long)
     * @see Vector#broadcast(long)
     * @see VectorSpecies#broadcast(long)
     */
    public abstract IntVector broadcast(int e);

    /**
     * Returns a vector of the given species
     * where all lane elements are set to
     * the primitive value {@code e}.
     *
     * @param species species of the desired vector
     * @param e the value to broadcast
     * @return a vector where all lane elements are set to
     *         the primitive value {@code e}
     * @see #broadcast(long)
     * @see Vector#broadcast(long)
     * @see VectorSpecies#broadcast(long)
     */
    @ForceInline
    public static IntVector broadcast(VectorSpecies<Integer> species, int e) {
        IntSpecies vsp = (IntSpecies) species;
        return vsp.broadcast(e);
    }

    /*package-private*/
    @ForceInline
    final IntVector broadcastTemplate(int e) {
        IntSpecies vsp = vspecies();
        return vsp.broadcast(e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @apiNote
     * When working with vector subtypes like {@code IntVector},
     * {@linkplain #broadcast(int) the more strongly typed method}
     * is typically selected.  It can be explicitly selected
     * using a cast: {@code v.broadcast((int)e)}.
     * The two expressions will produce numerically identical results.
     */
    @Override
    public abstract IntVector broadcast(long e);

    /**
     * Returns a vector of the given species
     * where all lane elements are set to
     * the primitive value {@code e}.
     *
     * The {@code long} value must be accurately representable
     * by the {@code ETYPE} of the vector species, so that
     * {@code e==(long)(ETYPE)e}.
     *
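     * <p> For example (an illustrative sketch; {@code species} is assumed
     * to be an {@code IntVector} species such as
     * {@code IntVector.SPECIES_PREFERRED}):
     *
     * <pre>{@code
     * IntVector ok  = IntVector.broadcast(species, -1L);       // representable as an int
     * IntVector bad = IntVector.broadcast(species, 1L << 40);  // throws IllegalArgumentException
     * }</pre>
     *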
     * @param species species of the desired vector
     * @param e the value to broadcast
     * @return a vector where all lane elements are set to
     *         the primitive value {@code e}
     * @throws IllegalArgumentException
     *         if the given {@code long} value cannot
     *         be represented by the vector's {@code ETYPE}
     * @see #broadcast(VectorSpecies,int)
     * @see VectorSpecies#checkValue(long)
     */
    @ForceInline
    public static IntVector broadcast(VectorSpecies<Integer> species, long e) {
        IntSpecies vsp = (IntSpecies) species;
        return vsp.broadcast(e);
    }

    /*package-private*/
    @ForceInline
    final IntVector broadcastTemplate(long e) {
        return vspecies().broadcast(e);
    }

    // Unary lanewise support

    /**
     * {@inheritDoc} <!--workaround-->
     */
    public abstract
    IntVector lanewise(VectorOperators.Unary op);

    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Unary op) {
        if (opKind(op, VO_SPECIAL)) {
            if (op == ZOMO) {
                return blend(broadcast(-1), compare(NE, 0));
            }
            if (op == NOT) {
                return broadcast(-1).lanewise(XOR, this);
            }
        }
        int opc = opCode(op);
        return VectorSupport.unaryOp(
            opc, getClass(), null, int.class, length(),
            this, null,
            UN_IMPL.find(op, opc, IntVector::unaryOperations));
    }

    /**
     * {@inheritDoc} <!--workaround-->
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Unary op,
                                  VectorMask<Integer> m);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Unary op,
                                          Class<? extends VectorMask<Integer>> maskClass,
                                          VectorMask<Integer> m) {
        m.check(maskClass, this);
        if (opKind(op, VO_SPECIAL)) {
            if (op == ZOMO) {
                return blend(broadcast(-1), compare(NE, 0, m));
            }
            if (op == NOT) {
                return lanewise(XOR, broadcast(-1), m);
            }
        }
        int opc = opCode(op);
        return VectorSupport.unaryOp(
            opc, getClass(), maskClass, int.class, length(),
            this, m,
            UN_IMPL.find(op, opc, IntVector::unaryOperations));
    }

    private static final
    ImplCache<Unary, UnaryOperation<IntVector, VectorMask<Integer>>>
        UN_IMPL = new ImplCache<>(Unary.class, IntVector.class);

    private static UnaryOperation<IntVector, VectorMask<Integer>> unaryOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_NEG: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) -a);
            case VECTOR_OP_ABS: return (v0, m) ->
                    v0.uOp(m, (i, a) -> (int) Math.abs(a));
            default: return null;
        }
    }

    // Binary lanewise support

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Binary,int)
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Binary op,
                                  Vector<Integer> v);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Binary op,
                                          Vector<Integer> v) {
        IntVector that = (IntVector) v;
        that.check(this);

        if (opKind(op, VO_SPECIAL | VO_SHIFT)) {
            if (op == FIRST_NONZERO) {
                // FIXME: Support this in the JIT.
                VectorMask<Integer> thisNZ
                    = this.viewAsIntegralLanes().compare(NE, (int) 0);
                that = that.blend((int) 0, thisNZ.cast(vspecies()));
                op = OR_UNCHECKED;
            }
            if (opKind(op, VO_SHIFT)) {
                // As per shift specification for Java, mask the shift count.
                // This allows the JIT to ignore some ISA details.
                that = that.lanewise(AND, SHIFT_MASK);
            }
            if (op == AND_NOT) {
                // FIXME: Support this in the JIT.
                that = that.lanewise(NOT);
                op = AND;
            } else if (op == DIV) {
                VectorMask<Integer> eqz = that.eq((int) 0);
                if (eqz.anyTrue()) {
                    throw that.divZeroException();
                }
            }
        }

        int opc = opCode(op);
        return VectorSupport.binaryOp(
            opc, getClass(), null, int.class, length(),
            this, that, null,
            BIN_IMPL.find(op, opc, IntVector::binaryOperations));
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Binary op,
                                  Vector<Integer> v,
                                  VectorMask<Integer> m);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Binary op,
                                          Class<? extends VectorMask<Integer>> maskClass,
                                          Vector<Integer> v, VectorMask<Integer> m) {
        IntVector that = (IntVector) v;
        that.check(this);
        m.check(maskClass, this);

        if (opKind(op, VO_SPECIAL | VO_SHIFT)) {
            if (op == FIRST_NONZERO) {
                // FIXME: Support this in the JIT.
                VectorMask<Integer> thisNZ
                    = this.viewAsIntegralLanes().compare(NE, (int) 0);
                that = that.blend((int) 0, thisNZ.cast(vspecies()));
                op = OR_UNCHECKED;
            }
            if (opKind(op, VO_SHIFT)) {
                // As per shift specification for Java, mask the shift count.
                // This allows the JIT to ignore some ISA details.
                that = that.lanewise(AND, SHIFT_MASK);
            }
            if (op == AND_NOT) {
                // FIXME: Support this in the JIT.
                that = that.lanewise(NOT);
                op = AND;
            } else if (op == DIV) {
                VectorMask<Integer> eqz = that.eq((int)0);
                if (eqz.and(m).anyTrue()) {
                    throw that.divZeroException();
                }
                // suppress div/0 exceptions in unset lanes
                that = that.lanewise(NOT, eqz);
            }
        }

        int opc = opCode(op);
        return VectorSupport.binaryOp(
            opc, getClass(), maskClass, int.class, length(),
            this, that, m,
            BIN_IMPL.find(op, opc, IntVector::binaryOperations));
    }

    private static final
    ImplCache<Binary, BinaryOperation<IntVector, VectorMask<Integer>>>
        BIN_IMPL = new ImplCache<>(Binary.class, IntVector.class);

    private static BinaryOperation<IntVector, VectorMask<Integer>> binaryOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_ADD: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a + b));
            case VECTOR_OP_SUB: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a - b));
            case VECTOR_OP_MUL: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a * b));
            case VECTOR_OP_DIV: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a / b));
            case VECTOR_OP_MAX: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)Math.max(a, b));
            case VECTOR_OP_MIN: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)Math.min(a, b));
            case VECTOR_OP_AND: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a & b));
            case VECTOR_OP_OR: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a | b));
            case VECTOR_OP_XOR: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, b) -> (int)(a ^ b));
            case VECTOR_OP_LSHIFT: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> (int)(a << n));
            case VECTOR_OP_RSHIFT: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> (int)(a >> n));
            case VECTOR_OP_URSHIFT: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> (int)((a & LSHR_SETUP_MASK) >>> n));
            case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
            case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
                    v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
            default: return null;
        }
    }

    // FIXME: Maybe all of the public final methods in this file (the
    // simple ones that just call lanewise) should be pushed down to
    // the X-VectorBits template.  They can't optimize properly at
    // this level, and must rely on inlining.  Does it work?
    // (If it works, of course keep the code here.)

    /**
     * Combines the lane values of this vector
     * with the value of a broadcast scalar.
     *
     * This is a lane-wise binary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e))}.
     *
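     * <p> For example (an illustrative sketch; {@code v} is assumed to be
     * an existing {@code IntVector}), adding a scalar offset and then
     * clearing the low four bits of every lane can be written as:
     *
     * <pre>{@code
     * IntVector r = v.lanewise(VectorOperators.ADD, 16)
     *                .lanewise(VectorOperators.AND, ~0xF);
     * }</pre>
     *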
     * @param op the operation used to process lane values
     * @param e the input scalar
     * @return the result of applying the operation lane-wise
     *         to the input vector and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  int e) {
        if (opKind(op, VO_SHIFT) && (int)(int)e == e) {
            return lanewiseShift(op, (int) e);
        }
        if (op == AND_NOT) {
            op = AND; e = (int) ~e;
        }
        return lanewise(op, broadcast(e));
    }

    /**
     * Combines the lane values of this vector
     * with the value of a broadcast scalar,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e), m)}.
     *
     * @param op the operation used to process lane values
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vector and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  int e,
                                  VectorMask<Integer> m) {
        if (opKind(op, VO_SHIFT) && (int)(int)e == e) {
            return lanewiseShift(op, (int) e, m);
        }
        if (op == AND_NOT) {
            op = AND; e = (int) ~e;
        }
        return lanewise(op, broadcast(e), m);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @apiNote
     * When working with vector subtypes like {@code IntVector},
     * {@linkplain #lanewise(VectorOperators.Binary,int)
     * the more strongly typed method}
     * is typically selected.  It can be explicitly selected
     * using a cast: {@code v.lanewise(op,(int)e)}.
     * The two expressions will produce numerically identical results.
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  long e) {
        int e1 = (int) e;
        if ((long)e1 != e
            // allow shift ops to clip down their int parameters
            && !(opKind(op, VO_SHIFT) && (int)e1 == e)) {
            vspecies().checkValue(e);  // for exception
        }
        return lanewise(op, e1);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @apiNote
     * When working with vector subtypes like {@code IntVector},
     * {@linkplain #lanewise(VectorOperators.Binary,int,VectorMask)
     * the more strongly typed method}
     * is typically selected.  It can be explicitly selected
     * using a cast: {@code v.lanewise(op,(int)e,m)}.
     * The two expressions will produce numerically identical results.
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Binary op,
                                  long e, VectorMask<Integer> m) {
        int e1 = (int) e;
        if ((long)e1 != e
            // allow shift ops to clip down their int parameters
            && !(opKind(op, VO_SHIFT) && (int)e1 == e)) {
            vspecies().checkValue(e);  // for exception
        }
        return lanewise(op, e1, m);
    }

    /*package-private*/
    abstract IntVector
    lanewiseShift(VectorOperators.Binary op, int e);

    /*package-private*/
    @ForceInline
    final IntVector
    lanewiseShiftTemplate(VectorOperators.Binary op, int e) {
        // Special handling for these.  FIXME: Refactor?
        assert(opKind(op, VO_SHIFT));
        // As per shift specification for Java, mask the shift count.
        e &= SHIFT_MASK;
        int opc = opCode(op);
        return VectorSupport.broadcastInt(
            opc, getClass(), null, int.class, length(),
            this, e, null,
            BIN_INT_IMPL.find(op, opc, IntVector::broadcastIntOperations));
    }

    /*package-private*/
    abstract IntVector
    lanewiseShift(VectorOperators.Binary op, int e, VectorMask<Integer> m);

    /*package-private*/
    @ForceInline
    final IntVector
    lanewiseShiftTemplate(VectorOperators.Binary op,
                          Class<? extends VectorMask<Integer>> maskClass,
                          int e, VectorMask<Integer> m) {
        m.check(maskClass, this);
        assert(opKind(op, VO_SHIFT));
        // As per shift specification for Java, mask the shift count.
        e &= SHIFT_MASK;
        int opc = opCode(op);
        return VectorSupport.broadcastInt(
            opc, getClass(), maskClass, int.class, length(),
            this, e, m,
            BIN_INT_IMPL.find(op, opc, IntVector::broadcastIntOperations));
    }

    private static final
    ImplCache<Binary,VectorBroadcastIntOp<IntVector, VectorMask<Integer>>> BIN_INT_IMPL
        = new ImplCache<>(Binary.class, IntVector.class);

    private static VectorBroadcastIntOp<IntVector, VectorMask<Integer>> broadcastIntOperations(int opc_) {
        switch (opc_) {
            case VECTOR_OP_LSHIFT: return (v, n, m) ->
                    v.uOp(m, (i, a) -> (int)(a << n));
            case VECTOR_OP_RSHIFT: return (v, n, m) ->
                    v.uOp(m, (i, a) -> (int)(a >> n));
            case VECTOR_OP_URSHIFT: return (v, n, m) ->
                    v.uOp(m, (i, a) -> (int)((a & LSHR_SETUP_MASK) >>> n));
            case VECTOR_OP_LROTATE: return (v, n, m) ->
                    v.uOp(m, (i, a) -> rotateLeft(a, (int)n));
            case VECTOR_OP_RROTATE: return (v, n, m) ->
                    v.uOp(m, (i, a) -> rotateRight(a, (int)n));
            default: return null;
        }
    }

    // As per shift specification for Java, mask the shift count.
    // We mask 0x3F (long), 0x1F (int), 0x0F (short), 0x7 (byte).
    // The latter two maskings go beyond the JLS, but seem reasonable
    // since our lane types are first-class types, not just dressed
    // up ints.
    private static final int SHIFT_MASK = (Integer.SIZE - 1);
    private static final int LSHR_SETUP_MASK = -1;
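    // (For example: with int lanes a shift count of 33 is masked to
    // 33 & 0x1F == 1, so an expression such as v.lanewise(LSHL, 33)
    // yields the same lanes as v.lanewise(LSHL, 1), mirroring the
    // behavior of the Java << operator on int operands.)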

    // Ternary lanewise support

    // Ternary operators come in eight variations:
    //   lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2])
    //   lanewise(op, [broadcast(e1)|v1], [broadcast(e2)|v2], mask)

    // It is annoying to support all of these variations of masking
    // and broadcast, but it would be more surprising not to continue
    // the obvious pattern started by unary and binary.

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,int)
     * @see #lanewise(VectorOperators.Ternary,Vector,int)
     * @see #lanewise(VectorOperators.Ternary,int,Vector)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Ternary op,
                                                  Vector<Integer> v1,
                                                  Vector<Integer> v2);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Ternary op,
                                          Vector<Integer> v1,
                                          Vector<Integer> v2) {
        IntVector that = (IntVector) v1;
        IntVector tother = (IntVector) v2;
        // It's a word: https://www.dictionary.com/browse/tother
        // See also Chapter 11 of Dickens, Our Mutual Friend:
        // "Totherest Governor," replied Mr Riderhood...
        that.check(this);
        tother.check(this);
        if (op == BITWISE_BLEND) {
            // FIXME: Support this in the JIT.
            that = this.lanewise(XOR, that).lanewise(AND, tother);
            return this.lanewise(XOR, that);
        }
        int opc = opCode(op);
        return VectorSupport.ternaryOp(
            opc, getClass(), null, int.class, length(),
            this, that, tother, null,
            TERN_IMPL.find(op, opc, IntVector::ternaryOperations));
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
     */
    @Override
    public abstract
    IntVector lanewise(VectorOperators.Ternary op,
                                  Vector<Integer> v1,
                                  Vector<Integer> v2,
                                  VectorMask<Integer> m);
    @ForceInline
    final
    IntVector lanewiseTemplate(VectorOperators.Ternary op,
                                          Class<? extends VectorMask<Integer>> maskClass,
                                          Vector<Integer> v1,
                                          Vector<Integer> v2,
                                          VectorMask<Integer> m) {
        IntVector that = (IntVector) v1;
        IntVector tother = (IntVector) v2;
        // It's a word: https://www.dictionary.com/browse/tother
        // See also Chapter 11 of Dickens, Our Mutual Friend:
        // "Totherest Governor," replied Mr Riderhood...
        that.check(this);
        tother.check(this);
        m.check(maskClass, this);

        if (op == BITWISE_BLEND) {
            // FIXME: Support this in the JIT.
            that = this.lanewise(XOR, that).lanewise(AND, tother);
            return this.lanewise(XOR, that, m);
        }
        int opc = opCode(op);
        return VectorSupport.ternaryOp(
            opc, getClass(), maskClass, int.class, length(),
            this, that, tother, m,
            TERN_IMPL.find(op, opc, IntVector::ternaryOperations));
    }

    private static final
    ImplCache<Ternary, TernaryOperation<IntVector, VectorMask<Integer>>>
        TERN_IMPL = new ImplCache<>(Ternary.class, IntVector.class);

    private static TernaryOperation<IntVector, VectorMask<Integer>> ternaryOperations(int opc_) {
        switch (opc_) {
            default: return null;
        }
    }

    /**
     * Combines the lane values of this vector
     * with the values of two broadcast scalars.
     *
     * This is a lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2))}.
     *
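     * <p> For example (an illustrative sketch; {@code v} is assumed to be
     * an existing {@code IntVector}), the following replaces, in each lane,
     * the bits selected by the third operand {@code 0xFF} with the
     * corresponding bits of {@code 0x55}, leaving the other bits unchanged:
     *
     * <pre>{@code
     * IntVector r = v.lanewise(VectorOperators.BITWISE_BLEND, 0x55, 0xFF);
     * }</pre>
     *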
     * @param op the operation used to combine lane values
     * @param e1 the first input scalar
     * @param e2 the second input scalar
     * @return the result of applying the operation lane-wise
     *         to the input vector and the scalars
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2)
                                  int e1,
                                  int e2) {
        return lanewise(op, broadcast(e1), broadcast(e2));
    }

    /**
     * Combines the lane values of this vector
     * with the values of two broadcast scalars,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), this.broadcast(e2), m)}.
     *
     * @param op the operation used to combine lane values
     * @param e1 the first input scalar
     * @param e2 the second input scalar
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vector and the scalars
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,int)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,e2,m)
                                  int e1,
                                  int e2,
                                  VectorMask<Integer> m) {
        return lanewise(op, broadcast(e1), broadcast(e2), m);
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar.
     *
     * This is a lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, v1, this.broadcast(e2))}.
     *
     * @param op the operation used to combine lane values
     * @param v1 the other input vector
     * @param e2 the input scalar
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,int,int)
     * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2)
                                  Vector<Integer> v1,
                                  int e2) {
        return lanewise(op, v1, broadcast(e2));
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, v1, this.broadcast(e2), m)}.
     *
     * @param op the operation used to combine lane values
     * @param v1 the other input vector
     * @param e2 the input scalar
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
     * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,Vector,int)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,v1,e2,m)
                                  Vector<Integer> v1,
                                  int e2,
                                  VectorMask<Integer> m) {
        return lanewise(op, v1, broadcast(e2), m);
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar.
     *
     * This is a lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), v2)}.
     *
     * @param op the operation used to combine lane values
     * @param e1 the input scalar
     * @param v2 the other input vector
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector)
     * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2)
                                  int e1,
                                  Vector<Integer> v2) {
        return lanewise(op, broadcast(e1), v2);
    }

    /**
     * Combines the lane values of this vector
     * with the values of another vector and a broadcast scalar,
     * with selection of lane elements controlled by a mask.
     *
     * This is a masked lane-wise ternary operation which applies
     * the selected operation to each lane.
     * The return value will be equal to this expression:
     * {@code this.lanewise(op, this.broadcast(e1), v2, m)}.
     *
     * @param op the operation used to combine lane values
     * @param e1 the input scalar
     * @param v2 the other input vector
     * @param m the mask controlling lane selection
     * @return the result of applying the operation lane-wise
     *         to the input vectors and the scalar
     * @throws UnsupportedOperationException if this vector does
     *         not support the requested operation
     * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
     * @see #lanewise(VectorOperators.Ternary,int,Vector)
     */
    @ForceInline
    public final
    IntVector lanewise(VectorOperators.Ternary op, //(op,e1,v2,m)
                                  int e1,
                                  Vector<Integer> v2,
                                  VectorMask<Integer> m) {
        return lanewise(op, broadcast(e1), v2, m);
    }

    // (Thus endeth the Great and Mighty Ternary Ogdoad.)
    // https://en.wikipedia.org/wiki/Ogdoad

    /// FULL-SERVICE BINARY METHODS: ADD, SUB, MUL, DIV
    //
    // These include masked and non-masked versions.
    // This subclass adds broadcast (masked or not).

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #add(int)
     */
    @Override
    @ForceInline
    public final IntVector add(Vector<Integer> v) {
        return lanewise(ADD, v);
    }

    /**
     * Adds this vector to the broadcast of an input scalar.
     *
     * This is a lane-wise binary operation which applies
     * the primitive addition operation ({@code +}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     *    lanewise}{@code (}{@link VectorOperators#ADD
     *    ADD}{@code , e)}.
     *
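     * <p> For example (an illustrative sketch; the array {@code a} and its
     * length are assumptions of the example), a strip-mined loop can add a
     * scalar to every element, with a scalar tail for the leftover elements:
     *
     * <pre>{@code
     * var species = IntVector.SPECIES_PREFERRED;
     * int i = 0;
     * for (; i < species.loopBound(a.length); i += species.length()) {
     *     IntVector.fromArray(species, a, i).add(5).intoArray(a, i);
     * }
     * for (; i < a.length; i++) {
     *     a[i] += 5;  // scalar tail
     * }
     * }</pre>
     *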
     * @param e the input scalar
     * @return the result of adding each lane of this vector to the scalar
     * @see #add(Vector)
     * @see #broadcast(int)
     * @see #add(int,VectorMask)
     * @see VectorOperators#ADD
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final
    IntVector add(int e) {
        return lanewise(ADD, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #add(int,VectorMask)
     */
    @Override
    @ForceInline
    public final IntVector add(Vector<Integer> v,
                                          VectorMask<Integer> m) {
        return lanewise(ADD, v, m);
    }

    /**
     * Adds this vector to the broadcast of an input scalar,
     * selecting lane elements controlled by a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive addition operation ({@code +}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     *    lanewise}{@code (}{@link VectorOperators#ADD
     *    ADD}{@code , e, m)}.
     *
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of adding each lane of this vector to the scalar
     * @see #add(Vector,VectorMask)
     * @see #broadcast(int)
     * @see #add(int)
     * @see VectorOperators#ADD
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector add(int e,
                                          VectorMask<Integer> m) {
        return lanewise(ADD, e, m);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #sub(int)
     */
    @Override
    @ForceInline
    public final IntVector sub(Vector<Integer> v) {
        return lanewise(SUB, v);
    }

    /**
     * Subtracts an input scalar from this vector.
     *
     * This is a lane-wise binary operation which applies
     * the primitive subtraction operation ({@code -}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     *    lanewise}{@code (}{@link VectorOperators#SUB
     *    SUB}{@code , e)}.
     *
     * @param e the input scalar
     * @return the result of subtracting the scalar from each lane of this vector
     * @see #sub(Vector)
     * @see #broadcast(int)
     * @see #sub(int,VectorMask)
     * @see VectorOperators#SUB
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector sub(int e) {
        return lanewise(SUB, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #sub(int,VectorMask)
     */
    @Override
    @ForceInline
    public final IntVector sub(Vector<Integer> v,
                                          VectorMask<Integer> m) {
        return lanewise(SUB, v, m);
    }

    /**
     * Subtracts an input scalar from this vector
     * under the control of a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive subtraction operation ({@code -}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     *    lanewise}{@code (}{@link VectorOperators#SUB
     *    SUB}{@code , e, m)}.
     *
     * @param e the input scalar
     * @param m the mask controlling lane selection
     * @return the result of subtracting the scalar from each lane of this vector
     * @see #sub(Vector,VectorMask)
     * @see #broadcast(int)
     * @see #sub(int)
     * @see VectorOperators#SUB
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector sub(int e,
                                          VectorMask<Integer> m) {
        return lanewise(SUB, e, m);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #mul(int)
     */
    @Override
    @ForceInline
    public final IntVector mul(Vector<Integer> v) {
        return lanewise(MUL, v);
    }

    /**
     * Multiplies this vector by the broadcast of an input scalar.
     *
     * This is a lane-wise binary operation which applies
     * the primitive multiplication operation ({@code *}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int)
     *    lanewise}{@code (}{@link VectorOperators#MUL
     *    MUL}{@code , e)}.
     *
     * @param e the input scalar
     * @return the result of multiplying this vector by the given scalar
     * @see #mul(Vector)
     * @see #broadcast(int)
     * @see #mul(int,VectorMask)
     * @see VectorOperators#MUL
     * @see #lanewise(VectorOperators.Binary,Vector)
     * @see #lanewise(VectorOperators.Binary,int)
     */
    @ForceInline
    public final IntVector mul(int e) {
        return lanewise(MUL, e);
    }

    /**
     * {@inheritDoc} <!--workaround-->
     * @see #mul(int,VectorMask)
     */
    @Override
    @ForceInline
    public final IntVector mul(Vector<Integer> v,
                                          VectorMask<Integer> m) {
        return lanewise(MUL, v, m);
    }

    /**
     * Multiplies this vector by the broadcast of an input scalar,
     * selecting lane elements controlled by a mask.
     *
     * This is a masked lane-wise binary operation which applies
     * the primitive multiplication operation ({@code *}) to each lane.
     *
     * This method is also equivalent to the expression
     * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
     *    lanewise}{@code (}{@link VectorOperators#MUL
1435      *    MUL}{@code , e, m)}.
1436      *
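     * <p>
     * For example, a possible usage sketch (the species and mask values are
     * assumptions chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * VectorMask<Integer> m = VectorMask.fromValues(species, true, false, true, false);
     * IntVector v = IntVector.fromArray(species, new int[] {1, 2, 3, 4}, 0);
     * IntVector r = v.mul(10, m);            // lanes: 10, 2, 30, 4 (unset lanes unchanged)
     * }</pre>
     *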
1437      * @param e the input scalar
1438      * @param m the mask controlling lane selection
1439      * @return the result of multiplying each lane of this vector by the scalar
1440      * @see #mul(Vector,VectorMask)
1441      * @see #broadcast(int)
1442      * @see #mul(int)
1443      * @see VectorOperators#MUL
1444      * @see #lanewise(VectorOperators.Binary,Vector)
1445      * @see #lanewise(VectorOperators.Binary,int)
1446      */
1447     @ForceInline
1448     public final IntVector mul(int e,
1449                                           VectorMask<Integer> m) {
1450         return lanewise(MUL, e, m);
1451     }
1452 
1453     /**
1454      * {@inheritDoc} <!--workaround-->
1455      * @apiNote If there is a zero divisor, {@code
1456      * ArithmeticException} will be thrown.
1457      */
1458     @Override
1459     @ForceInline
1460     public final IntVector div(Vector<Integer> v) {
1461         return lanewise(DIV, v);
1462     }
1463 
1464     /**
1465      * Divides this vector by the broadcast of an input scalar.
1466      *
1467      * This is a lane-wise binary operation which applies
1468      * the primitive division operation ({@code /}) to each lane.
1469      *
1470      * This method is also equivalent to the expression
1471      * {@link #lanewise(VectorOperators.Binary,int)
1472      *    lanewise}{@code (}{@link VectorOperators#DIV
1473      *    DIV}{@code , e)}.
1474      *
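     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {8, 12, 20, 40}, 0);
     * IntVector r = v.div(4);                // lanes: 2, 3, 5, 10
     * // v.div(0) would throw ArithmeticException (zero divisor)
     * }</pre>
     *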
1475      * @apiNote If there is a zero divisor, {@code
1476      * ArithmeticException} will be thrown.
1477      *
1478      * @param e the input scalar
1479      * @return the result of dividing each lane of this vector by the scalar
1480      * @see #div(Vector)
1481      * @see #broadcast(int)
1482      * @see #div(int,VectorMask)
1483      * @see VectorOperators#DIV
1484      * @see #lanewise(VectorOperators.Binary,Vector)
1485      * @see #lanewise(VectorOperators.Binary,int)
1486      */
1487     @ForceInline
1488     public final IntVector div(int e) {
1489         return lanewise(DIV, e);
1490     }
1491 
1492     /**
1493      * {@inheritDoc} <!--workaround-->
1494      * @see #div(int,VectorMask)
1495      * @apiNote If there is a zero divisor, {@code
1496      * ArithmeticException} will be thrown.
1497      */
1498     @Override
1499     @ForceInline
1500     public final IntVector div(Vector<Integer> v,
1501                                           VectorMask<Integer> m) {
1502         return lanewise(DIV, v, m);
1503     }
1504 
1505     /**
1506      * Divides this vector by the broadcast of an input scalar,
1507      * selecting lane elements controlled by a mask.
1508      *
1509      * This is a masked lane-wise binary operation which applies
1510      * the primitive division operation ({@code /}) to each lane.
1511      *
1512      * This method is also equivalent to the expression
1513      * {@link #lanewise(VectorOperators.Binary,int,VectorMask)
1514      *    lanewise}{@code (}{@link VectorOperators#DIV
1515      *    DIV}{@code , e, m)}.
1516      *
1517      * @apiNote If there is a zero divisor, {@code
1518      * ArithmeticException} will be thrown.
1519      *
1520      * @param e the input scalar
1521      * @param m the mask controlling lane selection
1522      * @return the result of dividing each lane of this vector by the scalar
1523      * @see #div(Vector,VectorMask)
1524      * @see #broadcast(int)
1525      * @see #div(int)
1526      * @see VectorOperators#DIV
1527      * @see #lanewise(VectorOperators.Binary,Vector)
1528      * @see #lanewise(VectorOperators.Binary,int)
1529      */
1530     @ForceInline
1531     public final IntVector div(int e,
1532                                           VectorMask<Integer> m) {
1533         return lanewise(DIV, e, m);
1534     }
1535 
1536     /// END OF FULL-SERVICE BINARY METHODS
1537 
1538     /// SECOND-TIER BINARY METHODS
1539     //
1540     // There are no masked versions.
1541 
1542     /**
1543      * {@inheritDoc} <!--workaround-->
1544      */
1545     @Override
1546     @ForceInline
1547     public final IntVector min(Vector<Integer> v) {
1548         return lanewise(MIN, v);
1549     }
1550 
1551     // FIXME:  "broadcast of an input scalar" is really wordy.  Reduce?
1552     /**
1553      * Computes the smaller of this vector and the broadcast of an input scalar.
1554      *
1555      * This is a lane-wise binary operation which applies the
1556      * operation {@code Math.min()} to each pair of
1557      * corresponding lane values.
1558      *
1559      * This method is also equivalent to the expression
1560      * {@link #lanewise(VectorOperators.Binary,int)
1561      *    lanewise}{@code (}{@link VectorOperators#MIN
1562      *    MIN}{@code , e)}.
1563      *
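     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {-3, 7, 0, 42}, 0);
     * IntVector r = v.min(5);                // lanes: -3, 5, 0, 5
     * }</pre>
     *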
1564      * @param e the input scalar
1565      * @return the result of computing the smaller of each lane of this vector and the scalar
1566      * @see #min(Vector)
1567      * @see #broadcast(int)
1568      * @see VectorOperators#MIN
1569      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
1570      */
1571     @ForceInline
1572     public final IntVector min(int e) {
1573         return lanewise(MIN, e);
1574     }
1575 
1576     /**
1577      * {@inheritDoc} <!--workaround-->
1578      */
1579     @Override
1580     @ForceInline
1581     public final IntVector max(Vector<Integer> v) {
1582         return lanewise(MAX, v);
1583     }
1584 
1585     /**
1586      * Computes the larger of this vector and the broadcast of an input scalar.
1587      *
1588      * This is a lane-wise binary operation which applies the
1589      * operation {@code Math.max()} to each pair of
1590      * corresponding lane values.
1591      *
1592      * This method is also equivalent to the expression
1593      * {@link #lanewise(VectorOperators.Binary,int)
1594      *    lanewise}{@code (}{@link VectorOperators#MAX
1595      *    MAX}{@code , e)}.
1596      *
1597      * @param e the input scalar
1598      * @return the result of computing the larger of each lane of this vector and the scalar
1599      * @see #max(Vector)
1600      * @see #broadcast(int)
1601      * @see VectorOperators#MAX
1602      * @see #lanewise(VectorOperators.Binary,int,VectorMask)
1603      */
1604     @ForceInline
1605     public final IntVector max(int e) {
1606         return lanewise(MAX, e);
1607     }
1608 
1609     // common bitwise operators: and, or, not (with scalar versions)
1610     /**
1611      * Computes the bitwise logical conjunction ({@code &})
1612      * of this vector and a second input vector.
1613      *
1614      * This is a lane-wise binary operation which applies
1615      * the primitive bitwise "and" operation ({@code &})
1616      * to each pair of corresponding lane values.
1617      *
1618      * This method is also equivalent to the expression
1619      * {@link #lanewise(VectorOperators.Binary,Vector)
1620      *    lanewise}{@code (}{@link VectorOperators#AND
1621      *    AND}{@code , v)}.
1622      *
1623      * <p>
1624      * This is not a full-service named operation like
1625      * {@link #add(Vector) add}.  A masked version of
1626      * this operation is not directly available
1627      * but may be obtained via the masked version of
1628      * {@code lanewise}.
1629      *
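     * <p>
     * For example, a possible sketch of the unmasked form and of the masked
     * alternative via {@code lanewise} (the species and mask are assumptions
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v1 = IntVector.fromArray(species, new int[] {0b1100, 0b1010, -1, 7}, 0);
     * IntVector v2 = IntVector.fromArray(species, new int[] {0b1010, 0b1010,  8, 1}, 0);
     * IntVector r  = v1.and(v2);             // lanes: 0b1000, 0b1010, 8, 1
     * VectorMask<Integer> m = species.maskAll(true);
     * IntVector rm = v1.lanewise(VectorOperators.AND, v2, m);  // masked equivalent
     * }</pre>
     *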
1630      * @param v a second input vector
1631      * @return the bitwise {@code &} of this vector and the second input vector
1632      * @see #and(int)
1633      * @see #or(Vector)
1634      * @see #not()
1635      * @see VectorOperators#AND
1636      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1637      */
1638     @ForceInline
1639     public final IntVector and(Vector<Integer> v) {
1640         return lanewise(AND, v);
1641     }
1642 
1643     /**
1644      * Computes the bitwise logical conjunction ({@code &})
1645      * of this vector and a scalar.
1646      *
1647      * This is a lane-wise binary operation which applies
1648      * the primitive bitwise "and" operation ({@code &})
1649      * to each pair of corresponding lane values.
1650      *
1651      * This method is also equivalent to the expression
1652      * {@link #lanewise(VectorOperators.Binary,Vector)
1653      *    lanewise}{@code (}{@link VectorOperators#AND
1654      *    AND}{@code , e)}.
1655      *
1656      * @param e an input scalar
1657      * @return the bitwise {@code &} of this vector and scalar
1658      * @see #and(Vector)
1659      * @see VectorOperators#AND
1660      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1661      */
1662     @ForceInline
1663     public final IntVector and(int e) {
1664         return lanewise(AND, e);
1665     }
1666 
1667     /**
1668      * Computes the bitwise logical disjunction ({@code |})
1669      * of this vector and a second input vector.
1670      *
1671      * This is a lane-wise binary operation which applies
1672      * the primitive bitwise "or" operation ({@code |})
1673      * to each pair of corresponding lane values.
1674      *
1675      * This method is also equivalent to the expression
1676      * {@link #lanewise(VectorOperators.Binary,Vector)
1677      *    lanewise}{@code (}{@link VectorOperators#OR
1678      *    OR}{@code , v)}.
1679      *
1680      * <p>
1681      * This is not a full-service named operation like
1682      * {@link #add(Vector) add}.  A masked version of
1683      * this operation is not directly available
1684      * but may be obtained via the masked version of
1685      * {@code lanewise}.
1686      *
1687      * @param v a second input vector
1688      * @return the bitwise {@code |} of this vector and the second input vector
1689      * @see #or(int)
1690      * @see #and(Vector)
1691      * @see #not()
1692      * @see VectorOperators#OR
1693      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1694      */
1695     @ForceInline
1696     public final IntVector or(Vector<Integer> v) {
1697         return lanewise(OR, v);
1698     }
1699 
1700     /**
1701      * Computes the bitwise logical disjunction ({@code |})
1702      * of this vector and a scalar.
1703      *
1704      * This is a lane-wise binary operation which applies
1705      * the primitive bitwise "or" operation ({@code |})
1706      * to each pair of corresponding lane values.
1707      *
1708      * This method is also equivalent to the expression
1709      * {@link #lanewise(VectorOperators.Binary,Vector)
1710      *    lanewise}{@code (}{@link VectorOperators#OR
1711      *    OR}{@code , e)}.
1712      *
1713      * @param e an input scalar
1714      * @return the bitwise {@code |} of this vector and scalar
1715      * @see #or(Vector)
1716      * @see VectorOperators#OR
1717      * @see #lanewise(VectorOperators.Binary,Vector,VectorMask)
1718      */
1719     @ForceInline
1720     public final IntVector or(int e) {
1721         return lanewise(OR, e);
1722     }
1723 
1724 
1725 
1726     /// UNARY METHODS
1727 
1728     /**
1729      * {@inheritDoc} <!--workaround-->
1730      */
1731     @Override
1732     @ForceInline
1733     public final
1734     IntVector neg() {
1735         return lanewise(NEG);
1736     }
1737 
1738     /**
1739      * {@inheritDoc} <!--workaround-->
1740      */
1741     @Override
1742     @ForceInline
1743     public final
1744     IntVector abs() {
1745         return lanewise(ABS);
1746     }
1747 
1748     // not (~)
1749     /**
1750      * Computes the bitwise logical complement ({@code ~})
1751      * of this vector.
1752      *
1753      * This is a lane-wise unary operation which applies
1754      * the primitive bitwise "not" operation ({@code ~})
1755      * to each lane value.
1756      *
1757      * This method is also equivalent to the expression
1758      * {@link #lanewise(VectorOperators.Unary)
1759      *    lanewise}{@code (}{@link VectorOperators#NOT
1760      *    NOT}{@code )}.
1761      *
1762      * <p>
1763      * This is not a full-service named operation like
1764      * {@link #add(Vector) add}.  A masked version of
1765      * this operation is not directly available
1766      * but may be obtained via the masked version of
1767      * {@code lanewise}.
1768      *
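     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {0, -1, 5, 0x0F0F0F0F}, 0);
     * IntVector r = v.not();                 // lanes: -1, 0, -6, 0xF0F0F0F0
     * // masked form: v.lanewise(VectorOperators.NOT, mask)
     * }</pre>
     *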
1769      * @return the bitwise complement {@code ~} of this vector
1770      * @see #and(Vector)
1771      * @see VectorOperators#NOT
1772      * @see #lanewise(VectorOperators.Unary,VectorMask)
1773      */
1774     @ForceInline
1775     public final IntVector not() {
1776         return lanewise(NOT);
1777     }
1778 
1779 
1780     /// COMPARISONS
1781 
1782     /**
1783      * {@inheritDoc} <!--workaround-->
1784      */
1785     @Override
1786     @ForceInline
1787     public final
1788     VectorMask<Integer> eq(Vector<Integer> v) {
1789         return compare(EQ, v);
1790     }
1791 
1792     /**
1793      * Tests if this vector is equal to an input scalar.
1794      *
1795      * This is a lane-wise binary test operation which applies
1796      * the primitive equals operation ({@code ==}) to each lane.
1797      * The result is the same as {@code compare(VectorOperators.EQ, e)}.
1798      *
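     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {3, 5, 3, 9}, 0);
     * VectorMask<Integer> m = v.eq(3);       // mask lanes: true, false, true, false
     * int matches = m.trueCount();           // 2
     * }</pre>
     *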
1799      * @param e the input scalar
1800      * @return the result mask of testing if this vector
1801      *         is equal to {@code e}
1802      * @see #compare(VectorOperators.Comparison,int)
1803      */
1804     @ForceInline
1805     public final
1806     VectorMask<Integer> eq(int e) {
1807         return compare(EQ, e);
1808     }
1809 
1810     /**
1811      * {@inheritDoc} <!--workaround-->
1812      */
1813     @Override
1814     @ForceInline
1815     public final
1816     VectorMask<Integer> lt(Vector<Integer> v) {
1817         return compare(LT, v);
1818     }
1819 
1820     /**
1821      * Tests if this vector is less than an input scalar.
1822      *
1823      * This is a lane-wise binary test operation which applies
1824      * the primitive less than operation ({@code <}) to each lane.
1825      * The result is the same as {@code compare(VectorOperators.LT, e)}.
1826      *
1827      * @param e the input scalar
1828      * @return the mask result of testing if this vector
1829      *         is less than the input scalar
1830      * @see #compare(VectorOperators.Comparison,int)
1831      */
1832     @ForceInline
1833     public final
1834     VectorMask<Integer> lt(int e) {
1835         return compare(LT, e);
1836     }
1837 
1838     /**
1839      * {@inheritDoc} <!--workaround-->
1840      */
1841     @Override
1842     public abstract
1843     VectorMask<Integer> test(VectorOperators.Test op);
1844 
1845     /*package-private*/
1846     @ForceInline
1847     final
1848     <M extends VectorMask<Integer>>
1849     M testTemplate(Class<M> maskType, Test op) {
1850         IntSpecies vsp = vspecies();
1851         if (opKind(op, VO_SPECIAL)) {
1852             VectorMask<Integer> m;
1853             if (op == IS_DEFAULT) {
1854                 m = compare(EQ, (int) 0);
1855             } else if (op == IS_NEGATIVE) {
1856                 m = compare(LT, (int) 0);
1857             }
1858             else {
1859                 throw new AssertionError(op);
1860             }
1861             return maskType.cast(m);
1862         }
1863         int opc = opCode(op);
1864         throw new AssertionError(op);
1865     }
1866 
1867     /**
1868      * {@inheritDoc} <!--workaround-->
1869      */
1870     @Override
1871     public abstract
1872     VectorMask<Integer> test(VectorOperators.Test op,
1873                                   VectorMask<Integer> m);
1874 
1875     /*package-private*/
1876     @ForceInline
1877     final
1878     <M extends VectorMask<Integer>>
1879     M testTemplate(Class<M> maskType, Test op, M mask) {
1880         IntSpecies vsp = vspecies();
1881         mask.check(maskType, this);
1882         if (opKind(op, VO_SPECIAL)) {
1883             VectorMask<Integer> m = mask;
1884             if (op == IS_DEFAULT) {
1885                 m = compare(EQ, (int) 0, m);
1886             } else if (op == IS_NEGATIVE) {
1887                 m = compare(LT, (int) 0, m);
1888             }
1889             else {
1890                 throw new AssertionError(op);
1891             }
1892             return maskType.cast(m);
1893         }
1894         int opc = opCode(op);
1895         throw new AssertionError(op);
1896     }
1897 
1898     /**
1899      * {@inheritDoc} <!--workaround-->
1900      */
1901     @Override
1902     public abstract
1903     VectorMask<Integer> compare(VectorOperators.Comparison op, Vector<Integer> v);
1904 
1905     /*package-private*/
1906     @ForceInline
1907     final
1908     <M extends VectorMask<Integer>>
1909     M compareTemplate(Class<M> maskType, Comparison op, Vector<Integer> v) {
1910         IntVector that = (IntVector) v;
1911         that.check(this);
1912         int opc = opCode(op);
1913         return VectorSupport.compare(
1914             opc, getClass(), maskType, int.class, length(),
1915             this, that, null,
1916             (cond, v0, v1, m1) -> {
1917                 AbstractMask<Integer> m
1918                     = v0.bTest(cond, v1, (cond_, i, a, b)
1919                                -> compareWithOp(cond, a, b));
1920                 @SuppressWarnings("unchecked")
1921                 M m2 = (M) m;
1922                 return m2;
1923             });
1924     }
1925 
1926     /*package-private*/
1927     @ForceInline
1928     final
1929     <M extends VectorMask<Integer>>
1930     M compareTemplate(Class<M> maskType, Comparison op, Vector<Integer> v, M m) {
1931         IntVector that = (IntVector) v;
1932         that.check(this);
1933         m.check(maskType, this);
1934         int opc = opCode(op);
1935         return VectorSupport.compare(
1936             opc, getClass(), maskType, int.class, length(),
1937             this, that, m,
1938             (cond, v0, v1, m1) -> {
1939                 AbstractMask<Integer> cmpM
1940                     = v0.bTest(cond, v1, (cond_, i, a, b)
1941                                -> compareWithOp(cond, a, b));
1942                 @SuppressWarnings("unchecked")
1943                 M m2 = (M) cmpM.and(m1);
1944                 return m2;
1945             });
1946     }
1947 
1948     @ForceInline
1949     private static boolean compareWithOp(int cond, int a, int b) {
1950         return switch (cond) {
1951             case BT_eq -> a == b;
1952             case BT_ne -> a != b;
1953             case BT_lt -> a < b;
1954             case BT_le -> a <= b;
1955             case BT_gt -> a > b;
1956             case BT_ge -> a >= b;
1957             case BT_ult -> Integer.compareUnsigned(a, b) < 0;
1958             case BT_ule -> Integer.compareUnsigned(a, b) <= 0;
1959             case BT_ugt -> Integer.compareUnsigned(a, b) > 0;
1960             case BT_uge -> Integer.compareUnsigned(a, b) >= 0;
1961             default -> throw new AssertionError();
1962         };
1963     }
1964 
1965     /**
1966      * Tests this vector by comparing it with an input scalar,
1967      * according to the given comparison operation.
1968      *
1969      * This is a lane-wise binary test operation which applies
1970      * the comparison operation to each lane.
1971      * <p>
1972      * The result is the same as
1973      * {@code compare(op, broadcast(species(), e))}.
1974      * That is, the scalar may be regarded as broadcast to
1975      * a vector of the same species, and then compared
1976      * against the original vector, using the selected
1977      * comparison operation.
1978      *
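     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {2, 8, 5, 11}, 0);
     * VectorMask<Integer> m = v.compare(VectorOperators.GT, 5);  // false, true, false, true
     * }</pre>
     *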
1979      * @param op the operation used to compare lane values
1980      * @param e the input scalar
1981      * @return the mask result of testing lane-wise if this vector
1982      *         compares to the input, according to the selected
1983      *         comparison operator
1984      * @see IntVector#compare(VectorOperators.Comparison,Vector)
1985      * @see #eq(int)
1986      * @see #lt(int)
1987      */
1988     public abstract
1989     VectorMask<Integer> compare(Comparison op, int e);
1990 
1991     /*package-private*/
1992     @ForceInline
1993     final
1994     <M extends VectorMask<Integer>>
1995     M compareTemplate(Class<M> maskType, Comparison op, int e) {
1996         return compareTemplate(maskType, op, broadcast(e));
1997     }
1998 
1999     /**
2000      * Tests this vector by comparing it with an input scalar,
2001      * according to the given comparison operation,
2002      * in lanes selected by a mask.
2003      *
2004      * This is a masked lane-wise binary test operation which applies
2005      * the comparison operation to each pair of corresponding lane values.
2006      *
2007      * The returned result is equal to the expression
2008      * {@code compare(op,e).and(m)}.
2009      *
2010      * @param op the operation used to compare lane values
2011      * @param e the input scalar
2012      * @param m the mask controlling lane selection
2013      * @return the mask result of testing lane-wise if this vector
2014      *         compares to the input, according to the selected
2015      *         comparison operator,
2016      *         and only in the lanes selected by the mask
2017      * @see IntVector#compare(VectorOperators.Comparison,Vector,VectorMask)
2018      */
2019     @ForceInline
2020     public final VectorMask<Integer> compare(VectorOperators.Comparison op,
2021                                                int e,
2022                                                VectorMask<Integer> m) {
2023         return compare(op, broadcast(e), m);
2024     }
2025 
2026     /**
2027      * {@inheritDoc} <!--workaround-->
2028      */
2029     @Override
2030     public abstract
2031     VectorMask<Integer> compare(Comparison op, long e);
2032 
2033     /*package-private*/
2034     @ForceInline
2035     final
2036     <M extends VectorMask<Integer>>
2037     M compareTemplate(Class<M> maskType, Comparison op, long e) {
2038         return compareTemplate(maskType, op, broadcast(e));
2039     }
2040 
2041     /**
2042      * {@inheritDoc} <!--workaround-->
2043      */
2044     @Override
2045     @ForceInline
2046     public final
2047     VectorMask<Integer> compare(Comparison op, long e, VectorMask<Integer> m) {
2048         return compare(op, broadcast(e), m);
2049     }
2050 
2051 
2052 
2053     /**
2054      * {@inheritDoc} <!--workaround-->
2055      */
2056     @Override public abstract
2057     IntVector blend(Vector<Integer> v, VectorMask<Integer> m);
2058 
2059     /*package-private*/
2060     @ForceInline
2061     final
2062     <M extends VectorMask<Integer>>
2063     IntVector
2064     blendTemplate(Class<M> maskType, IntVector v, M m) {
2065         v.check(this);
2066         return VectorSupport.blend(
2067             getClass(), maskType, int.class, length(),
2068             this, v, m,
2069             (v0, v1, m_) -> v0.bOp(v1, m_, (i, a, b) -> b));
2070     }
2071 
2072     /**
2073      * {@inheritDoc} <!--workaround-->
2074      */
2075     @Override public abstract IntVector addIndex(int scale);
2076 
2077     /*package-private*/
2078     @ForceInline
2079     final IntVector addIndexTemplate(int scale) {
2080         IntSpecies vsp = vspecies();
2081         // make sure VLENGTH*scale doesn't overflow:
2082         vsp.checkScale(scale);
2083         return VectorSupport.indexVector(
2084             getClass(), int.class, length(),
2085             this, scale, vsp,
2086             (v, scale_, s)
2087             -> {
2088                 // If the platform doesn't support an INDEX
2089                 // instruction directly, load IOTA from memory
2090                 // and multiply.
2091                 IntVector iota = s.iota();
2092                 int sc = (int) scale_;
2093                 return v.add(sc == 1 ? iota : iota.mul(sc));
2094             });
2095     }
2096 
2097     /**
2098      * Replaces selected lanes of this vector with
2099      * a scalar value
2100      * under the control of a mask.
2101      *
2102      * This is a masked lane-wise binary operation which
2103      * selects each lane value from one or the other input.
2104      *
2105      * The returned result is equal to the expression
2106      * {@code blend(broadcast(e),m)}.
2107      *
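     * <p>
     * For example, a possible usage sketch (the species and mask are
     * assumptions chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {1, 2, 3, 4}, 0);
     * VectorMask<Integer> m = v.lt(3);       // true, true, false, false
     * IntVector r = v.blend(99, m);          // lanes: 99, 99, 3, 4
     * }</pre>
     *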
2108      * @param e the input scalar, containing the replacement lane value
2109      * @param m the mask controlling lane selection of the scalar
2110      * @return the result of blending the lane elements of this vector with
2111      *         the scalar value
2112      */
2113     @ForceInline
2114     public final IntVector blend(int e,
2115                                             VectorMask<Integer> m) {
2116         return blend(broadcast(e), m);
2117     }
2118 
2119     /**
2120      * Replaces selected lanes of this vector with
2121      * a scalar value
2122      * under the control of a mask.
2123      *
2124      * This is a masked lane-wise binary operation which
2125      * selects each lane value from one or the other input.
2126      *
2127      * The returned result is equal to the expression
2128      * {@code blend(broadcast(e),m)}.
2129      *
2130      * @param e the input scalar, containing the replacement lane value
2131      * @param m the mask controlling lane selection of the scalar
2132      * @return the result of blending the lane elements of this vector with
2133      *         the scalar value
2134      */
2135     @ForceInline
2136     public final IntVector blend(long e,
2137                                             VectorMask<Integer> m) {
2138         return blend(broadcast(e), m);
2139     }
2140 
2141     /**
2142      * {@inheritDoc} <!--workaround-->
2143      */
2144     @Override
2145     public abstract
2146     IntVector slice(int origin, Vector<Integer> v1);
2147 
2148     /*package-private*/
2149     final
2150     @ForceInline
2151     IntVector sliceTemplate(int origin, Vector<Integer> v1) {
2152         IntVector that = (IntVector) v1;
2153         that.check(this);
2154         Objects.checkIndex(origin, length() + 1);
2155         VectorShuffle<Integer> iota = iotaShuffle();
2156         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.LT, (broadcast((int)(length() - origin))));
2157         iota = iotaShuffle(origin, 1, true);
2158         return that.rearrange(iota).blend(this.rearrange(iota), blendMask);
2159     }
2160 
2161     /**
2162      * {@inheritDoc} <!--workaround-->
2163      */
2164     @Override
2165     @ForceInline
2166     public final
2167     IntVector slice(int origin,
2168                                Vector<Integer> w,
2169                                VectorMask<Integer> m) {
2170         return broadcast(0).blend(slice(origin, w), m);
2171     }
2172 
2173     /**
2174      * {@inheritDoc} <!--workaround-->
2175      */
2176     @Override
2177     public abstract
2178     IntVector slice(int origin);
2179 
2180     /*package-private*/
2181     final
2182     @ForceInline
2183     IntVector sliceTemplate(int origin) {
2184         Objects.checkIndex(origin, length() + 1);
2185         VectorShuffle<Integer> iota = iotaShuffle();
2186         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.LT, (broadcast((int)(length() - origin))));
2187         iota = iotaShuffle(origin, 1, true);
2188         return vspecies().zero().blend(this.rearrange(iota), blendMask);
2189     }
2190 
2191     /**
2192      * {@inheritDoc} <!--workaround-->
2193      */
2194     @Override
2195     public abstract
2196     IntVector unslice(int origin, Vector<Integer> w, int part);
2197 
2198     /*package-private*/
2199     final
2200     @ForceInline
2201     IntVector
2202     unsliceTemplate(int origin, Vector<Integer> w, int part) {
2203         IntVector that = (IntVector) w;
2204         that.check(this);
2205         Objects.checkIndex(origin, length() + 1);
2206         VectorShuffle<Integer> iota = iotaShuffle();
2207         VectorMask<Integer> blendMask = iota.toVector().compare((part == 0) ? VectorOperators.GE : VectorOperators.LT,
2208                                                                   (broadcast((int)(origin))));
2209         iota = iotaShuffle(-origin, 1, true);
2210         return that.blend(this.rearrange(iota), blendMask);
2211     }
2212 
2213     /*package-private*/
2214     final
2215     @ForceInline
2216     <M extends VectorMask<Integer>>
2217     IntVector
2218     unsliceTemplate(Class<M> maskType, int origin, Vector<Integer> w, int part, M m) {
2219         IntVector that = (IntVector) w;
2220         that.check(this);
2221         IntVector slice = that.sliceTemplate(origin, that);
2222         slice = slice.blendTemplate(maskType, this, m);
2223         return slice.unsliceTemplate(origin, w, part);
2224     }
2225 
2226     /**
2227      * {@inheritDoc} <!--workaround-->
2228      */
2229     @Override
2230     public abstract
2231     IntVector unslice(int origin, Vector<Integer> w, int part, VectorMask<Integer> m);
2232 
2233     /**
2234      * {@inheritDoc} <!--workaround-->
2235      */
2236     @Override
2237     public abstract
2238     IntVector unslice(int origin);
2239 
2240     /*package-private*/
2241     final
2242     @ForceInline
2243     IntVector
2244     unsliceTemplate(int origin) {
2245         Objects.checkIndex(origin, length() + 1);
2246         VectorShuffle<Integer> iota = iotaShuffle();
2247         VectorMask<Integer> blendMask = iota.toVector().compare(VectorOperators.GE,
2248                                                                   (broadcast((int)(origin))));
2249         iota = iotaShuffle(-origin, 1, true);
2250         return vspecies().zero().blend(this.rearrange(iota), blendMask);
2251     }
2252 
2253     private ArrayIndexOutOfBoundsException
2254     wrongPartForSlice(int part) {
2255         String msg = String.format("bad part number %d for slice operation",
2256                                    part);
2257         return new ArrayIndexOutOfBoundsException(msg);
2258     }
2259 
2260     /**
2261      * {@inheritDoc} <!--workaround-->
2262      */
2263     @Override
2264     public abstract
2265     IntVector rearrange(VectorShuffle<Integer> m);
2266 
2267     /*package-private*/
2268     @ForceInline
2269     final
2270     <S extends VectorShuffle<Integer>>
2271     IntVector rearrangeTemplate(Class<S> shuffletype, S shuffle) {
2272         shuffle.checkIndexes();
2273         return VectorSupport.rearrangeOp(
2274             getClass(), shuffletype, null, int.class, length(),
2275             this, shuffle, null,
2276             (v1, s_, m_) -> v1.uOp((i, a) -> {
2277                 int ei = s_.laneSource(i);
2278                 return v1.lane(ei);
2279             }));
2280     }
2281 
2282     /**
2283      * {@inheritDoc} <!--workaround-->
2284      */
2285     @Override
2286     public abstract
2287     IntVector rearrange(VectorShuffle<Integer> s,
2288                                    VectorMask<Integer> m);
2289 
2290     /*package-private*/
2291     @ForceInline
2292     final
2293     <S extends VectorShuffle<Integer>, M extends VectorMask<Integer>>
2294     IntVector rearrangeTemplate(Class<S> shuffletype,
2295                                            Class<M> masktype,
2296                                            S shuffle,
2297                                            M m) {
2298 
2299         m.check(masktype, this);
2300         VectorMask<Integer> valid = shuffle.laneIsValid();
2301         if (m.andNot(valid).anyTrue()) {
2302             shuffle.checkIndexes();
2303             throw new AssertionError();
2304         }
2305         return VectorSupport.rearrangeOp(
2306                    getClass(), shuffletype, masktype, int.class, length(),
2307                    this, shuffle, m,
2308                    (v1, s_, m_) -> v1.uOp((i, a) -> {
2309                         int ei = s_.laneSource(i);
2310                         return ei < 0  || !m_.laneIsSet(i) ? 0 : v1.lane(ei);
2311                    }));
2312     }
2313 
2314     /**
2315      * {@inheritDoc} <!--workaround-->
2316      */
2317     @Override
2318     public abstract
2319     IntVector rearrange(VectorShuffle<Integer> s,
2320                                    Vector<Integer> v);
2321 
2322     /*package-private*/
2323     @ForceInline
2324     final
2325     <S extends VectorShuffle<Integer>>
2326     IntVector rearrangeTemplate(Class<S> shuffletype,
2327                                            S shuffle,
2328                                            IntVector v) {
2329         VectorMask<Integer> valid = shuffle.laneIsValid();
2330         @SuppressWarnings("unchecked")
2331         S ws = (S) shuffle.wrapIndexes();
2332         IntVector r0 =
2333             VectorSupport.rearrangeOp(
2334                 getClass(), shuffletype, null, int.class, length(),
2335                 this, ws, null,
2336                 (v0, s_, m_) -> v0.uOp((i, a) -> {
2337                     int ei = s_.laneSource(i);
2338                     return v0.lane(ei);
2339                 }));
2340         IntVector r1 =
2341             VectorSupport.rearrangeOp(
2342                 getClass(), shuffletype, null, int.class, length(),
2343                 v, ws, null,
2344                 (v1, s_, m_) -> v1.uOp((i, a) -> {
2345                     int ei = s_.laneSource(i);
2346                     return v1.lane(ei);
2347                 }));
2348         return r1.blend(r0, valid);
2349     }
2350 
2351     @ForceInline
2352     private final
2353     VectorShuffle<Integer> toShuffle0(IntSpecies dsp) {
2354         int[] a = toArray();
2355         int[] sa = new int[a.length];
2356         for (int i = 0; i < a.length; i++) {
2357             sa[i] = (int) a[i];
2358         }
2359         return VectorShuffle.fromArray(dsp, sa, 0);
2360     }
2361 
2362     /*package-private*/
2363     @ForceInline
2364     final
2365     VectorShuffle<Integer> toShuffleTemplate(Class<?> shuffleType) {
2366         IntSpecies vsp = vspecies();
2367         return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
2368                                      getClass(), int.class, length(),
2369                                      shuffleType, byte.class, length(),
2370                                      this, vsp,
2371                                      IntVector::toShuffle0);
2372     }
2373 
2374     /**
2375      * {@inheritDoc} <!--workaround-->
2376      */
2377     @Override
2378     public abstract
2379     IntVector selectFrom(Vector<Integer> v);
2380 
2381     /*package-private*/
2382     @ForceInline
2383     final IntVector selectFromTemplate(IntVector v) {
2384         return v.rearrange(this.toShuffle());
2385     }
2386 
2387     /**
2388      * {@inheritDoc} <!--workaround-->
2389      */
2390     @Override
2391     public abstract
2392     IntVector selectFrom(Vector<Integer> s, VectorMask<Integer> m);
2393 
2394     /*package-private*/
2395     @ForceInline
2396     final IntVector selectFromTemplate(IntVector v,
2397                                                   AbstractMask<Integer> m) {
2398         return v.rearrange(this.toShuffle(), m);
2399     }
2400 
2401     /// Ternary operations
2402 
2403     /**
2404      * Blends together the bits of two vectors under
2405      * the control of a third, which supplies mask bits.
2406      *
2407      * This is a lane-wise ternary operation which performs
2408      * a bitwise blending operation {@code (a&~c)|(b&c)}
2409      * to each lane.
2410      *
2411      * This method is also equivalent to the expression
2412      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2413      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2414      *    BITWISE_BLEND}{@code , bits, mask)}.
2415      *
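     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration; the mask vector selects the low byte of each lane):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector a    = IntVector.fromArray(species, new int[] {0x11111111, 0x22222222, 0, -1}, 0);
     * IntVector bits = IntVector.fromArray(species, new int[] {0xAAAAAAAA, 0xBBBBBBBB, -1, 0}, 0);
     * IntVector mask = IntVector.broadcast(species, 0x000000FF);
     * IntVector r = a.bitwiseBlend(bits, mask);
     * // each lane: (a & ~mask) | (bits & mask); for example lane 0 is 0x111111AA
     * }</pre>
     *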
2416      * @param bits input bits to blend into the current vector
2417      * @param mask a bitwise mask to enable blending of the input bits
2418      * @return the bitwise blend of the given bits into the current vector,
2419      *         under control of the bitwise mask
2420      * @see #bitwiseBlend(int,int)
2421      * @see #bitwiseBlend(int,Vector)
2422      * @see #bitwiseBlend(Vector,int)
2423      * @see VectorOperators#BITWISE_BLEND
2424      * @see #lanewise(VectorOperators.Ternary,Vector,Vector,VectorMask)
2425      */
2426     @ForceInline
2427     public final
2428     IntVector bitwiseBlend(Vector<Integer> bits, Vector<Integer> mask) {
2429         return lanewise(BITWISE_BLEND, bits, mask);
2430     }
2431 
2432     /**
2433      * Blends together the bits of a vector and a scalar under
2434      * the control of another scalar, which supplies mask bits.
2435      *
2436      * This is a lane-wise ternary operation which performs
2437      * a bitwise blending operation {@code (a&~c)|(b&c)}
2438      * to each lane.
2439      *
2440      * This method is also equivalent to the expression
2441      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2442      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2443      *    BITWISE_BLEND}{@code , bits, mask)}.
2444      *
2445      * @param bits input bits to blend into the current vector
2446      * @param mask a bitwise mask to enable blending of the input bits
2447      * @return the bitwise blend of the given bits into the current vector,
2448      *         under control of the bitwise mask
2449      * @see #bitwiseBlend(Vector,Vector)
2450      * @see VectorOperators#BITWISE_BLEND
2451      * @see #lanewise(VectorOperators.Ternary,int,int,VectorMask)
2452      */
2453     @ForceInline
2454     public final
2455     IntVector bitwiseBlend(int bits, int mask) {
2456         return lanewise(BITWISE_BLEND, bits, mask);
2457     }
2458 
2459     /**
2460      * Blends together the bits of a vector and a scalar under
2461      * the control of another vector, which supplies mask bits.
2462      *
2463      * This is a lane-wise ternary operation which performs
2464      * a bitwise blending operation {@code (a&~c)|(b&c)}
2465      * to each lane.
2466      *
2467      * This method is also equivalent to the expression
2468      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2469      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2470      *    BITWISE_BLEND}{@code , bits, mask)}.
2471      *
2472      * @param bits input bits to blend into the current vector
2473      * @param mask a bitwise mask to enable blending of the input bits
2474      * @return the bitwise blend of the given bits into the current vector,
2475      *         under control of the bitwise mask
2476      * @see #bitwiseBlend(Vector,Vector)
2477      * @see VectorOperators#BITWISE_BLEND
2478      * @see #lanewise(VectorOperators.Ternary,int,Vector,VectorMask)
2479      */
2480     @ForceInline
2481     public final
2482     IntVector bitwiseBlend(int bits, Vector<Integer> mask) {
2483         return lanewise(BITWISE_BLEND, bits, mask);
2484     }
2485 
2486     /**
2487      * Blends together the bits of two vectors under
2488      * the control of a scalar, which supplies mask bits.
2489      *
2490      * This is a lane-wise ternary operation which performs
2491      * a bitwise blending operation {@code (a&~c)|(b&c)}
2492      * to each lane.
2493      *
2494      * This method is also equivalent to the expression
2495      * {@link #lanewise(VectorOperators.Ternary,Vector,Vector)
2496      *    lanewise}{@code (}{@link VectorOperators#BITWISE_BLEND
2497      *    BITWISE_BLEND}{@code , bits, mask)}.
2498      *
2499      * @param bits input bits to blend into the current vector
2500      * @param mask a bitwise mask to enable blending of the input bits
2501      * @return the bitwise blend of the given bits into the current vector,
2502      *         under control of the bitwise mask
2503      * @see #bitwiseBlend(Vector,Vector)
2504      * @see VectorOperators#BITWISE_BLEND
2505      * @see #lanewise(VectorOperators.Ternary,Vector,int,VectorMask)
2506      */
2507     @ForceInline
2508     public final
2509     IntVector bitwiseBlend(Vector<Integer> bits, int mask) {
2510         return lanewise(BITWISE_BLEND, bits, mask);
2511     }
2512 
2513 
2514     // Type specific horizontal reductions
2515 
2516     /**
2517      * Returns a value accumulated from all the lanes of this vector.
2518      *
2519      * This is an associative cross-lane reduction operation which
2520      * applies the specified operation to all the lane elements.
2521      * <p>
2522      * A few reduction operations do not support arbitrary reordering
2523      * of their operands, yet are included here because of their
2524      * usefulness.
2525      * <ul>
2526      * <li>
2527      * In the case of {@code FIRST_NONZERO}, the reduction returns
2528      * the value from the lowest-numbered non-zero lane.
2529      * <li>
2530      * All other reduction operations are fully commutative and
2531      * associative.  The implementation can choose any order of
2532      * processing, yet it will always produce the same result.
2533      * </ul>
2534      *
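     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {1, 2, 3, 4}, 0);
     * int sum = v.reduceLanes(VectorOperators.ADD);   // 10
     * int max = v.reduceLanes(VectorOperators.MAX);   // 4
     * }</pre>
     *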
2535      * @param op the operation used to combine lane values
2536      * @return the accumulated result
2537      * @throws UnsupportedOperationException if this vector does
2538      *         not support the requested operation
2539      * @see #reduceLanes(VectorOperators.Associative,VectorMask)
2540      * @see #add(Vector)
2541      * @see #mul(Vector)
2542      * @see #min(Vector)
2543      * @see #max(Vector)
2544      * @see #and(Vector)
2545      * @see #or(Vector)
2546      * @see VectorOperators#XOR
2547      * @see VectorOperators#FIRST_NONZERO
2548      */
2549     public abstract int reduceLanes(VectorOperators.Associative op);
2550 
2551     /**
2552      * Returns a value accumulated from selected lanes of this vector,
2553      * controlled by a mask.
2554      *
2555      * This is an associative cross-lane reduction operation which
2556      * applies the specified operation to the selected lane elements.
2557      * <p>
2558      * If no elements are selected, an operation-specific identity
2559      * value is returned.
2560      * <ul>
2561      * <li>
2562      * If the operation is
2563      *  {@code ADD}, {@code XOR}, {@code OR},
2564      * or {@code FIRST_NONZERO},
2565      * then the identity value is zero, the default {@code int} value.
2566      * <li>
2567      * If the operation is {@code MUL},
2568      * then the identity value is one.
2569      * <li>
2570      * If the operation is {@code AND},
2571      * then the identity value is minus one (all bits set).
2572      * <li>
2573      * If the operation is {@code MAX},
2574      * then the identity value is {@code Integer.MIN_VALUE}.
2575      * <li>
2576      * If the operation is {@code MIN},
2577      * then the identity value is {@code Integer.MAX_VALUE}.
2578      * </ul>
2579      * <p>
2580      * A few reduction operations do not support arbitrary reordering
2581      * of their operands, yet are included here because of their
2582      * usefulness.
2583      * <ul>
2584      * <li>
2585      * In the case of {@code FIRST_NONZERO}, the reduction returns
2586      * the value from the lowest-numbered non-zero lane.
2587      * <li>
2588      * All other reduction operations are fully commutative and
2589      * associative.  The implementation can choose any order of
2590      * processing, yet it will always produce the same result.
2591      * </ul>
2592      *
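     * <p>
     * For example, a possible usage sketch showing the identity value when no
     * lanes are selected (the species and masks are assumptions chosen for
     * illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {5, 6, 7, 8}, 0);
     * int s1 = v.reduceLanes(VectorOperators.ADD, v.lt(7));                // 5 + 6 = 11
     * int s2 = v.reduceLanes(VectorOperators.ADD, species.maskAll(false)); // 0 (identity)
     * }</pre>
     *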
2593      * @param op the operation used to combine lane values
2594      * @param m the mask controlling lane selection
2595      * @return the reduced result accumulated from the selected lane values
2596      * @throws UnsupportedOperationException if this vector does
2597      *         not support the requested operation
2598      * @see #reduceLanes(VectorOperators.Associative)
2599      */
2600     public abstract int reduceLanes(VectorOperators.Associative op,
2601                                        VectorMask<Integer> m);
2602 
2603     /*package-private*/
2604     @ForceInline
2605     final
2606     int reduceLanesTemplate(VectorOperators.Associative op,
2607                                Class<? extends VectorMask<Integer>> maskClass,
2608                                VectorMask<Integer> m) {
2609         m.check(maskClass, this);
2610         if (op == FIRST_NONZERO) {
2611             // FIXME:  The JIT should handle this.
2612             IntVector v = broadcast((int) 0).blend(this, m);
2613             return v.reduceLanesTemplate(op);
2614         }
2615         int opc = opCode(op);
2616         return fromBits(VectorSupport.reductionCoerced(
2617             opc, getClass(), maskClass, int.class, length(),
2618             this, m,
2619             REDUCE_IMPL.find(op, opc, IntVector::reductionOperations)));
2620     }
2621 
2622     /*package-private*/
2623     @ForceInline
2624     final
2625     int reduceLanesTemplate(VectorOperators.Associative op) {
2626         if (op == FIRST_NONZERO) {
2627             // FIXME:  The JIT should handle this.
2628             VectorMask<Integer> thisNZ
2629                 = this.viewAsIntegralLanes().compare(NE, (int) 0);
2630             int ft = thisNZ.firstTrue();
2631             return ft < length() ? this.lane(ft) : (int) 0;
2632         }
2633         int opc = opCode(op);
2634         return fromBits(VectorSupport.reductionCoerced(
2635             opc, getClass(), null, int.class, length(),
2636             this, null,
2637             REDUCE_IMPL.find(op, opc, IntVector::reductionOperations)));
2638     }
2639 
2640     private static final
2641     ImplCache<Associative, ReductionOperation<IntVector, VectorMask<Integer>>>
2642         REDUCE_IMPL = new ImplCache<>(Associative.class, IntVector.class);
2643 
2644     private static ReductionOperation<IntVector, VectorMask<Integer>> reductionOperations(int opc_) {
2645         switch (opc_) {
2646             case VECTOR_OP_ADD: return (v, m) ->
2647                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a + b)));
2648             case VECTOR_OP_MUL: return (v, m) ->
2649                     toBits(v.rOp((int)1, m, (i, a, b) -> (int)(a * b)));
2650             case VECTOR_OP_MIN: return (v, m) ->
2651                     toBits(v.rOp(MAX_OR_INF, m, (i, a, b) -> (int) Math.min(a, b)));
2652             case VECTOR_OP_MAX: return (v, m) ->
2653                     toBits(v.rOp(MIN_OR_INF, m, (i, a, b) -> (int) Math.max(a, b)));
2654             case VECTOR_OP_AND: return (v, m) ->
2655                     toBits(v.rOp((int)-1, m, (i, a, b) -> (int)(a & b)));
2656             case VECTOR_OP_OR: return (v, m) ->
2657                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a | b)));
2658             case VECTOR_OP_XOR: return (v, m) ->
2659                     toBits(v.rOp((int)0, m, (i, a, b) -> (int)(a ^ b)));
2660             default: return null;
2661         }
2662     }
2663 
2664     private static final int MIN_OR_INF = Integer.MIN_VALUE;
2665     private static final int MAX_OR_INF = Integer.MAX_VALUE;
2666 
2667     public @Override abstract long reduceLanesToLong(VectorOperators.Associative op);
2668     public @Override abstract long reduceLanesToLong(VectorOperators.Associative op,
2669                                                      VectorMask<Integer> m);
2670 
2671     // Type specific accessors
2672 
2673     /**
2674      * Gets the lane element at lane index {@code i}.
2675      *
2676      * @param i the lane index
2677      * @return the lane element at lane index {@code i}
2678      * @throws IllegalArgumentException if the index is out of range
2679      * ({@code < 0 || >= length()})
2680      */
2681     public abstract int lane(int i);
2682 
2683     /**
2684      * Replaces the lane element of this vector at lane index {@code i} with
2685      * value {@code e}.
2686      *
2687      * This is a cross-lane operation and behaves as if it returns the result
2688      * of blending this vector with an input vector that is the result of
2689      * broadcasting {@code e} and a mask that has only one lane set at lane
2690      * index {@code i}.
2691      *
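     * <p>
     * For example, a possible usage sketch (the species is an assumption
     * chosen for illustration):
     * <pre>{@code
     * var species = IntVector.SPECIES_128;   // assumed species, 4 int lanes
     * IntVector v = IntVector.fromArray(species, new int[] {1, 2, 3, 4}, 0);
     * IntVector r = v.withLane(2, 99);       // lanes: 1, 2, 99, 4
     * int e = r.lane(2);                     // 99
     * }</pre>
     *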
2692      * @param i the lane index of the lane element to be replaced
2693      * @param e the value to be placed
2694      * @return the result of replacing the lane element of this vector at lane
2695      * index {@code i} with value {@code e}.
2696      * @throws IllegalArgumentException if the index is out of range
2697      * ({@code < 0 || >= length()})
2698      */
2699     public abstract IntVector withLane(int i, int e);
2700 
2701     // Memory load operations
2702 
2703     /**
2704      * Returns an array of type {@code int[]}
2705      * containing all the lane values.
2706      * The array length is the same as the vector length.
2707      * The array elements are stored in lane order.
2708      * <p>
2709      * This method behaves as if it stores
2710      * this vector into an allocated array
2711      * (using {@link #intoArray(int[], int) intoArray})
2712      * and returns the array as follows:
2713      * <pre>{@code
2714      *   int[] a = new int[this.length()];
2715      *   this.intoArray(a, 0);
2716      *   return a;
2717      * }</pre>
2718      *
2719      * @return an array containing the lane values of this vector
2720      */
2721     @ForceInline
2722     @Override
2723     public final int[] toArray() {
2724         int[] a = new int[vspecies().laneCount()];
2725         intoArray(a, 0);
2726         return a;
2727     }
2728 
2729     /**
2730      * {@inheritDoc} <!--workaround-->
2731      * This is an alias for {@link #toArray()}.
2732      * When this method is used on vectors
2733      * of type {@code IntVector},
2734      * there will be no loss of range or precision.
2735      */
2736     @ForceInline
2737     @Override
2738     public final int[] toIntArray() {
2739         return toArray();
2740     }
2741 
2742     /** {@inheritDoc} <!--workaround-->
2743      * @implNote
2744      * When this method is used on vectors
2745      * of type {@code IntVector},
2746      * there will be no loss of precision or range,
2747      * and so no {@code UnsupportedOperationException} will
2748      * be thrown.
2749      */
2750     @ForceInline
2751     @Override
2752     public final long[] toLongArray() {
2753         int[] a = toArray();
2754         long[] res = new long[a.length];
2755         for (int i = 0; i < a.length; i++) {
2756             int e = a[i];
2757             res[i] = IntSpecies.toIntegralChecked(e, false);
2758         }
2759         return res;
2760     }
2761 
2762     /** {@inheritDoc} <!--workaround-->
2763      * @implNote
2764      * When this method is used on vectors
2765      * of type {@code IntVector},
2766      * there will be no loss of precision.
2767      */
2768     @ForceInline
2769     @Override
2770     public final double[] toDoubleArray() {
2771         int[] a = toArray();
2772         double[] res = new double[a.length];
2773         for (int i = 0; i < a.length; i++) {
2774             res[i] = (double) a[i];
2775         }
2776         return res;
2777     }
2778 
2779     /**
2780      * Loads a vector from a byte array starting at an offset.
2781      * Bytes are composed into primitive lane elements according
2782      * to the specified byte order.
2783      * The vector is arranged into lanes according to
2784      * <a href="Vector.html#lane-order">memory ordering</a>.
2785      * <p>
2786      * This method behaves as if it returns the result of calling
2787      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2788      * fromByteBuffer()} as follows:
2789      * <pre>{@code
2790      * var bb = ByteBuffer.wrap(a);
2791      * var m = species.maskAll(true);
2792      * return fromByteBuffer(species, bb, offset, bo, m);
2793      * }</pre>
2794      *
2795      * @param species species of desired vector
2796      * @param a the byte array
2797      * @param offset the offset into the array
2798      * @param bo the intended byte order
2799      * @return a vector loaded from a byte array
2800      * @throws IndexOutOfBoundsException
2801      *         if {@code offset+N*ESIZE < 0}
2802      *         or {@code offset+(N+1)*ESIZE > a.length}
2803      *         for any lane {@code N} in the vector
2804      */
2805     @ForceInline
2806     public static
2807     IntVector fromByteArray(VectorSpecies<Integer> species,
2808                                        byte[] a, int offset,
2809                                        ByteOrder bo) {
2810         offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
2811         IntSpecies vsp = (IntSpecies) species;
2812         return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
2813     }
2814 
2815     /**
2816      * Loads a vector from a byte array starting at an offset
2817      * and using a mask.
2818      * Lanes where the mask is unset are filled with the default
2819      * value of {@code int} (zero).
2820      * Bytes are composed into primitive lane elements according
2821      * to the specified byte order.
2822      * The vector is arranged into lanes according to
2823      * <a href="Vector.html#lane-order">memory ordering</a>.
2824      * <p>
2825      * This method behaves as if it returns the result of calling
2826      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
2827      * fromByteBuffer()} as follows:
2828      * <pre>{@code
2829      * var bb = ByteBuffer.wrap(a);
2830      * return fromByteBuffer(species, bb, offset, bo, m);
2831      * }</pre>
2832      *
2833      * @param species species of desired vector
2834      * @param a the byte array
2835      * @param offset the offset into the array
2836      * @param bo the intended byte order
2837      * @param m the mask controlling lane selection
2838      * @return a vector loaded from a byte array
2839      * @throws IndexOutOfBoundsException
2840      *         if {@code offset+N*ESIZE < 0}
2841      *         or {@code offset+(N+1)*ESIZE > a.length}
2842      *         for any lane {@code N} in the vector
2843      *         where the mask is set
2844      */
2845     @ForceInline
2846     public static
2847     IntVector fromByteArray(VectorSpecies<Integer> species,
2848                                        byte[] a, int offset,
2849                                        ByteOrder bo,
2850                                        VectorMask<Integer> m) {
2851         IntSpecies vsp = (IntSpecies) species;
2852         if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
2853             return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
2854         }
2855 
2856         // FIXME: optimize
2857         checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
2858         ByteBuffer wb = wrapper(a, bo);
2859         return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
2860                    (wb_, o, i)  -> wb_.getInt(o + i * 4));
2861     }
2862 
2863     /**
2864      * Loads a vector from an array of type {@code int[]}
2865      * starting at an offset.
2866      * For each vector lane, where {@code N} is the vector lane index, the
2867      * array element at index {@code offset + N} is placed into the
2868      * resulting vector at lane index {@code N}.
2869      *
2870      * @param species species of desired vector
2871      * @param a the array
2872      * @param offset the offset into the array
2873      * @return the vector loaded from an array
2874      * @throws IndexOutOfBoundsException
2875      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2876      *         for any lane {@code N} in the vector
2877      */
2878     @ForceInline
2879     public static
2880     IntVector fromArray(VectorSpecies<Integer> species,
2881                                    int[] a, int offset) {
2882         offset = checkFromIndexSize(offset, species.length(), a.length);
2883         IntSpecies vsp = (IntSpecies) species;
2884         return vsp.dummyVector().fromArray0(a, offset);
2885     }
2886 
2887     /**
2888      * Loads a vector from an array of type {@code int[]}
2889      * starting at an offset and using a mask.
2890      * Lanes where the mask is unset are filled with the default
2891      * value of {@code int} (zero).
2892      * For each vector lane, where {@code N} is the vector lane index,
2893      * if the mask lane at index {@code N} is set then the array element at
2894      * index {@code offset + N} is placed into the resulting vector at lane index
2895      * {@code N}, otherwise the default element value is placed into the
2896      * resulting vector at lane index {@code N}.
2897      *
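          * For example (a sketch; {@code a} and {@code c} are illustrative
          * arrays of equal length), a partial mask can replace the scalar
          * cleanup loop at the end of an array:
          * <pre>{@code
          * var species = IntVector.SPECIES_PREFERRED;
          * for (int i = 0; i < a.length; i += species.length()) {
          *     VectorMask<Integer> m = species.indexInRange(i, a.length);
          *     IntVector v = IntVector.fromArray(species, a, i, m);
          *     v.add(1).intoArray(c, i, m);
          * }
          * }</pre>
          *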
2898      * @param species species of desired vector
2899      * @param a the array
2900      * @param offset the offset into the array
2901      * @param m the mask controlling lane selection
2902      * @return the vector loaded from an array
2903      * @throws IndexOutOfBoundsException
2904      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
2905      *         for any lane {@code N} in the vector
2906      *         where the mask is set
2907      */
2908     @ForceInline
2909     public static
2910     IntVector fromArray(VectorSpecies<Integer> species,
2911                                    int[] a, int offset,
2912                                    VectorMask<Integer> m) {
2913         IntSpecies vsp = (IntSpecies) species;
2914         if (offset >= 0 && offset <= (a.length - species.length())) {
2915             return vsp.dummyVector().fromArray0(a, offset, m);
2916         }
2917 
2918         // FIXME: optimize
2919         checkMaskFromIndexSize(offset, vsp, m, 1, a.length);
2920         return vsp.vOp(m, i -> a[offset + i]);
2921     }
2922 
2923     /**
2924      * Gathers a new vector composed of elements from an array of type
2925      * {@code int[]},
2926      * using indexes obtained by adding a fixed {@code offset} to a
2927      * series of secondary offsets from an <em>index map</em>.
2928      * The index map is a contiguous sequence of {@code VLENGTH}
2929      * elements in a second array of {@code int}s, starting at a given
2930      * {@code mapOffset}.
2931      * <p>
2932      * For each vector lane, where {@code N} is the vector lane index,
2933      * the lane is loaded from the array
2934      * element {@code a[f(N)]}, where {@code f(N)} is the
2935      * index mapping expression
2936      * {@code offset + indexMap[mapOffset + N]}.
2937      *
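          * For example (a sketch; {@code a} is an illustrative array with at
          * least 15 elements), every other element of {@code a} could be
          * gathered into a single 256-bit vector:
          * <pre>{@code
          * var species = IntVector.SPECIES_256;            // 8 int lanes
          * int[] indexMap = {0, 2, 4, 6, 8, 10, 12, 14};
          * IntVector v = IntVector.fromArray(species, a, 0, indexMap, 0);
          * }</pre>
          *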
2938      * @param species species of desired vector
2939      * @param a the array
2940      * @param offset the offset into the array; it may be negative if relative
2941      * indexes in the index map compensate to produce a value within the
2942      * array bounds
2943      * @param indexMap the index map
2944      * @param mapOffset the offset into the index map
2945      * @return the vector loaded from the indexed elements of the array
2946      * @throws IndexOutOfBoundsException
2947      *         if {@code mapOffset+N < 0}
2948      *         or if {@code mapOffset+N >= indexMap.length},
2949      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
2950      *         is an invalid index into {@code a},
2951      *         for any lane {@code N} in the vector
2952      * @see IntVector#toIntArray()
2953      */
2954     @ForceInline
2955     public static
2956     IntVector fromArray(VectorSpecies<Integer> species,
2957                                    int[] a, int offset,
2958                                    int[] indexMap, int mapOffset) {
2959         IntSpecies vsp = (IntSpecies) species;
2960         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
2961         Objects.requireNonNull(a);
2962         Objects.requireNonNull(indexMap);
2963         Class<? extends IntVector> vectorType = vsp.vectorType();
2964 
2965         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
2966         IntVector vix = IntVector
2967             .fromArray(isp, indexMap, mapOffset)
2968             .add(offset);
2969 
2970         vix = VectorIntrinsics.checkIndex(vix, a.length);
2971 
2972         return VectorSupport.loadWithMap(
2973             vectorType, null, int.class, vsp.laneCount(),
2974             isp.vectorType(),
2975             a, ARRAY_BASE, vix, null,
2976             a, offset, indexMap, mapOffset, vsp,
2977             (c, idx, iMap, idy, s, vm) ->
2978             s.vOp(n -> c[idx + iMap[idy+n]]));
2979     }
2980 
2981     /**
2982      * Gathers a new vector composed of elements from an array of type
2983      * {@code int[]},
2984      * under the control of a mask, and
2985      * using indexes obtained by adding a fixed {@code offset} to a
2986      * series of secondary offsets from an <em>index map</em>.
2987      * The index map is a contiguous sequence of {@code VLENGTH}
2988      * elements in a second array of {@code int}s, starting at a given
2989      * {@code mapOffset}.
2990      * <p>
2991      * For each vector lane, where {@code N} is the vector lane index,
2992      * if the lane is set in the mask,
2993      * the lane is loaded from the array
2994      * element {@code a[f(N)]}, where {@code f(N)} is the
2995      * index mapping expression
2996      * {@code offset + indexMap[mapOffset + N]}.
2997      * Unset lanes in the resulting vector are set to zero.
2998      *
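          * For example (a sketch; {@code species}, {@code a}, {@code indexMap},
          * and the {@code boolean[] valid} flags are all illustrative), lanes
          * whose indexes are known to be unusable can be suppressed by the mask:
          * <pre>{@code
          * VectorMask<Integer> m = species.loadMask(valid, 0);
          * IntVector v = IntVector.fromArray(species, a, 0, indexMap, 0, m);
          * }</pre>
          *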
2999      * @param species species of desired vector
3000      * @param a the array
3001      * @param offset the offset into the array; it may be negative if relative
3002      * indexes in the index map compensate to produce a value within the
3003      * array bounds
3004      * @param indexMap the index map
3005      * @param mapOffset the offset into the index map
3006      * @param m the mask controlling lane selection
3007      * @return the vector loaded from the indexed elements of the array
3008      * @throws IndexOutOfBoundsException
3009      *         if {@code mapOffset+N < 0}
3010      *         or if {@code mapOffset+N >= indexMap.length},
3011      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3012      *         is an invalid index into {@code a},
3013      *         for any lane {@code N} in the vector
3014      *         where the mask is set
3015      * @see IntVector#toIntArray()
3016      */
3017     @ForceInline
3018     public static
3019     IntVector fromArray(VectorSpecies<Integer> species,
3020                                    int[] a, int offset,
3021                                    int[] indexMap, int mapOffset,
3022                                    VectorMask<Integer> m) {
3023         if (m.allTrue()) {
3024             return fromArray(species, a, offset, indexMap, mapOffset);
3025         }
3026         else {
3027             IntSpecies vsp = (IntSpecies) species;
3028             return vsp.dummyVector().fromArray0(a, offset, indexMap, mapOffset, m);
3029         }
3030     }
3031 
3032 
3033 
3034     /**
3035      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
3036      * starting at an offset into the byte buffer.
3037      * Bytes are composed into primitive lane elements according
3038      * to the specified byte order.
3039      * The vector is arranged into lanes according to
3040      * <a href="Vector.html#lane-order">memory ordering</a>.
3041      * <p>
3042      * This method behaves as if it returns the result of calling
3043      * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
3044      * fromByteBuffer()} as follows:
3045      * <pre>{@code
3046      * var m = species.maskAll(true);
3047      * return fromByteBuffer(species, bb, offset, bo, m);
3048      * }</pre>
3049      *
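          * For example (a usage sketch), one vector of little-endian data
          * could be loaded from the start of a heap buffer:
          * <pre>{@code
          * ByteBuffer bb = ByteBuffer.allocate(64);  // at least vectorByteSize() bytes
          * // ... bytes written into bb elsewhere ...
          * IntVector v = IntVector.fromByteBuffer(
          *     IntVector.SPECIES_128, bb, 0, ByteOrder.LITTLE_ENDIAN);
          * }</pre>
          *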
3050      * @param species species of desired vector
3051      * @param bb the byte buffer
3052      * @param offset the offset into the byte buffer
3053      * @param bo the intended byte order
3054      * @return a vector loaded from a byte buffer
3055      * @throws IndexOutOfBoundsException
3056      *         if {@code offset+N*4 < 0}
3057      *         or {@code offset+N*4 >= bb.limit()}
3058      *         for any lane {@code N} in the vector
3059      */
3060     @ForceInline
3061     public static
3062     IntVector fromByteBuffer(VectorSpecies<Integer> species,
3063                                         ByteBuffer bb, int offset,
3064                                         ByteOrder bo) {
3065         offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
3066         IntSpecies vsp = (IntSpecies) species;
3067         return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
3068     }
3069 
3070     /**
3071      * Loads a vector from a {@linkplain ByteBuffer byte buffer}
3072      * starting at an offset into the byte buffer
3073      * and using a mask.
3074      * Lanes where the mask is unset are filled with the default
3075      * value of {@code int} (zero).
3076      * Bytes are composed into primitive lane elements according
3077      * to the specified byte order.
3078      * The vector is arranged into lanes according to
3079      * <a href="Vector.html#lane-order">memory ordering</a>.
3080      * <p>
3081      * The following pseudocode illustrates the behavior:
3082      * <pre>{@code
3083      * IntBuffer eb = bb.duplicate()
3084      *     .position(offset)
3085      *     .order(bo).asIntBuffer();
3086      * int[] ar = new int[species.length()];
3087      * for (int n = 0; n < ar.length; n++) {
3088      *     if (m.laneIsSet(n)) {
3089      *         ar[n] = eb.get(n);
3090      *     }
3091      * }
3092      * IntVector r = IntVector.fromArray(species, ar, 0);
3093      * }</pre>
3094      * @implNote
3095      * This operation is likely to be more efficient if
3096      * the specified byte order is the same as
3097      * {@linkplain ByteOrder#nativeOrder()
3098      * the platform native order},
3099      * since this method will not need to reorder
3100      * the bytes of lane values.
3101      *
3102      * @param species species of desired vector
3103      * @param bb the byte buffer
3104      * @param offset the offset into the byte buffer
3105      * @param bo the intended byte order
3106      * @param m the mask controlling lane selection
3107      * @return a vector loaded from a byte buffer
3108      * @throws IndexOutOfBoundsException
3109      *         if {@code offset+N*4 < 0}
3110      *         or {@code offset+N*4 >= bb.limit()}
3111      *         for any lane {@code N} in the vector
3112      *         where the mask is set
3113      */
3114     @ForceInline
3115     public static
3116     IntVector fromByteBuffer(VectorSpecies<Integer> species,
3117                                         ByteBuffer bb, int offset,
3118                                         ByteOrder bo,
3119                                         VectorMask<Integer> m) {
3120         IntSpecies vsp = (IntSpecies) species;
3121         if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
3122             return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
3123         }
3124 
3125         // FIXME: optimize
3126         checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
3127         ByteBuffer wb = wrapper(bb, bo);
3128         return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
3129                    (wb_, o, i)  -> wb_.getInt(o + i * 4));
3130     }
3131 
3132     // Memory store operations
3133 
3134     /**
3135      * Stores this vector into an array of type {@code int[]}
3136      * starting at an offset.
3137      * <p>
3138      * For each vector lane, where {@code N} is the vector lane index,
3139      * the lane element at index {@code N} is stored into the array
3140      * element {@code a[offset+N]}.
3141      *
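          * For example (a sketch; {@code a} is any {@code int[]} holding at
          * least one vector's worth of elements), a vector of constants can
          * be written to the start of an array:
          * <pre>{@code
          * var species = IntVector.SPECIES_PREFERRED;
          * IntVector.broadcast(species, 42).intoArray(a, 0);
          * }</pre>
          *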
3142      * @param a the array, of type {@code int[]}
3143      * @param offset the offset into the array
3144      * @throws IndexOutOfBoundsException
3145      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3146      *         for any lane {@code N} in the vector
3147      */
3148     @ForceInline
3149     public final
3150     void intoArray(int[] a, int offset) {
3151         offset = checkFromIndexSize(offset, length(), a.length);
3152         IntSpecies vsp = vspecies();
3153         VectorSupport.store(
3154             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3155             a, arrayAddress(a, offset),
3156             this,
3157             a, offset,
3158             (arr, off, v)
3159             -> v.stOp(arr, off,
3160                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3161     }
3162 
3163     /**
3164      * Stores this vector into an array of type {@code int[]}
3165      * starting at an offset and using a mask.
3166      * <p>
3167      * For each vector lane, where {@code N} is the vector lane index,
3168      * the lane element at index {@code N} is stored into the array
3169      * element {@code a[offset+N]}.
3170      * If the mask lane at {@code N} is unset then the corresponding
3171      * array element {@code a[offset+N]} is left unchanged.
3172      * <p>
3173      * Array range checking is done for lanes where the mask is set.
3174      * Lanes where the mask is unset are not stored and do not need
3175      * to correspond to legitimate elements of {@code a}.
3176      * That is, unset lanes may correspond to array indexes less than
3177      * zero or beyond the end of the array.
3178      *
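          * For example (a sketch; {@code species}, {@code a}, and {@code i}
          * are illustrative), only the negative lanes of a loaded vector
          * could be overwritten with their absolute values:
          * <pre>{@code
          * IntVector v = IntVector.fromArray(species, a, i);
          * VectorMask<Integer> m = v.compare(VectorOperators.LT, 0);
          * v.neg().intoArray(a, i, m);  // unset lanes keep their old values
          * }</pre>
          *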
3179      * @param a the array, of type {@code int[]}
3180      * @param offset the offset into the array
3181      * @param m the mask controlling lane storage
3182      * @throws IndexOutOfBoundsException
3183      *         if {@code offset+N < 0} or {@code offset+N >= a.length}
3184      *         for any lane {@code N} in the vector
3185      *         where the mask is set
3186      */
3187     @ForceInline
3188     public final
3189     void intoArray(int[] a, int offset,
3190                    VectorMask<Integer> m) {
3191         if (m.allTrue()) {
3192             intoArray(a, offset);
3193         } else {
3194             IntSpecies vsp = vspecies();
3195             checkMaskFromIndexSize(offset, vsp, m, 1, a.length);
3196             intoArray0(a, offset, m);
3197         }
3198     }
3199 
3200     /**
3201      * Scatters this vector into an array of type {@code int[]}
3202      * using indexes obtained by adding a fixed {@code offset} to a
3203      * series of secondary offsets from an <em>index map</em>.
3204      * The index map is a contiguous sequence of {@code VLENGTH}
3205      * elements in a second array of {@code int}s, starting at a given
3206      * {@code mapOffset}.
3207      * <p>
3208      * For each vector lane, where {@code N} is the vector lane index,
3209      * the lane element at index {@code N} is stored into the array
3210      * element {@code a[f(N)]}, where {@code f(N)} is the
3211      * index mapping expression
3212      * {@code offset + indexMap[mapOffset + N]}.
3213      *
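          * For example (a sketch; {@code src} and {@code dest} are illustrative
          * arrays, with {@code dest} at least 15 elements long), eight lanes
          * could be scattered into the even slots of {@code dest}:
          * <pre>{@code
          * var species = IntVector.SPECIES_256;            // 8 int lanes
          * int[] indexMap = {0, 2, 4, 6, 8, 10, 12, 14};
          * IntVector.fromArray(species, src, 0)
          *          .intoArray(dest, 0, indexMap, 0);
          * }</pre>
          *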
3214      * @param a the array
3215      * @param offset an offset to combine with the index map offsets
3216      * @param indexMap the index map
3217      * @param mapOffset the offset into the index map
3218      * @throws IndexOutOfBoundsException
3219      *         if {@code mapOffset+N < 0}
3220      *         or if {@code mapOffset+N >= indexMap.length},
3221      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3222      *         is an invalid index into {@code a},
3223      *         for any lane {@code N} in the vector
3224      * @see IntVector#toIntArray()
3225      */
3226     @ForceInline
3227     public final
3228     void intoArray(int[] a, int offset,
3229                    int[] indexMap, int mapOffset) {
3230         IntSpecies vsp = vspecies();
3231         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3232         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3233         IntVector vix = IntVector
3234             .fromArray(isp, indexMap, mapOffset)
3235             .add(offset);
3236 
3237         vix = VectorIntrinsics.checkIndex(vix, a.length);
3238 
3239         VectorSupport.storeWithMap(
3240             vsp.vectorType(), null, vsp.elementType(), vsp.laneCount(),
3241             isp.vectorType(),
3242             a, arrayAddress(a, 0), vix,
3243             this, null,
3244             a, offset, indexMap, mapOffset,
3245             (arr, off, v, map, mo, vm)
3246             -> v.stOp(arr, off,
3247                       (arr_, off_, i, e) -> {
3248                           int j = map[mo + i];
3249                           arr[off + j] = e;
3250                       }));
3251     }
3252 
3253     /**
3254      * Scatters this vector into an array of type {@code int[]},
3255      * under the control of a mask, and
3256      * using indexes obtained by adding a fixed {@code offset} to a
3257      * series of secondary offsets from an <em>index map</em>.
3258      * The index map is a contiguous sequence of {@code VLENGTH}
3259      * elements in a second array of {@code int}s, starting at a given
3260      * {@code mapOffset}.
3261      * <p>
3262      * For each vector lane, where {@code N} is the vector lane index,
3263      * if the mask lane at index {@code N} is set then
3264      * the lane element at index {@code N} is stored into the array
3265      * element {@code a[f(N)]}, where {@code f(N)} is the
3266      * index mapping expression
3267      * {@code offset + indexMap[mapOffset + N]}.
3268      *
3269      * @param a the array
3270      * @param offset an offset to combine with the index map offsets
3271      * @param indexMap the index map
3272      * @param mapOffset the offset into the index map
3273      * @param m the mask
3274      * @throws IndexOutOfBoundsException
3275      *         if {@code mapOffset+N < 0}
3276      *         or if {@code mapOffset+N >= indexMap.length},
3277      *         or if {@code f(N)=offset+indexMap[mapOffset+N]}
3278      *         is an invalid index into {@code a},
3279      *         for any lane {@code N} in the vector
3280      *         where the mask is set
3281      * @see IntVector#toIntArray()
3282      */
3283     @ForceInline
3284     public final
3285     void intoArray(int[] a, int offset,
3286                    int[] indexMap, int mapOffset,
3287                    VectorMask<Integer> m) {
3288         if (m.allTrue()) {
3289             intoArray(a, offset, indexMap, mapOffset);
3290         }
3291         else {
3292             intoArray0(a, offset, indexMap, mapOffset, m);
3293         }
3294     }
3295 
3296 
3297 
3298     /**
3299      * {@inheritDoc} <!--workaround-->
3300      */
3301     @Override
3302     @ForceInline
3303     public final
3304     void intoByteArray(byte[] a, int offset,
3305                        ByteOrder bo) {
3306         offset = checkFromIndexSize(offset, byteSize(), a.length);
3307         maybeSwap(bo).intoByteArray0(a, offset);
3308     }
3309 
3310     /**
3311      * {@inheritDoc} <!--workaround-->
3312      */
3313     @Override
3314     @ForceInline
3315     public final
3316     void intoByteArray(byte[] a, int offset,
3317                        ByteOrder bo,
3318                        VectorMask<Integer> m) {
3319         if (m.allTrue()) {
3320             intoByteArray(a, offset, bo);
3321         } else {
3322             IntSpecies vsp = vspecies();
3323             checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
3324             maybeSwap(bo).intoByteArray0(a, offset, m);
3325         }
3326     }
3327 
3328     /**
3329      * {@inheritDoc} <!--workaround-->
3330      */
3331     @Override
3332     @ForceInline
3333     public final
3334     void intoByteBuffer(ByteBuffer bb, int offset,
3335                         ByteOrder bo) {
3336         if (ScopedMemoryAccess.isReadOnly(bb)) {
3337             throw new ReadOnlyBufferException();
3338         }
3339         offset = checkFromIndexSize(offset, byteSize(), bb.limit());
3340         maybeSwap(bo).intoByteBuffer0(bb, offset);
3341     }
3342 
3343     /**
3344      * {@inheritDoc} <!--workaround-->
3345      */
3346     @Override
3347     @ForceInline
3348     public final
3349     void intoByteBuffer(ByteBuffer bb, int offset,
3350                         ByteOrder bo,
3351                         VectorMask<Integer> m) {
3352         if (m.allTrue()) {
3353             intoByteBuffer(bb, offset, bo);
3354         } else {
3355             if (bb.isReadOnly()) {
3356                 throw new ReadOnlyBufferException();
3357             }
3358             IntSpecies vsp = vspecies();
3359             checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
3360             maybeSwap(bo).intoByteBuffer0(bb, offset, m);
3361         }
3362     }
3363 
3364     // ================================================
3365 
3366     // Low-level memory operations.
3367     //
3368     // Note that all of these operations *must* inline into a context
3369     // where the exact species of the involved vector is a
3370     // compile-time constant.  Otherwise, the intrinsic generation
3371     // will fail and performance will suffer.
3372     //
3373     // In many cases this is achieved by re-deriving a version of the
3374     // method in each concrete subclass (per species).  The re-derived
3375     // method simply calls one of these generic methods, with exact
3376     // parameters for the controlling metadata, which is either a
3377     // typed vector or constant species instance.
3378 
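         // For example (an illustrative sketch of the pattern rather than a
         // verbatim copy of any subclass), a shape-specific class such as
         // Int256Vector re-derives fromArray0 roughly as follows:
         //
         //    @ForceInline
         //    @Override
         //    final IntVector fromArray0(int[] a, int offset) {
         //        return super.fromArray0Template(a, offset);  // specialize
         //    }
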
3379     // Unchecked loading operations in native byte order.
3380     // Caller is responsible for applying index checks, masking, and
3381     // byte swapping.
3382 
3383     /*package-private*/
3384     abstract
3385     IntVector fromArray0(int[] a, int offset);
3386     @ForceInline
3387     final
3388     IntVector fromArray0Template(int[] a, int offset) {
3389         IntSpecies vsp = vspecies();
3390         return VectorSupport.load(
3391             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3392             a, arrayAddress(a, offset),
3393             a, offset, vsp,
3394             (arr, off, s) -> s.ldOp(arr, off,
3395                                     (arr_, off_, i) -> arr_[off_ + i]));
3396     }
3397 
3398     /*package-private*/
3399     abstract
3400     IntVector fromArray0(int[] a, int offset, VectorMask<Integer> m);
3401     @ForceInline
3402     final
3403     <M extends VectorMask<Integer>>
3404     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3405         m.check(species());
3406         IntSpecies vsp = vspecies();
3407         return VectorSupport.loadMasked(
3408             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3409             a, arrayAddress(a, offset), m,
3410             a, offset, vsp,
3411             (arr, off, s, vm) -> s.ldOp(arr, off, vm,
3412                                         (arr_, off_, i) -> arr_[off_ + i]));
3413     }
3414 
3415     /*package-private*/
3416     abstract
3417     IntVector fromArray0(int[] a, int offset,
3418                                     int[] indexMap, int mapOffset,
3419                                     VectorMask<Integer> m);
3420     @ForceInline
3421     final
3422     <M extends VectorMask<Integer>>
3423     IntVector fromArray0Template(Class<M> maskClass, int[] a, int offset,
3424                                             int[] indexMap, int mapOffset, M m) {
3425         IntSpecies vsp = vspecies();
3426         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3427         Objects.requireNonNull(a);
3428         Objects.requireNonNull(indexMap);
3429         m.check(vsp);
3430         Class<? extends IntVector> vectorType = vsp.vectorType();
3431 
3432         // Index vector: vix[0:n] = k -> offset + indexMap[mapOffset + k]
3433         IntVector vix = IntVector
3434             .fromArray(isp, indexMap, mapOffset)
3435             .add(offset);
3436 
3437         // FIXME: Check index under mask control.
3438         vix = VectorIntrinsics.checkIndex(vix, a.length);
3439 
3440         return VectorSupport.loadWithMap(
3441             vectorType, maskClass, int.class, vsp.laneCount(),
3442             isp.vectorType(),
3443             a, ARRAY_BASE, vix, m,
3444             a, offset, indexMap, mapOffset, vsp,
3445             (c, idx, iMap, idy, s, vm) ->
3446             s.vOp(vm, n -> c[idx + iMap[idy+n]]));
3447     }
3448 
3449 
3450 
3451     @Override
3452     abstract
3453     IntVector fromByteArray0(byte[] a, int offset);
3454     @ForceInline
3455     final
3456     IntVector fromByteArray0Template(byte[] a, int offset) {
3457         IntSpecies vsp = vspecies();
3458         return VectorSupport.load(
3459             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3460             a, byteArrayAddress(a, offset),
3461             a, offset, vsp,
3462             (arr, off, s) -> {
3463                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3464                 return s.ldOp(wb, off,
3465                         (wb_, o, i) -> wb_.getInt(o + i * 4));
3466             });
3467     }
3468 
3469     abstract
3470     IntVector fromByteArray0(byte[] a, int offset, VectorMask<Integer> m);
3471     @ForceInline
3472     final
3473     <M extends VectorMask<Integer>>
3474     IntVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3475         IntSpecies vsp = vspecies();
3476         m.check(vsp);
3477         return VectorSupport.loadMasked(
3478             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3479             a, byteArrayAddress(a, offset), m,
3480             a, offset, vsp,
3481             (arr, off, s, vm) -> {
3482                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3483                 return s.ldOp(wb, off, vm,
3484                         (wb_, o, i) -> wb_.getInt(o + i * 4));
3485             });
3486     }
3487 
3488     abstract
3489     IntVector fromByteBuffer0(ByteBuffer bb, int offset);
3490     @ForceInline
3491     final
3492     IntVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
3493         IntSpecies vsp = vspecies();
3494         return ScopedMemoryAccess.loadFromByteBuffer(
3495                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3496                 bb, offset, vsp,
3497                 (buf, off, s) -> {
3498                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3499                     return s.ldOp(wb, off,
3500                             (wb_, o, i) -> wb_.getInt(o + i * 4));
3501                 });
3502     }
3503 
3504     abstract
3505     IntVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
3506     @ForceInline
3507     final
3508     <M extends VectorMask<Integer>>
3509     IntVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
3510         IntSpecies vsp = vspecies();
3511         m.check(vsp);
3512         return ScopedMemoryAccess.loadFromByteBufferMasked(
3513                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3514                 bb, offset, m, vsp,
3515                 (buf, off, s, vm) -> {
3516                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3517                     return s.ldOp(wb, off, vm,
3518                             (wb_, o, i) -> wb_.getInt(o + i * 4));
3519                 });
3520     }
3521 
3522     // Unchecked storing operations in native byte order.
3523     // Caller is responsible for applying index checks, masking, and
3524     // byte swapping.
3525 
3526     abstract
3527     void intoArray0(int[] a, int offset);
3528     @ForceInline
3529     final
3530     void intoArray0Template(int[] a, int offset) {
3531         IntSpecies vsp = vspecies();
3532         VectorSupport.store(
3533             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3534             a, arrayAddress(a, offset),
3535             this, a, offset,
3536             (arr, off, v)
3537             -> v.stOp(arr, off,
3538                       (arr_, off_, i, e) -> arr_[off_+i] = e));
3539     }
3540 
3541     abstract
3542     void intoArray0(int[] a, int offset, VectorMask<Integer> m);
3543     @ForceInline
3544     final
3545     <M extends VectorMask<Integer>>
3546     void intoArray0Template(Class<M> maskClass, int[] a, int offset, M m) {
3547         m.check(species());
3548         IntSpecies vsp = vspecies();
3549         VectorSupport.storeMasked(
3550             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3551             a, arrayAddress(a, offset),
3552             this, m, a, offset,
3553             (arr, off, v, vm)
3554             -> v.stOp(arr, off, vm,
3555                       (arr_, off_, i, e) -> arr_[off_ + i] = e));
3556     }
3557 
3558     abstract
3559     void intoArray0(int[] a, int offset,
3560                     int[] indexMap, int mapOffset,
3561                     VectorMask<Integer> m);
3562     @ForceInline
3563     final
3564     <M extends VectorMask<Integer>>
3565     void intoArray0Template(Class<M> maskClass, int[] a, int offset,
3566                             int[] indexMap, int mapOffset, M m) {
3567         m.check(species());
3568         IntSpecies vsp = vspecies();
3569         IntVector.IntSpecies isp = IntVector.species(vsp.indexShape());
3570         // Index vector: vix[0:n] = i -> offset + indexMap[mo + i]
3571         IntVector vix = IntVector
3572             .fromArray(isp, indexMap, mapOffset)
3573             .add(offset);
3574 
3575         // FIXME: Check index under mask control.
3576         vix = VectorIntrinsics.checkIndex(vix, a.length);
3577 
3578         VectorSupport.storeWithMap(
3579             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3580             isp.vectorType(),
3581             a, arrayAddress(a, 0), vix,
3582             this, m,
3583             a, offset, indexMap, mapOffset,
3584             (arr, off, v, map, mo, vm)
3585             -> v.stOp(arr, off, vm,
3586                       (arr_, off_, i, e) -> {
3587                           int j = map[mo + i];
3588                           arr[off + j] = e;
3589                       }));
3590     }
3591 
3592 
3593     abstract
3594     void intoByteArray0(byte[] a, int offset);
3595     @ForceInline
3596     final
3597     void intoByteArray0Template(byte[] a, int offset) {
3598         IntSpecies vsp = vspecies();
3599         VectorSupport.store(
3600             vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3601             a, byteArrayAddress(a, offset),
3602             this, a, offset,
3603             (arr, off, v) -> {
3604                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3605                 v.stOp(wb, off,
3606                         (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
3607             });
3608     }
3609 
3610     abstract
3611     void intoByteArray0(byte[] a, int offset, VectorMask<Integer> m);
3612     @ForceInline
3613     final
3614     <M extends VectorMask<Integer>>
3615     void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
3616         IntSpecies vsp = vspecies();
3617         m.check(vsp);
3618         VectorSupport.storeMasked(
3619             vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3620             a, byteArrayAddress(a, offset),
3621             this, m, a, offset,
3622             (arr, off, v, vm) -> {
3623                 ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
3624                 v.stOp(wb, off, vm,
3625                         (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
3626             });
3627     }
3628 
3629     @ForceInline
3630     final
3631     void intoByteBuffer0(ByteBuffer bb, int offset) {
3632         IntSpecies vsp = vspecies();
3633         ScopedMemoryAccess.storeIntoByteBuffer(
3634                 vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
3635                 this, bb, offset,
3636                 (buf, off, v) -> {
3637                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3638                     v.stOp(wb, off,
3639                             (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
3640                 });
3641     }
3642 
3643     abstract
3644     void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
3645     @ForceInline
3646     final
3647     <M extends VectorMask<Integer>>
3648     void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
3649         IntSpecies vsp = vspecies();
3650         m.check(vsp);
3651         ScopedMemoryAccess.storeIntoByteBufferMasked(
3652                 vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
3653                 this, m, bb, offset,
3654                 (buf, off, v, vm) -> {
3655                     ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
3656                     v.stOp(wb, off, vm,
3657                             (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
3658                 });
3659     }
3660 
3661 
3662     // End of low-level memory operations.
3663 
3664     private static
3665     void checkMaskFromIndexSize(int offset,
3666                                 IntSpecies vsp,
3667                                 VectorMask<Integer> m,
3668                                 int scale,
3669                                 int limit) {
3670         ((AbstractMask<Integer>)m)
3671             .checkIndexByLane(offset, limit, vsp.iota(), scale);
3672     }
3673 
3674     @ForceInline
3675     private void conditionalStoreNYI(int offset,
3676                                      IntSpecies vsp,
3677                                      VectorMask<Integer> m,
3678                                      int scale,
3679                                      int limit) {
3680         if (offset < 0 || offset + vsp.laneCount() * scale > limit) {
3681             String msg =
3682                 String.format("unimplemented: store @%d in [0..%d), %s in %s",
3683                               offset, limit, m, vsp);
3684             throw new AssertionError(msg);
3685         }
3686     }
3687 
3688     /*package-private*/
3689     @Override
3690     @ForceInline
3691     final
3692     IntVector maybeSwap(ByteOrder bo) {
3693         if (bo != NATIVE_ENDIAN) {
3694             return this.reinterpretAsBytes()
3695                 .rearrange(swapBytesShuffle())
3696                 .reinterpretAsInts();
3697         }
3698         return this;
3699     }
3700 
3701     static final int ARRAY_SHIFT =
3702         31 - Integer.numberOfLeadingZeros(Unsafe.ARRAY_INT_INDEX_SCALE);
3703     static final long ARRAY_BASE =
3704         Unsafe.ARRAY_INT_BASE_OFFSET;
3705 
3706     @ForceInline
3707     static long arrayAddress(int[] a, int index) {
3708         return ARRAY_BASE + (((long)index) << ARRAY_SHIFT);
3709     }
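
         // Worked example (a sketch): with the usual 4-byte int element scale,
         // ARRAY_SHIFT is 2, so arrayAddress(a, 3) evaluates to
         // ARRAY_BASE + (3 << 2) = ARRAY_BASE + 12.  The concrete value of
         // ARRAY_BASE comes from Unsafe and may vary with the platform and
         // JVM configuration.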
3710 
3711 
3712 
3713     @ForceInline
3714     static long byteArrayAddress(byte[] a, int index) {
3715         return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
3716     }
3717 
3718     // ================================================
3719 
3720     /// Reinterpreting view methods:
3721     //   lanewise reinterpret: viewAsXVector()
3722     //   keep shape, redraw lanes: reinterpretAsEs()
3723 
3724     /**
3725      * {@inheritDoc} <!--workaround-->
3726      */
3727     @ForceInline
3728     @Override
3729     public final ByteVector reinterpretAsBytes() {
3730          // Going to ByteVector, pay close attention to byte order.
3731          assert(REGISTER_ENDIAN == ByteOrder.LITTLE_ENDIAN);
3732          return asByteVectorRaw();
3733          //return asByteVectorRaw().rearrange(swapBytesShuffle());
3734     }
3735 
3736     /**
3737      * {@inheritDoc} <!--workaround-->
3738      */
3739     @ForceInline
3740     @Override
3741     public final IntVector viewAsIntegralLanes() {
3742         return this;
3743     }
3744 
3745     /**
3746      * {@inheritDoc} <!--workaround-->
3747      */
3748     @ForceInline
3749     @Override
3750     public final
3751     FloatVector
3752     viewAsFloatingLanes() {
3753         LaneType flt = LaneType.INT.asFloating();
3754         return (FloatVector) asVectorRaw(flt);
3755     }
3756 
3757     // ================================================
3758 
3759     /// Object methods: toString, equals, hashCode
3760     //
3761     // Object methods are defined as if Arrays.toString, etc.,
3762     // were applied to the array of elements.  Two equal vectors
3763     // are required to have equal species and equal lane values.
3764 
3765     /**
3766      * Returns a string representation of this vector, of the form
3767      * {@code "[0,1,2...]"}, reporting the lane values of this vector,
3768      * in lane order.
3769      *
3770      * The string is produced as if by a call to {@link
3771      * java.util.Arrays#toString(int[]) Arrays.toString()},
3772      * as appropriate to the {@code int} array returned by
3773      * {@link #toArray this.toArray()}.
3774      *
3775      * @return a string of the form {@code "[0,1,2...]"}
3776      * reporting the lane values of this vector
3777      */
3778     @Override
3779     @ForceInline
3780     public final
3781     String toString() {
3782         // now that toArray is strongly typed, we can define this
3783         return Arrays.toString(toArray());
3784     }
3785 
3786     /**
3787      * {@inheritDoc} <!--workaround-->
3788      */
3789     @Override
3790     @ForceInline
3791     public final
3792     boolean equals(Object obj) {
3793         if (obj instanceof Vector) {
3794             Vector<?> that = (Vector<?>) obj;
3795             if (this.species().equals(that.species())) {
3796                 return this.eq(that.check(this.species())).allTrue();
3797             }
3798         }
3799         return false;
3800     }
3801 
3802     /**
3803      * {@inheritDoc} <!--workaround-->
3804      */
3805     @Override
3806     @ForceInline
3807     public final
3808     int hashCode() {
3809         // now that toArray is strongly typed, we can define this
3810         return Objects.hash(species(), Arrays.hashCode(toArray()));
3811     }
3812 
3813     // ================================================
3814 
3815     // Species
3816 
3817     /**
3818      * Class representing {@link IntVector}s of the same {@link VectorShape VectorShape}.
3819      */
3820     /*package-private*/
3821     static final class IntSpecies extends AbstractSpecies<Integer> {
3822         private IntSpecies(VectorShape shape,
3823                 Class<? extends IntVector> vectorType,
3824                 Class<? extends AbstractMask<Integer>> maskType,
3825                 Function<Object, IntVector> vectorFactory) {
3826             super(shape, LaneType.of(int.class),
3827                   vectorType, maskType,
3828                   vectorFactory);
3829             assert(this.elementSize() == Integer.SIZE);
3830         }
3831 
3832         // Specializing overrides:
3833 
3834         @Override
3835         @ForceInline
3836         public final Class<Integer> elementType() {
3837             return int.class;
3838         }
3839 
3840         @Override
3841         @ForceInline
3842         final Class<Integer> genericElementType() {
3843             return Integer.class;
3844         }
3845 
3846         @SuppressWarnings("unchecked")
3847         @Override
3848         @ForceInline
3849         public final Class<? extends IntVector> vectorType() {
3850             return (Class<? extends IntVector>) vectorType;
3851         }
3852 
3853         @Override
3854         @ForceInline
3855         public final long checkValue(long e) {
3856             longToElementBits(e);  // only for exception
3857             return e;
3858         }
3859 
3860         /*package-private*/
3861         @Override
3862         @ForceInline
3863         final IntVector broadcastBits(long bits) {
3864             return (IntVector)
3865                 VectorSupport.fromBitsCoerced(
3866                     vectorType, int.class, laneCount,
3867                     bits, MODE_BROADCAST, this,
3868                     (bits_, s_) -> s_.rvOp(i -> bits_));
3869         }
3870 
3871         /*package-private*/
3872         @ForceInline
3873         final IntVector broadcast(int e) {
3874             return broadcastBits(toBits(e));
3875         }
3876 
3877         @Override
3878         @ForceInline
3879         public final IntVector broadcast(long e) {
3880             return broadcastBits(longToElementBits(e));
3881         }
3882 
3883         /*package-private*/
3884         final @Override
3885         @ForceInline
3886         long longToElementBits(long value) {
3887             // Do the conversion, and then test it for failure.
3888             int e = (int) value;
3889             if ((long) e != value) {
3890                 throw badElementBits(value, e);
3891             }
3892             return toBits(e);
3893         }
3894 
3895         /*package-private*/
3896         @ForceInline
3897         static long toIntegralChecked(int e, boolean convertToInt) {
3898             long value = convertToInt ? (int) e : (long) e;
3899             if ((int) value != e) {
3900                 throw badArrayBits(e, convertToInt, value);
3901             }
3902             return value;
3903         }
3904 
3905         /* this non-public one is for internal conversions */
3906         @Override
3907         @ForceInline
3908         final IntVector fromIntValues(int[] values) {
3909             VectorIntrinsics.requireLength(values.length, laneCount);
3910             int[] va = new int[laneCount()];
3911             for (int i = 0; i < va.length; i++) {
3912                 int lv = values[i];
3913                 int v = (int) lv;
3914                 va[i] = v;
3915                 if ((int)v != lv) {
3916                     throw badElementBits(lv, v);
3917                 }
3918             }
3919             return dummyVector().fromArray0(va, 0);
3920         }
3921 
3922         // Virtual constructors
3923 
3924         @ForceInline
3925         @Override final
3926         public IntVector fromArray(Object a, int offset) {
3927             // User entry point:  Be careful with inputs.
3928             return IntVector
3929                 .fromArray(this, (int[]) a, offset);
3930         }
3931 
3932         @ForceInline
3933         @Override final
3934         IntVector dummyVector() {
3935             return (IntVector) super.dummyVector();
3936         }
3937 
3938         /*package-private*/
3939         final @Override
3940         @ForceInline
3941         IntVector rvOp(RVOp f) {
3942             int[] res = new int[laneCount()];
3943             for (int i = 0; i < res.length; i++) {
3944                 int bits = (int) f.apply(i);
3945                 res[i] = fromBits(bits);
3946             }
3947             return dummyVector().vectorFactory(res);
3948         }
3949 
3950         IntVector vOp(FVOp f) {
3951             int[] res = new int[laneCount()];
3952             for (int i = 0; i < res.length; i++) {
3953                 res[i] = f.apply(i);
3954             }
3955             return dummyVector().vectorFactory(res);
3956         }
3957 
3958         IntVector vOp(VectorMask<Integer> m, FVOp f) {
3959             int[] res = new int[laneCount()];
3960             boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
3961             for (int i = 0; i < res.length; i++) {
3962                 if (mbits[i]) {
3963                     res[i] = f.apply(i);
3964                 }
3965             }
3966             return dummyVector().vectorFactory(res);
3967         }
3968 
3969         /*package-private*/
3970         @ForceInline
3971         <M> IntVector ldOp(M memory, int offset,
3972                                       FLdOp<M> f) {
3973             return dummyVector().ldOp(memory, offset, f);
3974         }
3975 
3976         /*package-private*/
3977         @ForceInline
3978         <M> IntVector ldOp(M memory, int offset,
3979                                       VectorMask<Integer> m,
3980                                       FLdOp<M> f) {
3981             return dummyVector().ldOp(memory, offset, m, f);
3982         }
3983 
3984         /*package-private*/
3985         @ForceInline
3986         <M> void stOp(M memory, int offset, FStOp<M> f) {
3987             dummyVector().stOp(memory, offset, f);
3988         }
3989 
3990         /*package-private*/
3991         @ForceInline
3992         <M> void stOp(M memory, int offset,
3993                       AbstractMask<Integer> m,
3994                       FStOp<M> f) {
3995             dummyVector().stOp(memory, offset, m, f);
3996         }
3997 
3998         // N.B. Make sure these constant vectors and
3999         // masks load up correctly into registers.
4000         //
4001         // Also, see if we can avoid all that switching.
4002         // Could we cache both vectors and both masks in
4003         // this species object?
4004 
4005         // Zero and iota vector access
4006         @Override
4007         @ForceInline
4008         public final IntVector zero() {
4009             if ((Class<?>) vectorType() == IntMaxVector.class)
4010                 return IntMaxVector.ZERO;
4011             switch (vectorBitSize()) {
4012                 case 64: return Int64Vector.ZERO;
4013                 case 128: return Int128Vector.ZERO;
4014                 case 256: return Int256Vector.ZERO;
4015                 case 512: return Int512Vector.ZERO;
4016             }
4017             throw new AssertionError();
4018         }
4019 
4020         @Override
4021         @ForceInline
4022         public final IntVector iota() {
4023             if ((Class<?>) vectorType() == IntMaxVector.class)
4024                 return IntMaxVector.IOTA;
4025             switch (vectorBitSize()) {
4026                 case 64: return Int64Vector.IOTA;
4027                 case 128: return Int128Vector.IOTA;
4028                 case 256: return Int256Vector.IOTA;
4029                 case 512: return Int512Vector.IOTA;
4030             }
4031             throw new AssertionError();
4032         }
4033 
4034         // Mask access
4035         @Override
4036         @ForceInline
4037         public final VectorMask<Integer> maskAll(boolean bit) {
4038             if ((Class<?>) vectorType() == IntMaxVector.class)
4039                 return IntMaxVector.IntMaxMask.maskAll(bit);
4040             switch (vectorBitSize()) {
4041                 case 64: return Int64Vector.Int64Mask.maskAll(bit);
4042                 case 128: return Int128Vector.Int128Mask.maskAll(bit);
4043                 case 256: return Int256Vector.Int256Mask.maskAll(bit);
4044                 case 512: return Int512Vector.Int512Mask.maskAll(bit);
4045             }
4046             throw new AssertionError();
4047         }
4048     }
4049 
4050     /**
4051      * Finds a species for an element type of {@code int} and shape.
4052      *
4053      * @param s the shape
4054      * @return a species for an element type of {@code int} and shape
4055      * @throws IllegalArgumentException if no such species exists for the shape
4056      */
4057     static IntSpecies species(VectorShape s) {
4058         Objects.requireNonNull(s);
4059         switch (s.switchKey) {
4060             case VectorShape.SK_64_BIT: return (IntSpecies) SPECIES_64;
4061             case VectorShape.SK_128_BIT: return (IntSpecies) SPECIES_128;
4062             case VectorShape.SK_256_BIT: return (IntSpecies) SPECIES_256;
4063             case VectorShape.SK_512_BIT: return (IntSpecies) SPECIES_512;
4064             case VectorShape.SK_Max_BIT: return (IntSpecies) SPECIES_MAX;
4065             default: throw new IllegalArgumentException("Bad shape: " + s);
4066         }
4067     }
4068 
4069     /** Species representing {@link IntVector}s of {@link VectorShape#S_64_BIT VectorShape.S_64_BIT}. */
4070     public static final VectorSpecies<Integer> SPECIES_64
4071         = new IntSpecies(VectorShape.S_64_BIT,
4072                             Int64Vector.class,
4073                             Int64Vector.Int64Mask.class,
4074                             Int64Vector::new);
4075 
4076     /** Species representing {@link IntVector}s of {@link VectorShape#S_128_BIT VectorShape.S_128_BIT}. */
4077     public static final VectorSpecies<Integer> SPECIES_128
4078         = new IntSpecies(VectorShape.S_128_BIT,
4079                             Int128Vector.class,
4080                             Int128Vector.Int128Mask.class,
4081                             Int128Vector::new);
4082 
4083     /** Species representing {@link IntVector}s of {@link VectorShape#S_256_BIT VectorShape.S_256_BIT}. */
4084     public static final VectorSpecies<Integer> SPECIES_256
4085         = new IntSpecies(VectorShape.S_256_BIT,
4086                             Int256Vector.class,
4087                             Int256Vector.Int256Mask.class,
4088                             Int256Vector::new);
4089 
4090     /** Species representing {@link IntVector}s of {@link VectorShape#S_512_BIT VectorShape.S_512_BIT}. */
4091     public static final VectorSpecies<Integer> SPECIES_512
4092         = new IntSpecies(VectorShape.S_512_BIT,
4093                             Int512Vector.class,
4094                             Int512Vector.Int512Mask.class,
4095                             Int512Vector::new);
4096 
4097     /** Species representing {@link IntVector}s of {@link VectorShape#S_Max_BIT VectorShape.S_Max_BIT}. */
4098     public static final VectorSpecies<Integer> SPECIES_MAX
4099         = new IntSpecies(VectorShape.S_Max_BIT,
4100                             IntMaxVector.class,
4101                             IntMaxVector.IntMaxMask.class,
4102                             IntMaxVector::new);
4103 
4104     /**
4105      * Preferred species for {@link IntVector}s.
4106      * A preferred species is a species of maximal bit-size for the platform.
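          * <p>
          * For example (a usage sketch, with {@code a} standing in for any
          * {@code int[]}), shape-agnostic code is typically written against
          * this species:
          * <pre>{@code
          * var species = IntVector.SPECIES_PREFERRED;
          * int upperBound = species.loopBound(a.length);
          * for (int i = 0; i < upperBound; i += species.length()) {
          *     IntVector v = IntVector.fromArray(species, a, i);
          *     // ... process v ...
          * }
          * }</pre>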
4107      */
4108     public static final VectorSpecies<Integer> SPECIES_PREFERRED
4109         = (IntSpecies) VectorSpecies.ofPreferred(int.class);
4110 }