/*
 * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.incubator.vector;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.IntUnaryOperator;

import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;

import static jdk.internal.vm.vector.VectorSupport.*;

import static jdk.incubator.vector.VectorOperators.*;

// -- This file was mechanically generated: Do not edit! -- //

// Concrete specialization of DoubleVector for the 64-bit vector shape
// (SPECIES_64).  With 64-bit double lanes, this shape carries exactly one
// lane.  Almost every method here delegates to a *Template method in the
// abstract superclass, passing the concrete class literals (VCLASS,
// Double64Mask.class, Double64Shuffle.class) that the VectorSupport JIT
// intrinsics need in order to emit shape-specialized machine code; the
// lambdas passed alongside are the scalar fallback paths used when the
// intrinsic does not apply.
@SuppressWarnings("cast")  // warning: redundant cast
final class Double64Vector extends DoubleVector {
    static final DoubleSpecies VSPECIES =
        (DoubleSpecies) DoubleVector.SPECIES_64;

    static final VectorShape VSHAPE =
        VSPECIES.vectorShape();

    static final Class<Double64Vector> VCLASS = Double64Vector.class;

    static final int VSIZE = VSPECIES.vectorBitSize();

    static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM

    static final Class<Double> ETYPE = double.class; // used by the JVM

    Double64Vector(double[] v) {
        super(v);
    }

    // For compatibility as Double64Vector::new,
    // stored into species.vectorFactory.
    Double64Vector(Object v) {
        this((double[]) v);
    }

    static final Double64Vector ZERO = new Double64Vector(new double[VLENGTH]);
    static final Double64Vector IOTA = new Double64Vector(VSPECIES.iotaArray());

    static {
        // Warm up a few species caches.
        // If we do this too much we will
        // get NPEs from bootstrap circularity.
        VSPECIES.dummyVector();
        VSPECIES.withLanes(LaneType.BYTE);
    }

    // Specialized extractors

    @ForceInline
    final @Override
    public DoubleSpecies vspecies() {
        // ISSUE: This should probably be a @Stable
        // field inside AbstractVector, rather than
        // a megamorphic method.
        return VSPECIES;
    }

    @ForceInline
    @Override
    public final Class<Double> elementType() { return double.class; }

    @ForceInline
    @Override
    public final int elementSize() { return Double.SIZE; }

    @ForceInline
    @Override
    public final VectorShape shape() { return VSHAPE; }

    @ForceInline
    @Override
    public final int length() { return VLENGTH; }

    @ForceInline
    @Override
    public final int bitSize() { return VSIZE; }

    @ForceInline
    @Override
    public final int byteSize() { return VSIZE / Byte.SIZE; }

    /*package-private*/
    @ForceInline
    final @Override
    double[] vec() {
        return (double[])getPayload();
    }

    // Virtualized constructors

    @Override
    @ForceInline
    public final Double64Vector broadcast(double e) {
        return (Double64Vector) super.broadcastTemplate(e);  // specialize
    }

    @Override
    @ForceInline
    public final Double64Vector broadcast(long e) {
        return (Double64Vector) super.broadcastTemplate(e);  // specialize
    }

    @Override
    @ForceInline
    Double64Mask maskFromArray(boolean[] bits) {
        return new Double64Mask(bits);
    }

    @Override
    @ForceInline
    Double64Shuffle iotaShuffle() { return Double64Shuffle.IOTA; }

    @ForceInline
    Double64Shuffle iotaShuffle(int start, int step, boolean wrap) {
      // Last int argument to shuffleIota selects the wrapped (1) or
      // unwrapped (0) fallback lambda below.
      if (wrap) {
        return (Double64Shuffle)VectorSupport.shuffleIota(ETYPE, Double64Shuffle.class, VSPECIES, VLENGTH, start, step, 1,
                (l, lstart, lstep, s) -> s.shuffleFromOp(i -> (VectorIntrinsics.wrapToRange(i*lstep + lstart, l))));
      } else {
        return (Double64Shuffle)VectorSupport.shuffleIota(ETYPE, Double64Shuffle.class, VSPECIES, VLENGTH, start, step, 0,
                (l, lstart, lstep, s) -> s.shuffleFromOp(i -> (i*lstep + lstart)));
      }
    }

    @Override
    @ForceInline
    Double64Shuffle shuffleFromBytes(byte[] reorder) { return new Double64Shuffle(reorder); }

    @Override
    @ForceInline
    Double64Shuffle shuffleFromArray(int[] indexes, int i) { return new Double64Shuffle(indexes, i); }

    @Override
    @ForceInline
    Double64Shuffle shuffleFromOp(IntUnaryOperator fn) { return new Double64Shuffle(fn); }

    // Make a vector of the same species but the given elements:
    @ForceInline
    final @Override
    Double64Vector vectorFactory(double[] vec) {
        return new Double64Vector(vec);
    }

    @ForceInline
    final @Override
    Byte64Vector asByteVectorRaw() {
        return (Byte64Vector) super.asByteVectorRawTemplate();  // specialize
    }

    @ForceInline
    final @Override
    AbstractVector<?> asVectorRaw(LaneType laneType) {
        return super.asVectorRawTemplate(laneType);  // specialize
    }

    // Unary operator

    @ForceInline
    final @Override
    Double64Vector uOp(FUnOp f) {
        return (Double64Vector) super.uOpTemplate(f);  // specialize
    }

    @ForceInline
    final @Override
    Double64Vector uOp(VectorMask<Double> m, FUnOp f) {
        return (Double64Vector)
            super.uOpTemplate((Double64Mask)m, f);  // specialize
    }

    // Binary operator

    @ForceInline
    final @Override
    Double64Vector bOp(Vector<Double> v, FBinOp f) {
        return (Double64Vector) super.bOpTemplate((Double64Vector)v, f);  // specialize
    }

    @ForceInline
    final @Override
    Double64Vector bOp(Vector<Double> v,
                     VectorMask<Double> m, FBinOp f) {
        return (Double64Vector)
            super.bOpTemplate((Double64Vector)v, (Double64Mask)m,
                              f);  // specialize
    }

    // Ternary operator

    @ForceInline
    final @Override
    Double64Vector tOp(Vector<Double> v1, Vector<Double> v2, FTriOp f) {
        return (Double64Vector)
            super.tOpTemplate((Double64Vector)v1, (Double64Vector)v2,
                              f);  // specialize
    }

    @ForceInline
    final @Override
    Double64Vector tOp(Vector<Double> v1, Vector<Double> v2,
                     VectorMask<Double> m, FTriOp f) {
        return (Double64Vector)
            super.tOpTemplate((Double64Vector)v1, (Double64Vector)v2,
                              (Double64Mask)m, f);  // specialize
    }

    @ForceInline
    final @Override
    double rOp(double v, VectorMask<Double> m, FBinOp f) {
        return super.rOpTemplate(v, m, f);  // specialize
    }

    @Override
    @ForceInline
    public final <F>
    Vector<F> convertShape(VectorOperators.Conversion<Double,F> conv,
                           VectorSpecies<F> rsp, int part) {
        return super.convertShapeTemplate(conv, rsp, part);  // specialize
    }

    @Override
    @ForceInline
    public final <F>
    Vector<F> reinterpretShape(VectorSpecies<F> toSpecies, int part) {
        return super.reinterpretShapeTemplate(toSpecies, part);  // specialize
    }

    // Specialized algebraic operations:

    // The following definition forces a specialized version of this
    // crucial method into the v-table of this class. A call to add()
    // will inline to a call to lanewise(ADD,), at which point the JIT
    // intrinsic will have the opcode of ADD, plus all the metadata
    // for this particular class, enabling it to generate precise
    // code.
    //
    // There is probably no benefit to the JIT to specialize the
    // masked or broadcast versions of the lanewise method.

    @Override
    @ForceInline
    public Double64Vector lanewise(Unary op) {
        return (Double64Vector) super.lanewiseTemplate(op);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector lanewise(Unary op, VectorMask<Double> m) {
        return (Double64Vector) super.lanewiseTemplate(op, Double64Mask.class, (Double64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector lanewise(Binary op, Vector<Double> v) {
        return (Double64Vector) super.lanewiseTemplate(op, v);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector lanewise(Binary op, Vector<Double> v, VectorMask<Double> m) {
        return (Double64Vector) super.lanewiseTemplate(op, Double64Mask.class, v, (Double64Mask) m);  // specialize
    }


    /*package-private*/
    @Override
    @ForceInline
    public final
    Double64Vector
    lanewise(Ternary op, Vector<Double> v1, Vector<Double> v2) {
        return (Double64Vector) super.lanewiseTemplate(op, v1, v2);  // specialize
    }

    @Override
    @ForceInline
    public final
    Double64Vector
    lanewise(Ternary op, Vector<Double> v1, Vector<Double> v2, VectorMask<Double> m) {
        return (Double64Vector) super.lanewiseTemplate(op, Double64Mask.class, v1, v2, (Double64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public final
    Double64Vector addIndex(int scale) {
        return (Double64Vector) super.addIndexTemplate(scale);  // specialize
    }

    // Type specific horizontal reductions

    @Override
    @ForceInline
    public final double reduceLanes(VectorOperators.Associative op) {
        return super.reduceLanesTemplate(op);  // specialized
    }

    @Override
    @ForceInline
    public final double reduceLanes(VectorOperators.Associative op,
                                    VectorMask<Double> m) {
        return super.reduceLanesTemplate(op, Double64Mask.class, (Double64Mask) m);  // specialized
    }

    @Override
    @ForceInline
    public final long reduceLanesToLong(VectorOperators.Associative op) {
        return (long) super.reduceLanesTemplate(op);  // specialized
    }

    @Override
    @ForceInline
    public final long reduceLanesToLong(VectorOperators.Associative op,
                                        VectorMask<Double> m) {
        return (long) super.reduceLanesTemplate(op, Double64Mask.class, (Double64Mask) m);  // specialized
    }

    @ForceInline
    public VectorShuffle<Double> toShuffle() {
        return super.toShuffleTemplate(Double64Shuffle.class);  // specialize
    }

    // Specialized unary testing

    @Override
    @ForceInline
    public final Double64Mask test(Test op) {
        return super.testTemplate(Double64Mask.class, op);  // specialize
    }

    @Override
    @ForceInline
    public final Double64Mask test(Test op, VectorMask<Double> m) {
        return super.testTemplate(Double64Mask.class, op, (Double64Mask) m);  // specialize
    }

    // Specialized comparisons

    @Override
    @ForceInline
    public final Double64Mask compare(Comparison op, Vector<Double> v) {
        return super.compareTemplate(Double64Mask.class, op, v);  // specialize
    }

    @Override
    @ForceInline
    public final Double64Mask compare(Comparison op, double s) {
        return super.compareTemplate(Double64Mask.class, op, s);  // specialize
    }

    @Override
    @ForceInline
    public final Double64Mask compare(Comparison op, long s) {
        return super.compareTemplate(Double64Mask.class, op, s);  // specialize
    }

    @Override
    @ForceInline
    public final Double64Mask compare(Comparison op, Vector<Double> v, VectorMask<Double> m) {
        return super.compareTemplate(Double64Mask.class, op, v, (Double64Mask) m);
    }


    @Override
    @ForceInline
    public Double64Vector blend(Vector<Double> v, VectorMask<Double> m) {
        return (Double64Vector)
            super.blendTemplate(Double64Mask.class,
                                (Double64Vector) v,
                                (Double64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector slice(int origin, Vector<Double> v) {
        return (Double64Vector) super.sliceTemplate(origin, v);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector slice(int origin) {
        return (Double64Vector) super.sliceTemplate(origin);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector unslice(int origin, Vector<Double> w, int part) {
        return (Double64Vector) super.unsliceTemplate(origin, w, part);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector unslice(int origin, Vector<Double> w, int part, VectorMask<Double> m) {
        return (Double64Vector)
            super.unsliceTemplate(Double64Mask.class,
                                  origin, w, part,
                                  (Double64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector unslice(int origin) {
        return (Double64Vector) super.unsliceTemplate(origin);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector rearrange(VectorShuffle<Double> s) {
        return (Double64Vector)
            super.rearrangeTemplate(Double64Shuffle.class,
                                    (Double64Shuffle) s);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector rearrange(VectorShuffle<Double> shuffle,
                                  VectorMask<Double> m) {
        return (Double64Vector)
            super.rearrangeTemplate(Double64Shuffle.class,
                                    Double64Mask.class,
                                    (Double64Shuffle) shuffle,
                                    (Double64Mask) m);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector rearrange(VectorShuffle<Double> s,
                                  Vector<Double> v) {
        return (Double64Vector)
            super.rearrangeTemplate(Double64Shuffle.class,
                                    (Double64Shuffle) s,
                                    (Double64Vector) v);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector selectFrom(Vector<Double> v) {
        return (Double64Vector)
            super.selectFromTemplate((Double64Vector) v);  // specialize
    }

    @Override
    @ForceInline
    public Double64Vector selectFrom(Vector<Double> v,
                                   VectorMask<Double> m) {
        return (Double64Vector)
            super.selectFromTemplate((Double64Vector) v,
                                     (Double64Mask) m);  // specialize
    }


    // Lane access goes through the lane's raw long bit pattern so the
    // extract/insert intrinsics can traffic in integral values; the
    // double is recovered with Double.longBitsToDouble.
    @ForceInline
    @Override
    public double lane(int i) {
        long bits;
        switch(i) {
            case 0: bits = laneHelper(0); break;
            default: throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH);
        }
        return Double.longBitsToDouble(bits);
    }

    // Extracts lane ix as raw bits via the VectorSupport.extract intrinsic;
    // the lambda is the scalar fallback that reads the backing array.
    public long laneHelper(int i) {
        return (long) VectorSupport.extract(
                     VCLASS, ETYPE, VLENGTH,
                     this, i,
                     (vec, ix) -> {
                     double[] vecarr = vec.vec();
                     return (long)Double.doubleToLongBits(vecarr[ix]);
                     });
    }

    @ForceInline
    @Override
    public Double64Vector withLane(int i, double e) {
        switch(i) {
            case 0: return withLaneHelper(0, e);
            default: throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH);
        }
    }

    // Inserts e (as raw bits) at lane ix via the VectorSupport.insert
    // intrinsic; the fallback clones the payload so the vector stays
    // immutable and returns a fresh vector.
    public Double64Vector withLaneHelper(int i, double e) {
        return VectorSupport.insert(
                                VCLASS, ETYPE, VLENGTH,
                                this, i, (long)Double.doubleToLongBits(e),
                                (v, ix, bits) -> {
                                    double[] res = v.vec().clone();
                                    res[ix] = Double.longBitsToDouble((long)bits);
                                    return v.vectorFactory(res);
                                });
    }

    // Mask

    // Boolean-per-lane mask specialized to this shape; payload is a
    // boolean[] of VLENGTH flags.
    static final class Double64Mask extends AbstractMask<Double> {
        static final int VLENGTH = VSPECIES.laneCount();    // used by the JVM
        static final Class<Double> ETYPE = double.class; // used by the JVM

        Double64Mask(boolean[] bits) {
            this(bits, 0);
        }

        Double64Mask(boolean[] bits, int offset) {
            super(prepare(bits, offset));
        }

        Double64Mask(boolean val) {
            super(prepare(val));
        }

        // Copies VLENGTH flags out of bits starting at offset.
        private static boolean[] prepare(boolean[] bits, int offset) {
            boolean[] newBits = new boolean[VSPECIES.laneCount()];
            for (int i = 0; i < newBits.length; i++) {
                newBits[i] = bits[offset + i];
            }
            return newBits;
        }

        // All-lanes-set or all-lanes-clear payload.
        private static boolean[] prepare(boolean val) {
            boolean[] bits = new boolean[VSPECIES.laneCount()];
            Arrays.fill(bits, val);
            return bits;
        }

        @ForceInline
        final @Override
        public DoubleSpecies vspecies() {
            // ISSUE: This should probably be a @Stable
            // field inside AbstractMask, rather than
            // a megamorphic method.
            return VSPECIES;
        }

        @ForceInline
        boolean[] getBits() {
            return (boolean[])getPayload();
        }

        @Override
        Double64Mask uOp(MUnOp f) {
            boolean[] res = new boolean[vspecies().laneCount()];
            boolean[] bits = getBits();
            for (int i = 0; i < res.length; i++) {
                res[i] = f.apply(i, bits[i]);
            }
            return new Double64Mask(res);
        }

        @Override
        Double64Mask bOp(VectorMask<Double> m, MBinOp f) {
            boolean[] res = new boolean[vspecies().laneCount()];
            boolean[] bits = getBits();
            boolean[] mbits = ((Double64Mask)m).getBits();
            for (int i = 0; i < res.length; i++) {
                res[i] = f.apply(i, bits[i], mbits[i]);
            }
            return new Double64Mask(res);
        }

        @ForceInline
        @Override
        public final
        Double64Vector toVector() {
            return (Double64Vector) super.toVectorTemplate();  // specialize
        }

        /**
         * Helper function for lane-wise mask conversions.
         * This function kicks in after intrinsic failure.
         */
        @ForceInline
        private final <E>
        VectorMask<E> defaultMaskCast(AbstractSpecies<E> dsp) {
            if (length() != dsp.laneCount())
                throw new IllegalArgumentException("VectorMask length and species length differ");
            boolean[] maskArray = toArray();
            return  dsp.maskFactory(maskArray).check(dsp);
        }

        @Override
        @ForceInline
        public <E> VectorMask<E> cast(VectorSpecies<E> dsp) {
            AbstractSpecies<E> species = (AbstractSpecies<E>) dsp;
            if (length() != species.laneCount())
                throw new IllegalArgumentException("VectorMask length and species length differ");

            return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST,
                this.getClass(), ETYPE, VLENGTH,
                species.maskType(), species.elementType(), VLENGTH,
                this, species,
                (m, s) -> s.maskFactory(m.toArray()).check(s));
        }

        @Override
        @ForceInline
        public Double64Mask eq(VectorMask<Double> mask) {
            // a == b computed lane-wise as a XOR (NOT b)
            Objects.requireNonNull(mask);
            Double64Mask m = (Double64Mask)mask;
            return xor(m.not());
        }

        // Unary operations

        @Override
        @ForceInline
        public Double64Mask not() {
            // NOT computed lane-wise as XOR with an all-true mask
            return xor(maskAll(true));
        }

        // Binary operations

        @Override
        @ForceInline
        public Double64Mask and(VectorMask<Double> mask) {
            Objects.requireNonNull(mask);
            Double64Mask m = (Double64Mask)mask;
            return VectorSupport.binaryOp(VECTOR_OP_AND, Double64Mask.class, null, long.class, VLENGTH,
                                          this, m, null,
                                          (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b));
        }

        @Override
        @ForceInline
        public Double64Mask or(VectorMask<Double> mask) {
            Objects.requireNonNull(mask);
            Double64Mask m = (Double64Mask)mask;
            return VectorSupport.binaryOp(VECTOR_OP_OR, Double64Mask.class, null, long.class, VLENGTH,
                                          this, m, null,
                                          (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b));
        }

        @ForceInline
        /* package-private */
        Double64Mask xor(VectorMask<Double> mask) {
            Objects.requireNonNull(mask);
            Double64Mask m = (Double64Mask)mask;
            return VectorSupport.binaryOp(VECTOR_OP_XOR, Double64Mask.class, null, long.class, VLENGTH,
                                          this, m, null,
                                          (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a ^ b));
        }

        // Mask Query operations

        @Override
        @ForceInline
        public int trueCount() {
            return (int) VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_TRUECOUNT, Double64Mask.class, long.class, VLENGTH, this,
                                                            (m) -> trueCountHelper(m.getBits()));
        }

        @Override
        @ForceInline
        public int firstTrue() {
            return (int) VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_FIRSTTRUE, Double64Mask.class, long.class, VLENGTH, this,
                                                            (m) -> firstTrueHelper(m.getBits()));
        }

        @Override
        @ForceInline
        public int lastTrue() {
            return (int) VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_LASTTRUE, Double64Mask.class, long.class, VLENGTH, this,
                                                            (m) -> lastTrueHelper(m.getBits()));
        }

        @Override
        @ForceInline
        public long toLong() {
            if (length() > Long.SIZE) {
                throw new UnsupportedOperationException("too many lanes for one long");
            }
            return VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_TOLONG, Double64Mask.class, long.class, VLENGTH, this,
                                                      (m) -> toLongHelper(m.getBits()));
        }

        // Reductions

        @Override
        @ForceInline
        public boolean anyTrue() {
            return VectorSupport.test(BT_ne, Double64Mask.class, long.class, VLENGTH,
                                         this, vspecies().maskAll(true),
                                         (m, __) -> anyTrueHelper(((Double64Mask)m).getBits()));
        }

        @Override
        @ForceInline
        public boolean allTrue() {
            return VectorSupport.test(BT_overflow, Double64Mask.class, long.class, VLENGTH,
                                         this, vspecies().maskAll(true),
                                         (m, __) -> allTrueHelper(((Double64Mask)m).getBits()));
        }

        @ForceInline
        /*package-private*/
        static Double64Mask maskAll(boolean bit) {
            // Broadcasts -1/0 through the fromBitsCoerced intrinsic and maps
            // the result onto the two canonical cached masks.
            return VectorSupport.fromBitsCoerced(Double64Mask.class, long.class, VLENGTH,
                                                 (bit ? -1 : 0), MODE_BROADCAST, null,
                                                 (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK));
        }
        private static final Double64Mask  TRUE_MASK = new Double64Mask(true);
        private static final Double64Mask FALSE_MASK = new Double64Mask(false);

    }

    // Shuffle

    // Lane-permutation table specialized to this shape; lane indexes are
    // stored as bytes (see the static assertions below on the encoding range).
    static final class Double64Shuffle extends AbstractShuffle<Double> {
        static final int VLENGTH = VSPECIES.laneCount();    // used by the JVM
        static final Class<Double> ETYPE = double.class; // used by the JVM

        Double64Shuffle(byte[] reorder) {
            super(VLENGTH, reorder);
        }

        public Double64Shuffle(int[] reorder) {
            super(VLENGTH, reorder);
        }

        public Double64Shuffle(int[] reorder, int i) {
            super(VLENGTH, reorder, i);
        }

        public Double64Shuffle(IntUnaryOperator fn) {
            super(VLENGTH, fn);
        }

        @Override
        public DoubleSpecies vspecies() {
            return VSPECIES;
        }

        static {
            // There must be enough bits in the shuffle lanes to encode
            // VLENGTH valid indexes and VLENGTH exceptional ones.
            assert(VLENGTH < Byte.MAX_VALUE);
            assert(Byte.MIN_VALUE <= -VLENGTH);
        }
        static final Double64Shuffle IOTA = new Double64Shuffle(IDENTITY);

        @Override
        @ForceInline
        public Double64Vector toVector() {
            return VectorSupport.shuffleToVector(VCLASS, ETYPE, Double64Shuffle.class, this, VLENGTH,
                                                    (s) -> ((Double64Vector)(((AbstractShuffle<Double>)(s)).toVectorTemplate())));
        }

        @Override
        @ForceInline
        public <F> VectorShuffle<F> cast(VectorSpecies<F> s) {
            AbstractSpecies<F> species = (AbstractSpecies<F>) s;
            if (length() != species.laneCount())
                throw new IllegalArgumentException("VectorShuffle length and species length differ");
            int[] shuffleArray = toArray();
            return s.shuffleFromArray(shuffleArray, 0).check(s);
        }

        @ForceInline
        @Override
        public Double64Shuffle rearrange(VectorShuffle<Double> shuffle) {
            // Composes this shuffle with another: result lane i takes
            // this.reorder()[shuffle.reorder()[i]].
            Double64Shuffle s = (Double64Shuffle) shuffle;
            byte[] reorder1 = reorder();
            byte[] reorder2 = s.reorder();
            byte[] r = new byte[reorder1.length];
            for (int i = 0; i < reorder1.length; i++) {
                int ssi = reorder2[i];
                r[i] = reorder1[ssi];  // throws on exceptional index
            }
            return new Double64Shuffle(r);
        }
    }

    // ================================================

    // Specialized low-level memory operations.

    @ForceInline
    @Override
    final
    DoubleVector fromArray0(double[] a, int offset) {
        return super.fromArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m) {
        return super.fromArray0Template(Double64Mask.class, a, offset, (Double64Mask) m);  // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
        return super.fromArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m);
    }



    @ForceInline
    @Override
    final
    DoubleVector fromByteArray0(byte[] a, int offset) {
        return super.fromByteArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromByteArray0(byte[] a, int offset, VectorMask<Double> m) {
        return super.fromByteArray0Template(Double64Mask.class, a, offset, (Double64Mask) m);  // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromByteBuffer0(ByteBuffer bb, int offset) {
        return super.fromByteBuffer0Template(bb, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    DoubleVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m) {
        return super.fromByteBuffer0Template(Double64Mask.class, bb, offset, (Double64Mask) m);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset) {
        super.intoArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset, VectorMask<Double> m) {
        super.intoArray0Template(Double64Mask.class, a, offset, (Double64Mask) m);
    }

    @ForceInline
    @Override
    final
    void intoArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) {
        super.intoArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m);
    }


    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset) {
        super.intoByteArray0Template(a, offset);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoByteArray0(byte[] a, int offset, VectorMask<Double> m) {
        super.intoByteArray0Template(Double64Mask.class, a, offset, (Double64Mask) m);  // specialize
    }

    @ForceInline
    @Override
    final
    void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Double> m) {
        super.intoByteBuffer0Template(Double64Mask.class, bb, offset, (Double64Mask) m);
    }


    // End of specialized low-level memory operations.

    // ================================================

}