/*
 * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.incubator.vector;

import java.util.Arrays;
import java.util.Objects;
import java.util.function.IntUnaryOperator;

import jdk.incubator.foreign.MemorySegment;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;

import static jdk.internal.vm.vector.VectorSupport.*;

import static jdk.incubator.vector.VectorOperators.*;

// -- This file was mechanically generated: Do not edit! -- //
-- // 40 41 @SuppressWarnings("cast") // warning: redundant cast 42 final class Double64Vector extends DoubleVector { 43 static final DoubleSpecies VSPECIES = 44 (DoubleSpecies) DoubleVector.SPECIES_64; 45 46 static final VectorShape VSHAPE = 47 VSPECIES.vectorShape(); 48 49 static final Class<Double64Vector> VCLASS = Double64Vector.class; 50 51 static final int VSIZE = VSPECIES.vectorBitSize(); 52 53 static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM 54 55 static final Class<Double> ETYPE = double.class; // used by the JVM 56 57 Double64Vector(double[] v) { 58 super(v); 59 } 60 61 // For compatibility as Double64Vector::new, 62 // stored into species.vectorFactory. 63 Double64Vector(Object v) { 64 this((double[]) v); 65 } 66 67 static final Double64Vector ZERO = new Double64Vector(new double[VLENGTH]); 68 static final Double64Vector IOTA = new Double64Vector(VSPECIES.iotaArray()); 69 70 static { 71 // Warm up a few species caches. 72 // If we do this too much we will 73 // get NPEs from bootstrap circularity. 74 VSPECIES.dummyVector(); 75 VSPECIES.withLanes(LaneType.BYTE); 76 } 77 78 // Specialized extractors 79 80 @ForceInline 81 final @Override 82 public DoubleSpecies vspecies() { 83 // ISSUE: This should probably be a @Stable 84 // field inside AbstractVector, rather than 85 // a megamorphic method. 
86 return VSPECIES; 87 } 88 89 @ForceInline 90 @Override 91 public final Class<Double> elementType() { return double.class; } 92 93 @ForceInline 94 @Override 95 public final int elementSize() { return Double.SIZE; } 96 97 @ForceInline 98 @Override 99 public final VectorShape shape() { return VSHAPE; } 100 101 @ForceInline 102 @Override 103 public final int length() { return VLENGTH; } 104 105 @ForceInline 106 @Override 107 public final int bitSize() { return VSIZE; } 108 109 @ForceInline 110 @Override 111 public final int byteSize() { return VSIZE / Byte.SIZE; } 112 113 /*package-private*/ 114 @ForceInline 115 final @Override 116 double[] vec() { 117 return (double[])getPayload(); 118 } 119 120 // Virtualized constructors 121 122 @Override 123 @ForceInline 124 public final Double64Vector broadcast(double e) { 125 return (Double64Vector) super.broadcastTemplate(e); // specialize 126 } 127 128 @Override 129 @ForceInline 130 public final Double64Vector broadcast(long e) { 131 return (Double64Vector) super.broadcastTemplate(e); // specialize 132 } 133 134 @Override 135 @ForceInline 136 Double64Mask maskFromArray(boolean[] bits) { 137 return new Double64Mask(bits); 138 } 139 140 @Override 141 @ForceInline 142 Double64Shuffle iotaShuffle() { return Double64Shuffle.IOTA; } 143 144 @ForceInline 145 Double64Shuffle iotaShuffle(int start, int step, boolean wrap) { 146 if (wrap) { 147 return (Double64Shuffle)VectorSupport.shuffleIota(ETYPE, Double64Shuffle.class, VSPECIES, VLENGTH, start, step, 1, 148 (l, lstart, lstep, s) -> s.shuffleFromOp(i -> (VectorIntrinsics.wrapToRange(i*lstep + lstart, l)))); 149 } else { 150 return (Double64Shuffle)VectorSupport.shuffleIota(ETYPE, Double64Shuffle.class, VSPECIES, VLENGTH, start, step, 0, 151 (l, lstart, lstep, s) -> s.shuffleFromOp(i -> (i*lstep + lstart))); 152 } 153 } 154 155 @Override 156 @ForceInline 157 Double64Shuffle shuffleFromBytes(byte[] reorder) { return new Double64Shuffle(reorder); } 158 159 @Override 160 @ForceInline 
161 Double64Shuffle shuffleFromArray(int[] indexes, int i) { return new Double64Shuffle(indexes, i); } 162 163 @Override 164 @ForceInline 165 Double64Shuffle shuffleFromOp(IntUnaryOperator fn) { return new Double64Shuffle(fn); } 166 167 // Make a vector of the same species but the given elements: 168 @ForceInline 169 final @Override 170 Double64Vector vectorFactory(double[] vec) { 171 return new Double64Vector(vec); 172 } 173 174 @ForceInline 175 final @Override 176 Byte64Vector asByteVectorRaw() { 177 return (Byte64Vector) super.asByteVectorRawTemplate(); // specialize 178 } 179 180 @ForceInline 181 final @Override 182 AbstractVector<?> asVectorRaw(LaneType laneType) { 183 return super.asVectorRawTemplate(laneType); // specialize 184 } 185 186 // Unary operator 187 188 @ForceInline 189 final @Override 190 Double64Vector uOp(FUnOp f) { 191 return (Double64Vector) super.uOpTemplate(f); // specialize 192 } 193 194 @ForceInline 195 final @Override 196 Double64Vector uOp(VectorMask<Double> m, FUnOp f) { 197 return (Double64Vector) 198 super.uOpTemplate((Double64Mask)m, f); // specialize 199 } 200 201 // Binary operator 202 203 @ForceInline 204 final @Override 205 Double64Vector bOp(Vector<Double> v, FBinOp f) { 206 return (Double64Vector) super.bOpTemplate((Double64Vector)v, f); // specialize 207 } 208 209 @ForceInline 210 final @Override 211 Double64Vector bOp(Vector<Double> v, 212 VectorMask<Double> m, FBinOp f) { 213 return (Double64Vector) 214 super.bOpTemplate((Double64Vector)v, (Double64Mask)m, 215 f); // specialize 216 } 217 218 // Ternary operator 219 220 @ForceInline 221 final @Override 222 Double64Vector tOp(Vector<Double> v1, Vector<Double> v2, FTriOp f) { 223 return (Double64Vector) 224 super.tOpTemplate((Double64Vector)v1, (Double64Vector)v2, 225 f); // specialize 226 } 227 228 @ForceInline 229 final @Override 230 Double64Vector tOp(Vector<Double> v1, Vector<Double> v2, 231 VectorMask<Double> m, FTriOp f) { 232 return (Double64Vector) 233 
super.tOpTemplate((Double64Vector)v1, (Double64Vector)v2, 234 (Double64Mask)m, f); // specialize 235 } 236 237 @ForceInline 238 final @Override 239 double rOp(double v, VectorMask<Double> m, FBinOp f) { 240 return super.rOpTemplate(v, m, f); // specialize 241 } 242 243 @Override 244 @ForceInline 245 public final <F> 246 Vector<F> convertShape(VectorOperators.Conversion<Double,F> conv, 247 VectorSpecies<F> rsp, int part) { 248 return super.convertShapeTemplate(conv, rsp, part); // specialize 249 } 250 251 @Override 252 @ForceInline 253 public final <F> 254 Vector<F> reinterpretShape(VectorSpecies<F> toSpecies, int part) { 255 return super.reinterpretShapeTemplate(toSpecies, part); // specialize 256 } 257 258 // Specialized algebraic operations: 259 260 // The following definition forces a specialized version of this 261 // crucial method into the v-table of this class. A call to add() 262 // will inline to a call to lanewise(ADD,), at which point the JIT 263 // intrinsic will have the opcode of ADD, plus all the metadata 264 // for this particular class, enabling it to generate precise 265 // code. 266 // 267 // There is probably no benefit to the JIT to specialize the 268 // masked or broadcast versions of the lanewise method. 
269 270 @Override 271 @ForceInline 272 public Double64Vector lanewise(Unary op) { 273 return (Double64Vector) super.lanewiseTemplate(op); // specialize 274 } 275 276 @Override 277 @ForceInline 278 public Double64Vector lanewise(Unary op, VectorMask<Double> m) { 279 return (Double64Vector) super.lanewiseTemplate(op, Double64Mask.class, (Double64Mask) m); // specialize 280 } 281 282 @Override 283 @ForceInline 284 public Double64Vector lanewise(Binary op, Vector<Double> v) { 285 return (Double64Vector) super.lanewiseTemplate(op, v); // specialize 286 } 287 288 @Override 289 @ForceInline 290 public Double64Vector lanewise(Binary op, Vector<Double> v, VectorMask<Double> m) { 291 return (Double64Vector) super.lanewiseTemplate(op, Double64Mask.class, v, (Double64Mask) m); // specialize 292 } 293 294 295 /*package-private*/ 296 @Override 297 @ForceInline 298 public final 299 Double64Vector 300 lanewise(Ternary op, Vector<Double> v1, Vector<Double> v2) { 301 return (Double64Vector) super.lanewiseTemplate(op, v1, v2); // specialize 302 } 303 304 @Override 305 @ForceInline 306 public final 307 Double64Vector 308 lanewise(Ternary op, Vector<Double> v1, Vector<Double> v2, VectorMask<Double> m) { 309 return (Double64Vector) super.lanewiseTemplate(op, Double64Mask.class, v1, v2, (Double64Mask) m); // specialize 310 } 311 312 @Override 313 @ForceInline 314 public final 315 Double64Vector addIndex(int scale) { 316 return (Double64Vector) super.addIndexTemplate(scale); // specialize 317 } 318 319 // Type specific horizontal reductions 320 321 @Override 322 @ForceInline 323 public final double reduceLanes(VectorOperators.Associative op) { 324 return super.reduceLanesTemplate(op); // specialized 325 } 326 327 @Override 328 @ForceInline 329 public final double reduceLanes(VectorOperators.Associative op, 330 VectorMask<Double> m) { 331 return super.reduceLanesTemplate(op, Double64Mask.class, (Double64Mask) m); // specialized 332 } 333 334 @Override 335 @ForceInline 336 public final long 
reduceLanesToLong(VectorOperators.Associative op) { 337 return (long) super.reduceLanesTemplate(op); // specialized 338 } 339 340 @Override 341 @ForceInline 342 public final long reduceLanesToLong(VectorOperators.Associative op, 343 VectorMask<Double> m) { 344 return (long) super.reduceLanesTemplate(op, Double64Mask.class, (Double64Mask) m); // specialized 345 } 346 347 @ForceInline 348 public VectorShuffle<Double> toShuffle() { 349 return super.toShuffleTemplate(Double64Shuffle.class); // specialize 350 } 351 352 // Specialized unary testing 353 354 @Override 355 @ForceInline 356 public final Double64Mask test(Test op) { 357 return super.testTemplate(Double64Mask.class, op); // specialize 358 } 359 360 @Override 361 @ForceInline 362 public final Double64Mask test(Test op, VectorMask<Double> m) { 363 return super.testTemplate(Double64Mask.class, op, (Double64Mask) m); // specialize 364 } 365 366 // Specialized comparisons 367 368 @Override 369 @ForceInline 370 public final Double64Mask compare(Comparison op, Vector<Double> v) { 371 return super.compareTemplate(Double64Mask.class, op, v); // specialize 372 } 373 374 @Override 375 @ForceInline 376 public final Double64Mask compare(Comparison op, double s) { 377 return super.compareTemplate(Double64Mask.class, op, s); // specialize 378 } 379 380 @Override 381 @ForceInline 382 public final Double64Mask compare(Comparison op, long s) { 383 return super.compareTemplate(Double64Mask.class, op, s); // specialize 384 } 385 386 @Override 387 @ForceInline 388 public final Double64Mask compare(Comparison op, Vector<Double> v, VectorMask<Double> m) { 389 return super.compareTemplate(Double64Mask.class, op, v, (Double64Mask) m); 390 } 391 392 393 @Override 394 @ForceInline 395 public Double64Vector blend(Vector<Double> v, VectorMask<Double> m) { 396 return (Double64Vector) 397 super.blendTemplate(Double64Mask.class, 398 (Double64Vector) v, 399 (Double64Mask) m); // specialize 400 } 401 402 @Override 403 @ForceInline 404 public 
Double64Vector slice(int origin, Vector<Double> v) { 405 return (Double64Vector) super.sliceTemplate(origin, v); // specialize 406 } 407 408 @Override 409 @ForceInline 410 public Double64Vector slice(int origin) { 411 return (Double64Vector) super.sliceTemplate(origin); // specialize 412 } 413 414 @Override 415 @ForceInline 416 public Double64Vector unslice(int origin, Vector<Double> w, int part) { 417 return (Double64Vector) super.unsliceTemplate(origin, w, part); // specialize 418 } 419 420 @Override 421 @ForceInline 422 public Double64Vector unslice(int origin, Vector<Double> w, int part, VectorMask<Double> m) { 423 return (Double64Vector) 424 super.unsliceTemplate(Double64Mask.class, 425 origin, w, part, 426 (Double64Mask) m); // specialize 427 } 428 429 @Override 430 @ForceInline 431 public Double64Vector unslice(int origin) { 432 return (Double64Vector) super.unsliceTemplate(origin); // specialize 433 } 434 435 @Override 436 @ForceInline 437 public Double64Vector rearrange(VectorShuffle<Double> s) { 438 return (Double64Vector) 439 super.rearrangeTemplate(Double64Shuffle.class, 440 (Double64Shuffle) s); // specialize 441 } 442 443 @Override 444 @ForceInline 445 public Double64Vector rearrange(VectorShuffle<Double> shuffle, 446 VectorMask<Double> m) { 447 return (Double64Vector) 448 super.rearrangeTemplate(Double64Shuffle.class, 449 Double64Mask.class, 450 (Double64Shuffle) shuffle, 451 (Double64Mask) m); // specialize 452 } 453 454 @Override 455 @ForceInline 456 public Double64Vector rearrange(VectorShuffle<Double> s, 457 Vector<Double> v) { 458 return (Double64Vector) 459 super.rearrangeTemplate(Double64Shuffle.class, 460 (Double64Shuffle) s, 461 (Double64Vector) v); // specialize 462 } 463 464 @Override 465 @ForceInline 466 public Double64Vector compress(VectorMask<Double> m) { 467 return (Double64Vector) 468 super.compressTemplate(Double64Mask.class, 469 (Double64Mask) m); // specialize 470 } 471 472 @Override 473 @ForceInline 474 public Double64Vector 
expand(VectorMask<Double> m) { 475 return (Double64Vector) 476 super.expandTemplate(Double64Mask.class, 477 (Double64Mask) m); // specialize 478 } 479 480 @Override 481 @ForceInline 482 public Double64Vector selectFrom(Vector<Double> v) { 483 return (Double64Vector) 484 super.selectFromTemplate((Double64Vector) v); // specialize 485 } 486 487 @Override 488 @ForceInline 489 public Double64Vector selectFrom(Vector<Double> v, 490 VectorMask<Double> m) { 491 return (Double64Vector) 492 super.selectFromTemplate((Double64Vector) v, 493 (Double64Mask) m); // specialize 494 } 495 496 497 @ForceInline 498 @Override 499 public double lane(int i) { 500 long bits; 501 switch(i) { 502 case 0: bits = laneHelper(0); break; 503 default: throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH); 504 } 505 return Double.longBitsToDouble(bits); 506 } 507 508 public long laneHelper(int i) { 509 return (long) VectorSupport.extract( 510 VCLASS, ETYPE, VLENGTH, 511 this, i, 512 (vec, ix) -> { 513 double[] vecarr = vec.vec(); 514 return (long)Double.doubleToLongBits(vecarr[ix]); 515 }); 516 } 517 518 @ForceInline 519 @Override 520 public Double64Vector withLane(int i, double e) { 521 switch(i) { 522 case 0: return withLaneHelper(0, e); 523 default: throw new IllegalArgumentException("Index " + i + " must be zero or positive, and less than " + VLENGTH); 524 } 525 } 526 527 public Double64Vector withLaneHelper(int i, double e) { 528 return VectorSupport.insert( 529 VCLASS, ETYPE, VLENGTH, 530 this, i, (long)Double.doubleToLongBits(e), 531 (v, ix, bits) -> { 532 double[] res = v.vec().clone(); 533 res[ix] = Double.longBitsToDouble((long)bits); 534 return v.vectorFactory(res); 535 }); 536 } 537 538 // Mask 539 540 static final class Double64Mask extends AbstractMask<Double> { 541 static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM 542 static final Class<Double> ETYPE = double.class; // used by the JVM 543 544 
Double64Mask(boolean[] bits) { 545 this(bits, 0); 546 } 547 548 Double64Mask(boolean[] bits, int offset) { 549 super(prepare(bits, offset)); 550 } 551 552 Double64Mask(boolean val) { 553 super(prepare(val)); 554 } 555 556 private static boolean[] prepare(boolean[] bits, int offset) { 557 boolean[] newBits = new boolean[VSPECIES.laneCount()]; 558 for (int i = 0; i < newBits.length; i++) { 559 newBits[i] = bits[offset + i]; 560 } 561 return newBits; 562 } 563 564 private static boolean[] prepare(boolean val) { 565 boolean[] bits = new boolean[VSPECIES.laneCount()]; 566 Arrays.fill(bits, val); 567 return bits; 568 } 569 570 @ForceInline 571 final @Override 572 public DoubleSpecies vspecies() { 573 // ISSUE: This should probably be a @Stable 574 // field inside AbstractMask, rather than 575 // a megamorphic method. 576 return VSPECIES; 577 } 578 579 @ForceInline 580 boolean[] getBits() { 581 return (boolean[])getPayload(); 582 } 583 584 @Override 585 Double64Mask uOp(MUnOp f) { 586 boolean[] res = new boolean[vspecies().laneCount()]; 587 boolean[] bits = getBits(); 588 for (int i = 0; i < res.length; i++) { 589 res[i] = f.apply(i, bits[i]); 590 } 591 return new Double64Mask(res); 592 } 593 594 @Override 595 Double64Mask bOp(VectorMask<Double> m, MBinOp f) { 596 boolean[] res = new boolean[vspecies().laneCount()]; 597 boolean[] bits = getBits(); 598 boolean[] mbits = ((Double64Mask)m).getBits(); 599 for (int i = 0; i < res.length; i++) { 600 res[i] = f.apply(i, bits[i], mbits[i]); 601 } 602 return new Double64Mask(res); 603 } 604 605 @ForceInline 606 @Override 607 public final 608 Double64Vector toVector() { 609 return (Double64Vector) super.toVectorTemplate(); // specialize 610 } 611 612 /** 613 * Helper function for lane-wise mask conversions. 614 * This function kicks in after intrinsic failure. 
615 */ 616 @ForceInline 617 private final <E> 618 VectorMask<E> defaultMaskCast(AbstractSpecies<E> dsp) { 619 if (length() != dsp.laneCount()) 620 throw new IllegalArgumentException("VectorMask length and species length differ"); 621 boolean[] maskArray = toArray(); 622 return dsp.maskFactory(maskArray).check(dsp); 623 } 624 625 @Override 626 @ForceInline 627 public <E> VectorMask<E> cast(VectorSpecies<E> dsp) { 628 AbstractSpecies<E> species = (AbstractSpecies<E>) dsp; 629 if (length() != species.laneCount()) 630 throw new IllegalArgumentException("VectorMask length and species length differ"); 631 632 return VectorSupport.convert(VectorSupport.VECTOR_OP_CAST, 633 this.getClass(), ETYPE, VLENGTH, 634 species.maskType(), species.elementType(), VLENGTH, 635 this, species, 636 (m, s) -> s.maskFactory(m.toArray()).check(s)); 637 } 638 639 @Override 640 @ForceInline 641 public Double64Mask eq(VectorMask<Double> mask) { 642 Objects.requireNonNull(mask); 643 Double64Mask m = (Double64Mask)mask; 644 return xor(m.not()); 645 } 646 647 // Unary operations 648 649 @Override 650 @ForceInline 651 public Double64Mask not() { 652 return xor(maskAll(true)); 653 } 654 655 @Override 656 @ForceInline 657 public Double64Mask compress() { 658 return (Double64Mask)VectorSupport.comExpOp(VectorSupport.VECTOR_OP_MASK_COMPRESS, 659 Double64Vector.class, Double64Mask.class, ETYPE, VLENGTH, null, this, 660 (v1, m1) -> VSPECIES.iota().compare(VectorOperators.LT, m1.trueCount())); 661 } 662 663 664 // Binary operations 665 666 @Override 667 @ForceInline 668 public Double64Mask and(VectorMask<Double> mask) { 669 Objects.requireNonNull(mask); 670 Double64Mask m = (Double64Mask)mask; 671 return VectorSupport.binaryOp(VECTOR_OP_AND, Double64Mask.class, null, long.class, VLENGTH, 672 this, m, null, 673 (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a & b)); 674 } 675 676 @Override 677 @ForceInline 678 public Double64Mask or(VectorMask<Double> mask) { 679 Objects.requireNonNull(mask); 680 Double64Mask m = 
(Double64Mask)mask; 681 return VectorSupport.binaryOp(VECTOR_OP_OR, Double64Mask.class, null, long.class, VLENGTH, 682 this, m, null, 683 (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a | b)); 684 } 685 686 @ForceInline 687 /* package-private */ 688 Double64Mask xor(VectorMask<Double> mask) { 689 Objects.requireNonNull(mask); 690 Double64Mask m = (Double64Mask)mask; 691 return VectorSupport.binaryOp(VECTOR_OP_XOR, Double64Mask.class, null, long.class, VLENGTH, 692 this, m, null, 693 (m1, m2, vm) -> m1.bOp(m2, (i, a, b) -> a ^ b)); 694 } 695 696 // Mask Query operations 697 698 @Override 699 @ForceInline 700 public int trueCount() { 701 return (int) VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_TRUECOUNT, Double64Mask.class, long.class, VLENGTH, this, 702 (m) -> trueCountHelper(m.getBits())); 703 } 704 705 @Override 706 @ForceInline 707 public int firstTrue() { 708 return (int) VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_FIRSTTRUE, Double64Mask.class, long.class, VLENGTH, this, 709 (m) -> firstTrueHelper(m.getBits())); 710 } 711 712 @Override 713 @ForceInline 714 public int lastTrue() { 715 return (int) VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_LASTTRUE, Double64Mask.class, long.class, VLENGTH, this, 716 (m) -> lastTrueHelper(m.getBits())); 717 } 718 719 @Override 720 @ForceInline 721 public long toLong() { 722 if (length() > Long.SIZE) { 723 throw new UnsupportedOperationException("too many lanes for one long"); 724 } 725 return VectorSupport.maskReductionCoerced(VECTOR_OP_MASK_TOLONG, Double64Mask.class, long.class, VLENGTH, this, 726 (m) -> toLongHelper(m.getBits())); 727 } 728 729 // Reductions 730 731 @Override 732 @ForceInline 733 public boolean anyTrue() { 734 return VectorSupport.test(BT_ne, Double64Mask.class, long.class, VLENGTH, 735 this, vspecies().maskAll(true), 736 (m, __) -> anyTrueHelper(((Double64Mask)m).getBits())); 737 } 738 739 @Override 740 @ForceInline 741 public boolean allTrue() { 742 return VectorSupport.test(BT_overflow, 
Double64Mask.class, long.class, VLENGTH, 743 this, vspecies().maskAll(true), 744 (m, __) -> allTrueHelper(((Double64Mask)m).getBits())); 745 } 746 747 @ForceInline 748 /*package-private*/ 749 static Double64Mask maskAll(boolean bit) { 750 return VectorSupport.fromBitsCoerced(Double64Mask.class, long.class, VLENGTH, 751 (bit ? -1 : 0), MODE_BROADCAST, null, 752 (v, __) -> (v != 0 ? TRUE_MASK : FALSE_MASK)); 753 } 754 private static final Double64Mask TRUE_MASK = new Double64Mask(true); 755 private static final Double64Mask FALSE_MASK = new Double64Mask(false); 756 757 } 758 759 // Shuffle 760 761 static final class Double64Shuffle extends AbstractShuffle<Double> { 762 static final int VLENGTH = VSPECIES.laneCount(); // used by the JVM 763 static final Class<Double> ETYPE = double.class; // used by the JVM 764 765 Double64Shuffle(byte[] reorder) { 766 super(VLENGTH, reorder); 767 } 768 769 public Double64Shuffle(int[] reorder) { 770 super(VLENGTH, reorder); 771 } 772 773 public Double64Shuffle(int[] reorder, int i) { 774 super(VLENGTH, reorder, i); 775 } 776 777 public Double64Shuffle(IntUnaryOperator fn) { 778 super(VLENGTH, fn); 779 } 780 781 @Override 782 public DoubleSpecies vspecies() { 783 return VSPECIES; 784 } 785 786 static { 787 // There must be enough bits in the shuffle lanes to encode 788 // VLENGTH valid indexes and VLENGTH exceptional ones. 
789 assert(VLENGTH < Byte.MAX_VALUE); 790 assert(Byte.MIN_VALUE <= -VLENGTH); 791 } 792 static final Double64Shuffle IOTA = new Double64Shuffle(IDENTITY); 793 794 @Override 795 @ForceInline 796 public Double64Vector toVector() { 797 return VectorSupport.shuffleToVector(VCLASS, ETYPE, Double64Shuffle.class, this, VLENGTH, 798 (s) -> ((Double64Vector)(((AbstractShuffle<Double>)(s)).toVectorTemplate()))); 799 } 800 801 @Override 802 @ForceInline 803 public <F> VectorShuffle<F> cast(VectorSpecies<F> s) { 804 AbstractSpecies<F> species = (AbstractSpecies<F>) s; 805 if (length() != species.laneCount()) 806 throw new IllegalArgumentException("VectorShuffle length and species length differ"); 807 int[] shuffleArray = toArray(); 808 return s.shuffleFromArray(shuffleArray, 0).check(s); 809 } 810 811 @ForceInline 812 @Override 813 public Double64Shuffle rearrange(VectorShuffle<Double> shuffle) { 814 Double64Shuffle s = (Double64Shuffle) shuffle; 815 byte[] reorder1 = reorder(); 816 byte[] reorder2 = s.reorder(); 817 byte[] r = new byte[reorder1.length]; 818 for (int i = 0; i < reorder1.length; i++) { 819 int ssi = reorder2[i]; 820 r[i] = reorder1[ssi]; // throws on exceptional index 821 } 822 return new Double64Shuffle(r); 823 } 824 } 825 826 // ================================================ 827 828 // Specialized low-level memory operations. 
829 830 @ForceInline 831 @Override 832 final 833 DoubleVector fromArray0(double[] a, int offset) { 834 return super.fromArray0Template(a, offset); // specialize 835 } 836 837 @ForceInline 838 @Override 839 final 840 DoubleVector fromArray0(double[] a, int offset, VectorMask<Double> m) { 841 return super.fromArray0Template(Double64Mask.class, a, offset, (Double64Mask) m); // specialize 842 } 843 844 @ForceInline 845 @Override 846 final 847 DoubleVector fromArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) { 848 return super.fromArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m); 849 } 850 851 852 853 @ForceInline 854 @Override 855 final 856 DoubleVector fromMemorySegment0(MemorySegment ms, long offset) { 857 return super.fromMemorySegment0Template(ms, offset); // specialize 858 } 859 860 @ForceInline 861 @Override 862 final 863 DoubleVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Double> m) { 864 return super.fromMemorySegment0Template(Double64Mask.class, ms, offset, (Double64Mask) m); // specialize 865 } 866 867 @ForceInline 868 @Override 869 final 870 void intoArray0(double[] a, int offset) { 871 super.intoArray0Template(a, offset); // specialize 872 } 873 874 @ForceInline 875 @Override 876 final 877 void intoArray0(double[] a, int offset, VectorMask<Double> m) { 878 super.intoArray0Template(Double64Mask.class, a, offset, (Double64Mask) m); 879 } 880 881 @ForceInline 882 @Override 883 final 884 void intoArray0(double[] a, int offset, int[] indexMap, int mapOffset, VectorMask<Double> m) { 885 super.intoArray0Template(Double64Mask.class, a, offset, indexMap, mapOffset, (Double64Mask) m); 886 } 887 888 889 @ForceInline 890 @Override 891 final 892 void intoMemorySegment0(MemorySegment ms, long offset, VectorMask<Double> m) { 893 super.intoMemorySegment0Template(Double64Mask.class, ms, offset, (Double64Mask) m); 894 } 895 896 897 // End of specialized low-level memory operations. 
898 899 // ================================================ 900 901 }