src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.incubator.vector;
- import java.nio.ByteBuffer;
import java.nio.ByteOrder;
- import java.nio.ReadOnlyBufferException;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.Function;
- import java.util.function.UnaryOperator;
+ import jdk.incubator.foreign.MemorySegment;
+ import jdk.incubator.foreign.ValueLayout;
+ import jdk.internal.access.foreign.MemorySegmentProxy;
import jdk.internal.misc.ScopedMemoryAccess;
import jdk.internal.misc.Unsafe;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;
static final int FORBID_OPCODE_KIND = VO_NOFP;
#else[FP]
static final int FORBID_OPCODE_KIND = VO_ONLYFP;
#end[FP]
+ static final ValueLayout.Of$Type$ ELEMENT_LAYOUT = ValueLayout.JAVA_$TYPE$.withBitAlignment(8);
+
@ForceInline
static int opCode(Operator op) {
return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
}
@ForceInline
}
}
return vectorFactory(res);
}
+ /*package-private*/
+ interface FLdLongOp {
+ $type$ apply(MemorySegment memory, long offset, int i);
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ $abstractvectortype$ ldLongOp(MemorySegment memory, long offset,
+ FLdLongOp f) {
+ //dummy; no vec = vec();
+ $type$[] res = new $type$[length()];
+ for (int i = 0; i < res.length; i++) {
+ res[i] = f.apply(memory, offset, i);
+ }
+ return vectorFactory(res);
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ $abstractvectortype$ ldLongOp(MemorySegment memory, long offset,
+ VectorMask<$Boxtype$> m,
+ FLdLongOp f) {
+ //$type$[] vec = vec();
+ $type$[] res = new $type$[length()];
+ boolean[] mbits = ((AbstractMask<$Boxtype$>)m).getBits();
+ for (int i = 0; i < res.length; i++) {
+ if (mbits[i]) {
+ res[i] = f.apply(memory, offset, i);
+ }
+ }
+ return vectorFactory(res);
+ }
+
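+ // Scalar accessor used by the fallback loops above: lane i is read from
+ // byte offset o + i * $sizeInBytes$; ELEMENT_LAYOUT is declared with 8-bit
+ // alignment, so any in-bounds byte offset is accepted.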
+ static $type$ memorySegmentGet(MemorySegment ms, long o, int i) {
+ return ms.get(ELEMENT_LAYOUT, o + i * $sizeInBytes$L);
+ }
+
interface FStOp<M> {
void apply(M memory, int offset, int i, $type$ a);
}
/*package-private*/
f.apply(memory, offset, i, vec[i]);
}
}
}
+ interface FStLongOp {
+ void apply(MemorySegment memory, long offset, int i, $type$ a);
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ void stLongOp(MemorySegment memory, long offset,
+ FStLongOp f) {
+ $type$[] vec = vec();
+ for (int i = 0; i < vec.length; i++) {
+ f.apply(memory, offset, i, vec[i]);
+ }
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ void stLongOp(MemorySegment memory, long offset,
+ VectorMask<$Boxtype$> m,
+ FStLongOp f) {
+ $type$[] vec = vec();
+ boolean[] mbits = ((AbstractMask<$Boxtype$>)m).getBits();
+ for (int i = 0; i < vec.length; i++) {
+ if (mbits[i]) {
+ f.apply(memory, offset, i, vec[i]);
+ }
+ }
+ }
+
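+ // Scalar counterpart of memorySegmentGet: writes lane value e at byte
+ // offset o + i * $sizeInBytes$ through the same byte-aligned element layout.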
+ static void memorySegmentSet(MemorySegment ms, long o, int i, $type$ e) {
+ ms.set(ELEMENT_LAYOUT, o + i * $sizeInBytes$L, e);
+ }
+
// Binary test
/*package-private*/
interface FBinTest {
boolean apply(int cond, int i, $type$ a, $type$ b);
@ForceInline
static $type$ fromBits(long bits) {
return {#if[FP]?$Type$.$bitstype$BitsTo$Type$}(($bitstype$)bits);
}
+ static $abstractvectortype$ expandHelper(Vector<$Boxtype$> v, VectorMask<$Boxtype$> m) {
+ VectorSpecies<$Boxtype$> vsp = m.vectorSpecies();
+ $abstractvectortype$ r = ($abstractvectortype$) vsp.zero();
+ $abstractvectortype$ vi = ($abstractvectortype$) v;
+ if (m.allTrue()) {
+ return vi;
+ }
+ for (int i = 0, j = 0; i < vsp.length(); i++) {
+ if (m.laneIsSet(i)) {
+ r = r.withLane(i, vi.lane(j++));
+ }
+ }
+ return r;
+ }
+
+ static $abstractvectortype$ compressHelper(Vector<$Boxtype$> v, VectorMask<$Boxtype$> m) {
+ VectorSpecies<$Boxtype$> vsp = m.vectorSpecies();
+ $abstractvectortype$ r = ($abstractvectortype$) vsp.zero();
+ $abstractvectortype$ vi = ($abstractvectortype$) v;
+ if (m.allTrue()) {
+ return vi;
+ }
+ for (int i = 0, j = 0; i < vsp.length(); i++) {
+ if (m.laneIsSet(i)) {
+ r = r.withLane(j++, vi.lane(i));
+ }
+ }
+ return r;
+ }
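+
+ // Worked example of the two scalar fallbacks above, for a 4-lane vector
+ // v = [a, b, c, d] and mask m = {true, false, true, false}:
+ //   compressHelper(v, m) -> [a, c, 0, 0]  (set lanes packed toward lane 0)
+ //   expandHelper(v, m)   -> [a, 0, b, 0]  (leading lanes scattered to the set lanes)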
+
// Static factories (other than memory operations)
// Note: A surprising behavior in javadoc
// sometimes makes a lone /** {@inheritDoc} */
// comment drop the method altogether,
switch (opc_) {
case VECTOR_OP_NEG: return (v0, m) ->
v0.uOp(m, (i, a) -> ($type$) -a);
case VECTOR_OP_ABS: return (v0, m) ->
v0.uOp(m, (i, a) -> ($type$) Math.abs(a));
+ #if[!FP]
+ #if[intOrLong]
+ case VECTOR_OP_BIT_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) $Boxtype$.bitCount(a));
+ case VECTOR_OP_TZ_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) $Boxtype$.numberOfTrailingZeros(a));
+ case VECTOR_OP_LZ_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) $Boxtype$.numberOfLeadingZeros(a));
+ case VECTOR_OP_REVERSE: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) $Boxtype$.reverse(a));
+ #else[intOrLong]
+ case VECTOR_OP_BIT_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) bitCount(a));
+ case VECTOR_OP_TZ_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) numberOfTrailingZeros(a));
+ case VECTOR_OP_LZ_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) numberOfLeadingZeros(a));
+ case VECTOR_OP_REVERSE: return (v0, m) ->
+ v0.uOp(m, (i, a) -> reverse(a));
+ #end[intOrLong]
+ #if[BITWISE]
+ #if[byte]
+ case VECTOR_OP_REVERSE_BYTES: return (v0, m) ->
+ v0.uOp(m, (i, a) -> a);
+ #else[byte]
+ case VECTOR_OP_REVERSE_BYTES: return (v0, m) ->
+ v0.uOp(m, (i, a) -> ($type$) $Boxtype$.reverseBytes(a));
+ #end[byte]
+ #end[BITWISE]
+ #end[!FP]
#if[FP]
case VECTOR_OP_SIN: return (v0, m) ->
v0.uOp(m, (i, a) -> ($type$) Math.sin(a));
case VECTOR_OP_COS: return (v0, m) ->
v0.uOp(m, (i, a) -> ($type$) Math.cos(a));
v0.bOp(v1, vm, (i, a, n) -> ($type$)((a & LSHR_SETUP_MASK) >>> n));
case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
+ #if[intOrLong]
+ case VECTOR_OP_COMPRESS_BITS: return (v0, v1, vm) ->
+ v0.bOp(v1, vm, (i, a, n) -> $Boxtype$.compress(a, n));
+ case VECTOR_OP_EXPAND_BITS: return (v0, v1, vm) ->
+ v0.bOp(v1, vm, (i, a, n) -> $Boxtype$.expand(a, n));
+ #end[intOrLong]
#end[BITWISE]
#if[FP]
case VECTOR_OP_OR: return (v0, v1, vm) ->
v0.bOp(v1, vm, (i, a, b) -> fromBits(toBits(a) | toBits(b)));
case VECTOR_OP_ATAN2: return (v0, v1, vm) ->
public final
$abstractvectortype$ abs() {
return lanewise(ABS);
}
+ #if[!FP]
+ #if[!intOrLong]
+ static int bitCount($type$ a) {
+ #if[short]
+ return Integer.bitCount((int)a & 0xFFFF);
+ #else[short]
+ return Integer.bitCount((int)a & 0xFF);
+ #end[short]
+ }
+ #end[!intOrLong]
+ #end[!FP]
+ #if[!FP]
+ #if[!intOrLong]
+ static int numberOfTrailingZeros($type$ a) {
+ #if[short]
+ return a != 0 ? Integer.numberOfTrailingZeros(a) : 16;
+ #else[short]
+ return a != 0 ? Integer.numberOfTrailingZeros(a) : 8;
+ #end[short]
+ }
+ #end[!intOrLong]
+ #end[!FP]
+ #if[!FP]
+ #if[!intOrLong]
+ static int numberOfLeadingZeros($type$ a) {
+ #if[short]
+ return a >= 0 ? Integer.numberOfLeadingZeros(a) - 16 : 0;
+ #else[short]
+ return a >= 0 ? Integer.numberOfLeadingZeros(a) - 24 : 0;
+ #end[short]
+ }
+
+ static $type$ reverse($type$ a) {
+ if (a == 0 || a == -1) return a;
+
+ #if[short]
+ $type$ b = rotateLeft(a, 8);
+ b = ($type$) (((b & 0x5555) << 1) | ((b & 0xAAAA) >>> 1));
+ b = ($type$) (((b & 0x3333) << 2) | ((b & 0xCCCC) >>> 2));
+ b = ($type$) (((b & 0x0F0F) << 4) | ((b & 0xF0F0) >>> 4));
+ #else[short]
+ $type$ b = rotateLeft(a, 4);
+ b = ($type$) (((b & 0x55) << 1) | ((b & 0xAA) >>> 1));
+ b = ($type$) (((b & 0x33) << 2) | ((b & 0xCC) >>> 2));
+ #end[short]
+ return b;
+ }
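+
+ // Worked example for the byte branch of reverse():
+ //   a = 0b0000_0001
+ //   rotateLeft(a, 4)   -> 0b0001_0000
+ //   swap adjacent bits -> 0b0010_0000
+ //   swap two-bit pairs -> 0b1000_0000  ((byte) -128)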
+ #end[!intOrLong]
+ #end[!FP]
+
#if[BITWISE]
// not (~)
/**
* Computes the bitwise logical complement ({@code ~})
* of this vector.
shuffleType, byte.class, length(),
this, vsp,
$Type$Vector::toShuffle0);
}
+ /**
+ * {@inheritDoc} <!--workaround-->
+ * @since 19
+ */
+ @Override
+ public abstract
+ $Type$Vector compress(VectorMask<$Boxtype$> m);
+
+ /*package-private*/
+ @ForceInline
+ final
+ <M extends AbstractMask<$Boxtype$>>
+ $Type$Vector compressTemplate(Class<M> masktype, M m) {
+ m.check(masktype, this);
+ return ($Type$Vector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
+ $type$.class, length(), this, m,
+ (v1, m1) -> compressHelper(v1, m1));
+ }
+
+ /**
+ * {@inheritDoc} <!--workaround-->
+ * @since 19
+ */
+ @Override
+ public abstract
+ $Type$Vector expand(VectorMask<$Boxtype$> m);
+
+ /*package-private*/
+ @ForceInline
+ final
+ <M extends AbstractMask<$Boxtype$>>
+ $Type$Vector expandTemplate(Class<M> masktype, M m) {
+ m.check(masktype, this);
+ return ($Type$Vector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
+ $type$.class, length(), this, m,
+ (v1, m1) -> expandHelper(v1, m1));
+ }
+
+
/**
* {@inheritDoc} <!--workaround-->
*/
@Override
public abstract
}
return res;
}
#end[double]
- /**
- * Loads a vector from a byte array starting at an offset.
- * Bytes are composed into primitive lane elements according
- * to the specified byte order.
- * The vector is arranged into lanes according to
- * <a href="Vector.html#lane-order">memory ordering</a>.
- * <p>
- * This method behaves as if it returns the result of calling
- * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
- * fromByteBuffer()} as follows:
- * <pre>{@code
- * var bb = ByteBuffer.wrap(a);
- * var m = species.maskAll(true);
- * return fromByteBuffer(species, bb, offset, bo, m);
- * }</pre>
- *
- * @param species species of desired vector
- * @param a the byte array
- * @param offset the offset into the array
- * @param bo the intended byte order
- * @return a vector loaded from a byte array
- * @throws IndexOutOfBoundsException
- * if {@code offset+N*ESIZE < 0}
- * or {@code offset+(N+1)*ESIZE > a.length}
- * for any lane {@code N} in the vector
- */
- @ForceInline
- public static
- $abstractvectortype$ fromByteArray(VectorSpecies<$Boxtype$> species,
- byte[] a, int offset,
- ByteOrder bo) {
- offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
- $Type$Species vsp = ($Type$Species) species;
- return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
- }
-
- /**
- * Loads a vector from a byte array starting at an offset
- * and using a mask.
- * Lanes where the mask is unset are filled with the default
- * value of {@code $type$} ({#if[FP]?positive }zero).
- * Bytes are composed into primitive lane elements according
- * to the specified byte order.
- * The vector is arranged into lanes according to
- * <a href="Vector.html#lane-order">memory ordering</a>.
- * <p>
- * This method behaves as if it returns the result of calling
- * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
- * fromByteBuffer()} as follows:
- * <pre>{@code
- * var bb = ByteBuffer.wrap(a);
- * return fromByteBuffer(species, bb, offset, bo, m);
- * }</pre>
- *
- * @param species species of desired vector
- * @param a the byte array
- * @param offset the offset into the array
- * @param bo the intended byte order
- * @param m the mask controlling lane selection
- * @return a vector loaded from a byte array
- * @throws IndexOutOfBoundsException
- * if {@code offset+N*ESIZE < 0}
- * or {@code offset+(N+1)*ESIZE > a.length}
- * for any lane {@code N} in the vector
- * where the mask is set
- */
- @ForceInline
- public static
- $abstractvectortype$ fromByteArray(VectorSpecies<$Boxtype$> species,
- byte[] a, int offset,
- ByteOrder bo,
- VectorMask<$Boxtype$> m) {
- $Type$Species vsp = ($Type$Species) species;
- if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
- return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
- }
-
- // FIXME: optimize
- checkMaskFromIndexSize(offset, vsp, m, $sizeInBytes$, a.length);
- ByteBuffer wb = wrapper(a, bo);
- return vsp.ldOp(wb, offset, (AbstractMask<$Boxtype$>)m,
- (wb_, o, i) -> wb_.get{#if[byte]?(:$Type$(}o + i * $sizeInBytes$));
- }
-
/**
* Loads a vector from an array of type {@code $type$[]}
* starting at an offset.
* For each vector lane, where {@code N} is the vector lane index, the
* array element at index {@code offset + N} is placed into the
return vsp.vOp(m, n -> (byte) (a[offset + indexMap[mapOffset + n]] ? 1 : 0));
}
#end[byte]
/**
- * Loads a vector from a {@linkplain ByteBuffer byte buffer}
- * starting at an offset into the byte buffer.
+ * Loads a vector from a {@linkplain MemorySegment memory segment}
+ * starting at an offset into the memory segment.
* Bytes are composed into primitive lane elements according
* to the specified byte order.
* The vector is arranged into lanes according to
* <a href="Vector.html#lane-order">memory ordering</a>.
* <p>
* This method behaves as if it returns the result of calling
- * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
- * fromByteBuffer()} as follows:
+ * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
+ * fromMemorySegment()} as follows:
* <pre>{@code
* var m = species.maskAll(true);
- * return fromByteBuffer(species, bb, offset, bo, m);
+ * return fromMemorySegment(species, ms, offset, bo, m);
* }</pre>
*
* @param species species of desired vector
- * @param bb the byte buffer
- * @param offset the offset into the byte buffer
+ * @param ms the memory segment
+ * @param offset the offset into the memory segment
* @param bo the intended byte order
- * @return a vector loaded from a byte buffer
+ * @return a vector loaded from the memory segment
* @throws IndexOutOfBoundsException
* if {@code offset+N*$sizeInBytes$ < 0}
- * or {@code offset+N*$sizeInBytes$ >= bb.limit()}
+ * or {@code offset+N*$sizeInBytes$ >= ms.byteSize()}
* for any lane {@code N} in the vector
+ * @throws IllegalArgumentException if the memory segment is a heap segment that is
+ * not backed by a {@code byte[]} array.
+ * @throws IllegalStateException if the memory segment's session is not alive,
+ * or if access occurs from a thread other than the thread owning the session.
+ * @since 19
*/
@ForceInline
public static
- $abstractvectortype$ fromByteBuffer(VectorSpecies<$Boxtype$> species,
- ByteBuffer bb, int offset,
- ByteOrder bo) {
- offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
+ $abstractvectortype$ fromMemorySegment(VectorSpecies<$Boxtype$> species,
+ MemorySegment ms, long offset,
+ ByteOrder bo) {
+ offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
$Type$Species vsp = ($Type$Species) species;
- return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
+ return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
}
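+
+ // Usage sketch (illustrative only; `bytes` is a hypothetical byte[] holding
+ // at least species.vectorByteSize() bytes):
+ //
+ //   var species = $abstractvectortype$.SPECIES_PREFERRED;
+ //   MemorySegment ms = MemorySegment.ofArray(bytes);
+ //   $abstractvectortype$ v =
+ //       $abstractvectortype$.fromMemorySegment(species, ms, 0L, ByteOrder.nativeOrder());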
/**
- * Loads a vector from a {@linkplain ByteBuffer byte buffer}
- * starting at an offset into the byte buffer
+ * Loads a vector from a {@linkplain MemorySegment memory segment}
+ * starting at an offset into the memory segment
* and using a mask.
* Lanes where the mask is unset are filled with the default
* value of {@code $type$} ({#if[FP]?positive }zero).
* Bytes are composed into primitive lane elements according
* to the specified byte order.
* The vector is arranged into lanes according to
* <a href="Vector.html#lane-order">memory ordering</a>.
* <p>
* The following pseudocode illustrates the behavior:
* <pre>{@code
- * $Type$Buffer eb = bb.duplicate()
- * .position(offset){#if[byte]?;}
- #if[!byte]
- * .order(bo).as$Type$Buffer();
- #end[!byte]
+ * var slice = ms.asSlice(offset);
* $type$[] ar = new $type$[species.length()];
* for (int n = 0; n < ar.length; n++) {
* if (m.laneIsSet(n)) {
- * ar[n] = eb.get(n);
+ * ar[n] = slice.getAtIndex(ValueLayout.JAVA_$TYPE$.withBitAlignment(8), n);
* }
* }
* $abstractvectortype$ r = $abstractvectortype$.fromArray(species, ar, 0);
* }</pre>
* @implNote
#else[!byte]
* The byte order argument is ignored.
#end[!byte]
*
* @param species species of desired vector
- * @param bb the byte buffer
- * @param offset the offset into the byte buffer
+ * @param ms the memory segment
+ * @param offset the offset into the memory segment
* @param bo the intended byte order
* @param m the mask controlling lane selection
- * @return a vector loaded from a byte buffer
+ * @return a vector loaded from the memory segment
* @throws IndexOutOfBoundsException
* if {@code offset+N*$sizeInBytes$ < 0}
- * or {@code offset+N*$sizeInBytes$ >= bb.limit()}
+ * or {@code offset+N*$sizeInBytes$ >= ms.byteSize()}
* for any lane {@code N} in the vector
* where the mask is set
+ * @throws IllegalArgumentException if the memory segment is a heap segment that is
+ * not backed by a {@code byte[]} array.
+ * @throws IllegalStateException if the memory segment's session is not alive,
+ * or if access occurs from a thread other than the thread owning the session.
+ * @since 19
*/
@ForceInline
public static
- $abstractvectortype$ fromByteBuffer(VectorSpecies<$Boxtype$> species,
- ByteBuffer bb, int offset,
- ByteOrder bo,
- VectorMask<$Boxtype$> m) {
+ $abstractvectortype$ fromMemorySegment(VectorSpecies<$Boxtype$> species,
+ MemorySegment ms, long offset,
+ ByteOrder bo,
+ VectorMask<$Boxtype$> m) {
$Type$Species vsp = ($Type$Species) species;
- if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
- return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
+ if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
+ return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
}
// FIXME: optimize
- checkMaskFromIndexSize(offset, vsp, m, $sizeInBytes$, bb.limit());
- ByteBuffer wb = wrapper(bb, bo);
- return vsp.ldOp(wb, offset, (AbstractMask<$Boxtype$>)m,
- (wb_, o, i) -> wb_.get{#if[byte]?(:$Type$(}o + i * $sizeInBytes$));
+ checkMaskFromIndexSize(offset, vsp, m, $sizeInBytes$, ms.byteSize());
+ return vsp.ldLongOp(ms, offset, m, $abstractvectortype$::memorySegmentGet);
}
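+
+ // Masked usage sketch (illustrative; `ms`, `offset`, and the remaining
+ // element count `n` are hypothetical): a short tail can be loaded without
+ // over-reading the segment by masking off the out-of-range lanes:
+ //
+ //   var m = species.indexInRange(0, n);
+ //   $abstractvectortype$ v =
+ //       $abstractvectortype$.fromMemorySegment(species, ms, offset, ByteOrder.nativeOrder(), m);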
// Memory store operations
/**
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
this,
a, offset,
(arr, off, v)
- -> v.stOp(arr, off,
+ -> v.stOp(arr, (int) off,
(arr_, off_, i, e) -> arr_[off_ + i] = e));
}
/**
* Stores this vector into an array of type {@code $type$[]}
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, charArrayAddress(a, offset),
this,
a, offset,
(arr, off, v)
- -> v.stOp(arr, off,
+ -> v.stOp(arr, (int) off,
(arr_, off_, i, e) -> arr_[off_ + i] = (char) e));
}
/**
* Stores this vector into an array of type {@code char[]}
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, booleanArrayAddress(a, offset),
normalized,
a, offset,
(arr, off, v)
- -> v.stOp(arr, off,
+ -> v.stOp(arr, (int) off,
(arr_, off_, i, e) -> arr_[off_ + i] = (e & 1) != 0));
}
/**
* Stores this vector into an array of type {@code boolean[]}
}
#end[byte]
/**
* {@inheritDoc} <!--workaround-->
+ * @since 19
*/
@Override
@ForceInline
public final
- void intoByteArray(byte[] a, int offset,
- ByteOrder bo) {
- offset = checkFromIndexSize(offset, byteSize(), a.length);
- maybeSwap(bo).intoByteArray0(a, offset);
- }
-
- /**
- * {@inheritDoc} <!--workaround-->
- */
- @Override
- @ForceInline
- public final
- void intoByteArray(byte[] a, int offset,
- ByteOrder bo,
- VectorMask<$Boxtype$> m) {
- if (m.allTrue()) {
- intoByteArray(a, offset, bo);
- } else {
- $Type$Species vsp = vspecies();
- checkMaskFromIndexSize(offset, vsp, m, $sizeInBytes$, a.length);
- maybeSwap(bo).intoByteArray0(a, offset, m);
+ void intoMemorySegment(MemorySegment ms, long offset,
+ ByteOrder bo) {
+ if (ms.isReadOnly()) {
+ throw new UnsupportedOperationException("Attempt to write a read-only segment");
}
- }
- /**
- * {@inheritDoc} <!--workaround-->
- */
- @Override
- @ForceInline
- public final
- void intoByteBuffer(ByteBuffer bb, int offset,
- ByteOrder bo) {
- if (ScopedMemoryAccess.isReadOnly(bb)) {
- throw new ReadOnlyBufferException();
- }
- offset = checkFromIndexSize(offset, byteSize(), bb.limit());
- maybeSwap(bo).intoByteBuffer0(bb, offset);
+ offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
+ maybeSwap(bo).intoMemorySegment0(ms, offset);
}
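+
+ // Store usage sketch (illustrative; `dst` is a hypothetical writable segment
+ // with at least v.byteSize() bytes available at byte offset 0):
+ //
+ //   v.intoMemorySegment(dst, 0L, ByteOrder.nativeOrder());
+ //
+ // Read-only segments are rejected with UnsupportedOperationException, as
+ // checked above.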
/**
* {@inheritDoc} <!--workaround-->
+ * @since 19
*/
@Override
@ForceInline
public final
- void intoByteBuffer(ByteBuffer bb, int offset,
- ByteOrder bo,
- VectorMask<$Boxtype$> m) {
+ void intoMemorySegment(MemorySegment ms, long offset,
+ ByteOrder bo,
+ VectorMask<$Boxtype$> m) {
if (m.allTrue()) {
- intoByteBuffer(bb, offset, bo);
+ intoMemorySegment(ms, offset, bo);
} else {
- if (bb.isReadOnly()) {
- throw new ReadOnlyBufferException();
+ if (ms.isReadOnly()) {
+ throw new UnsupportedOperationException("Attempt to write a read-only segment");
}
$Type$Species vsp = vspecies();
- checkMaskFromIndexSize(offset, vsp, m, $sizeInBytes$, bb.limit());
- maybeSwap(bo).intoByteBuffer0(bb, offset, m);
+ checkMaskFromIndexSize(offset, vsp, m, $sizeInBytes$, ms.byteSize());
+ maybeSwap(bo).intoMemorySegment0(ms, offset, m);
}
}
// ================================================
$Type$Species vsp = vspecies();
return VectorSupport.load(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
a, offset, vsp,
- (arr, off, s) -> s.ldOp(arr, off,
+ (arr, off, s) -> s.ldOp(arr, (int) off,
(arr_, off_, i) -> arr_[off_ + i]));
}
/*package-private*/
abstract
$Type$Species vsp = vspecies();
return VectorSupport.loadMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset), m,
a, offset, vsp,
- (arr, off, s, vm) -> s.ldOp(arr, off, vm,
+ (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
(arr_, off_, i) -> arr_[off_ + i]));
}
#if[!byteOrShort]
/*package-private*/
$Type$Species vsp = vspecies();
return VectorSupport.load(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, charArrayAddress(a, offset),
a, offset, vsp,
- (arr, off, s) -> s.ldOp(arr, off,
+ (arr, off, s) -> s.ldOp(arr, (int) off,
(arr_, off_, i) -> (short) arr_[off_ + i]));
}
/*package-private*/
abstract
$Type$Species vsp = vspecies();
return VectorSupport.loadMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, charArrayAddress(a, offset), m,
a, offset, vsp,
- (arr, off, s, vm) -> s.ldOp(arr, off, vm,
+ (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
(arr_, off_, i) -> (short) arr_[off_ + i]));
}
#end[short]
#if[byte]
$Type$Species vsp = vspecies();
return VectorSupport.load(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, booleanArrayAddress(a, offset),
a, offset, vsp,
- (arr, off, s) -> s.ldOp(arr, off,
+ (arr, off, s) -> s.ldOp(arr, (int) off,
(arr_, off_, i) -> (byte) (arr_[off_ + i] ? 1 : 0)));
}
/*package-private*/
abstract
$Type$Species vsp = vspecies();
return VectorSupport.loadMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, booleanArrayAddress(a, offset), m,
a, offset, vsp,
- (arr, off, s, vm) -> s.ldOp(arr, off, vm,
+ (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
(arr_, off_, i) -> (byte) (arr_[off_ + i] ? 1 : 0)));
}
#end[byte]
- @Override
abstract
- $abstractvectortype$ fromByteArray0(byte[] a, int offset);
+ $abstractvectortype$ fromMemorySegment0(MemorySegment ms, long offset);
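+ // In the Template methods below, the trailing lambda is the scalar fallback
+ // taken when the ScopedMemoryAccess intrinsic does not apply; it reads the
+ // segment lane-by-lane through ldLongOp and memorySegmentGet.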
@ForceInline
final
- $abstractvectortype$ fromByteArray0Template(byte[] a, int offset) {
+ $abstractvectortype$ fromMemorySegment0Template(MemorySegment ms, long offset) {
$Type$Species vsp = vspecies();
- return VectorSupport.load(
- vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset),
- a, offset, vsp,
- (arr, off, s) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- return s.ldOp(wb, off,
- (wb_, o, i) -> wb_.get{#if[byte]?(:$Type$(}o + i * $sizeInBytes$));
- });
- }
-
- abstract
- $abstractvectortype$ fromByteArray0(byte[] a, int offset, VectorMask<$Boxtype$> m);
- @ForceInline
- final
- <M extends VectorMask<$Boxtype$>>
- $abstractvectortype$ fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
- $Type$Species vsp = vspecies();
- m.check(vsp);
- return VectorSupport.loadMasked(
- vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset), m,
- a, offset, vsp,
- (arr, off, s, vm) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- return s.ldOp(wb, off, vm,
- (wb_, o, i) -> wb_.get{#if[byte]?(:$Type$(}o + i * $sizeInBytes$));
- });
- }
-
- abstract
- $abstractvectortype$ fromByteBuffer0(ByteBuffer bb, int offset);
- @ForceInline
- final
- $abstractvectortype$ fromByteBuffer0Template(ByteBuffer bb, int offset) {
- $Type$Species vsp = vspecies();
- return ScopedMemoryAccess.loadFromByteBuffer(
+ return ScopedMemoryAccess.loadFromMemorySegment(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- bb, offset, vsp,
- (buf, off, s) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- return s.ldOp(wb, off,
- (wb_, o, i) -> wb_.get{#if[byte]?(:$Type$(}o + i * $sizeInBytes$));
+ (MemorySegmentProxy) ms, offset, vsp,
+ (msp, off, s) -> {
+ return s.ldLongOp((MemorySegment) msp, off, $abstractvectortype$::memorySegmentGet);
});
}
abstract
- $abstractvectortype$ fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<$Boxtype$> m);
+ $abstractvectortype$ fromMemorySegment0(MemorySegment ms, long offset, VectorMask<$Boxtype$> m);
@ForceInline
final
<M extends VectorMask<$Boxtype$>>
- $abstractvectortype$ fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
+ $abstractvectortype$ fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
$Type$Species vsp = vspecies();
m.check(vsp);
- return ScopedMemoryAccess.loadFromByteBufferMasked(
+ return ScopedMemoryAccess.loadFromMemorySegmentMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- bb, offset, m, vsp,
- (buf, off, s, vm) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- return s.ldOp(wb, off, vm,
- (wb_, o, i) -> wb_.get{#if[byte]?(:$Type$(}o + i * $sizeInBytes$));
+ (MemorySegmentProxy) ms, offset, m, vsp,
+ (msp, off, s, vm) -> {
+ return s.ldLongOp((MemorySegment) msp, off, vm, $abstractvectortype$::memorySegmentGet);
});
}
// Unchecked storing operations in native byte order.
// Caller is responsible for applying index checks, masking, and
VectorSupport.store(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
this, a, offset,
(arr, off, v)
- -> v.stOp(arr, off,
+ -> v.stOp(arr, (int) off,
(arr_, off_, i, e) -> arr_[off_+i] = e));
}
abstract
void intoArray0($type$[] a, int offset, VectorMask<$Boxtype$> m);
VectorSupport.storeMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
this, m, a, offset,
(arr, off, v, vm)
- -> v.stOp(arr, off, vm,
+ -> v.stOp(arr, (int) off, vm,
(arr_, off_, i, e) -> arr_[off_ + i] = e));
}
#if[!byteOrShort]
abstract
VectorSupport.storeMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, booleanArrayAddress(a, offset),
normalized, m, a, offset,
(arr, off, v, vm)
- -> v.stOp(arr, off, vm,
+ -> v.stOp(arr, (int) off, vm,
(arr_, off_, i, e) -> arr_[off_ + i] = (e & 1) != 0));
}
#end[byte]
- abstract
- void intoByteArray0(byte[] a, int offset);
@ForceInline
final
- void intoByteArray0Template(byte[] a, int offset) {
+ void intoMemorySegment0(MemorySegment ms, long offset) {
$Type$Species vsp = vspecies();
- VectorSupport.store(
- vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset),
- this, a, offset,
- (arr, off, v) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- v.stOp(wb, off,
- (tb_, o, i, e) -> tb_.put{#if[byte]?(:$Type$(}o + i * $sizeInBytes$, e));
- });
- }
-
- abstract
- void intoByteArray0(byte[] a, int offset, VectorMask<$Boxtype$> m);
- @ForceInline
- final
- <M extends VectorMask<$Boxtype$>>
- void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
- $Type$Species vsp = vspecies();
- m.check(vsp);
- VectorSupport.storeMasked(
- vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset),
- this, m, a, offset,
- (arr, off, v, vm) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- v.stOp(wb, off, vm,
- (tb_, o, i, e) -> tb_.put{#if[byte]?(:$Type$(}o + i * $sizeInBytes$, e));
- });
- }
-
- @ForceInline
- final
- void intoByteBuffer0(ByteBuffer bb, int offset) {
- $Type$Species vsp = vspecies();
- ScopedMemoryAccess.storeIntoByteBuffer(
+ ScopedMemoryAccess.storeIntoMemorySegment(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- this, bb, offset,
- (buf, off, v) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- v.stOp(wb, off,
- (wb_, o, i, e) -> wb_.put{#if[byte]?(:$Type$(}o + i * $sizeInBytes$, e));
+ this,
+ (MemorySegmentProxy) ms, offset,
+ (msp, off, v) -> {
+ v.stLongOp((MemorySegment) msp, off, $abstractvectortype$::memorySegmentSet);
});
}
abstract
- void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<$Boxtype$> m);
+ void intoMemorySegment0(MemorySegment ms, long offset, VectorMask<$Boxtype$> m);
@ForceInline
final
<M extends VectorMask<$Boxtype$>>
- void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
+ void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
$Type$Species vsp = vspecies();
m.check(vsp);
- ScopedMemoryAccess.storeIntoByteBufferMasked(
+ ScopedMemoryAccess.storeIntoMemorySegmentMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- this, m, bb, offset,
- (buf, off, v, vm) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- v.stOp(wb, off, vm,
- (wb_, o, i, e) -> wb_.put{#if[byte]?(:$Type$(}o + i * $sizeInBytes$, e));
+ this, m,
+ (MemorySegmentProxy) ms, offset,
+ (msp, off, v, vm) -> {
+ v.stLongOp((MemorySegment) msp, off, vm, $abstractvectortype$::memorySegmentSet);
});
}
#if[short]
/*package-private*/
VectorSupport.storeMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, charArrayAddress(a, offset),
this, m, a, offset,
(arr, off, v, vm)
- -> v.stOp(arr, off, vm,
+ -> v.stOp(arr, (int) off, vm,
(arr_, off_, i, e) -> arr_[off_ + i] = (char) e));
}
#end[short]
// End of low-level memory operations.
int limit) {
((AbstractMask<$Boxtype$>)m)
.checkIndexByLane(offset, limit, vsp.iota(), scale);
}
+ private static
+ void checkMaskFromIndexSize(long offset,
+ $Type$Species vsp,
+ VectorMask<$Boxtype$> m,
+ int scale,
+ long limit) {
+ ((AbstractMask<$Boxtype$>)m)
+ .checkIndexByLane(offset, limit, vsp.iota(), scale);
+ }
+
@ForceInline
private void conditionalStoreNYI(int offset,
$Type$Species vsp,
VectorMask<$Boxtype$> m,
int scale,
VectorMask<$Boxtype$> m,
FLdOp<M> f) {
return dummyVector().ldOp(memory, offset, m, f);
}
+ /*package-private*/
+ @ForceInline
+ $abstractvectortype$ ldLongOp(MemorySegment memory, long offset,
+ FLdLongOp f) {
+ return dummyVector().ldLongOp(memory, offset, f);
+ }
+
+ /*package-private*/
+ @ForceInline
+ $abstractvectortype$ ldLongOp(MemorySegment memory, long offset,
+ VectorMask<$Boxtype$> m,
+ FLdLongOp f) {
+ return dummyVector().ldLongOp(memory, offset, m, f);
+ }
+
/*package-private*/
@ForceInline
<M> void stOp(M memory, int offset, FStOp<M> f) {
dummyVector().stOp(memory, offset, f);
}
<M> void stOp(M memory, int offset,
AbstractMask<$Boxtype$> m,
FStOp<M> f) {
dummyVector().stOp(memory, offset, m, f);
}
+
+ /*package-private*/
+ @ForceInline
+ void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
+ dummyVector().stLongOp(memory, offset, f);
+ }
+
+ /*package-private*/
+ @ForceInline
+ void stLongOp(MemorySegment memory, long offset,
+ AbstractMask<$Boxtype$> m,
+ FStLongOp f) {
+ dummyVector().stLongOp(memory, offset, m, f);
+ }
// N.B. Make sure these constant vectors and
// masks load up correctly into registers.
//
// Also, see if we can avoid all that switching.