src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.incubator.vector;
- import java.nio.ByteBuffer;
import java.nio.ByteOrder;
- import java.nio.ReadOnlyBufferException;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.Function;
- import java.util.function.UnaryOperator;
+ import jdk.incubator.foreign.MemorySegment;
+ import jdk.incubator.foreign.ValueLayout;
+ import jdk.internal.access.foreign.MemorySegmentProxy;
import jdk.internal.misc.ScopedMemoryAccess;
import jdk.internal.misc.Unsafe;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.vector.VectorSupport;
super(vec);
}
static final int FORBID_OPCODE_KIND = VO_ONLYFP;
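+ // JAVA_INT relaxed to byte (8-bit) alignment so lanes can be read or written at any byte offset.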
+ static final ValueLayout.OfInt ELEMENT_LAYOUT = ValueLayout.JAVA_INT.withBitAlignment(8);
+
@ForceInline
static int opCode(Operator op) {
return VectorOperators.opCode(op, VO_OPCODE_VALID, FORBID_OPCODE_KIND);
}
@ForceInline
}
}
return vectorFactory(res);
}
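+ // Scalar fallback used when a MemorySegment load is not intrinsified: f reads one lane at a time.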
+ /*package-private*/
+ interface FLdLongOp {
+ int apply(MemorySegment memory, long offset, int i);
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ IntVector ldLongOp(MemorySegment memory, long offset,
+ FLdLongOp f) {
+ // dummy receiver: loads do not read this vector's lanes (no vec = vec())
+ int[] res = new int[length()];
+ for (int i = 0; i < res.length; i++) {
+ res[i] = f.apply(memory, offset, i);
+ }
+ return vectorFactory(res);
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ IntVector ldLongOp(MemorySegment memory, long offset,
+ VectorMask<Integer> m,
+ FLdLongOp f) {
+ // loads do not read this vector's lanes (no vec = vec())
+ int[] res = new int[length()];
+ boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
+ for (int i = 0; i < res.length; i++) {
+ if (mbits[i]) {
+ res[i] = f.apply(memory, offset, i);
+ }
+ }
+ return vectorFactory(res);
+ }
+
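+ // Lane accessor for loads: reads the int at byte offset o + i*4 from the segment.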
+ static int memorySegmentGet(MemorySegment ms, long o, int i) {
+ return ms.get(ELEMENT_LAYOUT, o + i * 4L);
+ }
+
interface FStOp<M> {
void apply(M memory, int offset, int i, int a);
}
/*package-private*/
f.apply(memory, offset, i, vec[i]);
}
}
}
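+ // Scalar fallback store counterpart: f writes one lane at a time into the segment.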
+ interface FStLongOp {
+ void apply(MemorySegment memory, long offset, int i, int a);
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ void stLongOp(MemorySegment memory, long offset,
+ FStLongOp f) {
+ int[] vec = vec();
+ for (int i = 0; i < vec.length; i++) {
+ f.apply(memory, offset, i, vec[i]);
+ }
+ }
+
+ /*package-private*/
+ @ForceInline
+ final
+ void stLongOp(MemorySegment memory, long offset,
+ VectorMask<Integer> m,
+ FStLongOp f) {
+ int[] vec = vec();
+ boolean[] mbits = ((AbstractMask<Integer>)m).getBits();
+ for (int i = 0; i < vec.length; i++) {
+ if (mbits[i]) {
+ f.apply(memory, offset, i, vec[i]);
+ }
+ }
+ }
+
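+ // Lane accessor for stores: writes e at byte offset o + i*4 into the segment.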
+ static void memorySegmentSet(MemorySegment ms, long o, int i, int e) {
+ ms.set(ELEMENT_LAYOUT, o + i * 4L, e);
+ }
+
// Binary test
/*package-private*/
interface FBinTest {
boolean apply(int cond, int i, int a, int b);
@ForceInline
static int fromBits(long bits) {
return ((int)bits);
}
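+ // Scalar fallback for expand: consecutive source lanes are scattered into the set mask positions; unset lanes stay zero.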
+ static IntVector expandHelper(Vector<Integer> v, VectorMask<Integer> m) {
+ VectorSpecies<Integer> vsp = m.vectorSpecies();
+ IntVector r = (IntVector) vsp.zero();
+ IntVector vi = (IntVector) v;
+ if (m.allTrue()) {
+ return vi;
+ }
+ for (int i = 0, j = 0; i < vsp.length(); i++) {
+ if (m.laneIsSet(i)) {
+ r = r.withLane(i, vi.lane(j++));
+ }
+ }
+ return r;
+ }
+
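+ // Scalar fallback for compress: lanes at set mask positions are packed into consecutive low lanes; the rest stay zero.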
+ static IntVector compressHelper(Vector<Integer> v, VectorMask<Integer> m) {
+ VectorSpecies<Integer> vsp = m.vectorSpecies();
+ IntVector r = (IntVector) vsp.zero();
+ IntVector vi = (IntVector) v;
+ if (m.allTrue()) {
+ return vi;
+ }
+ for (int i = 0, j = 0; i < vsp.length(); i++) {
+ if (m.laneIsSet(i)) {
+ r = r.withLane(j++, vi.lane(i));
+ }
+ }
+ return r;
+ }
+
// Static factories (other than memory operations)
// Note: A surprising behavior in javadoc
// sometimes makes a lone /** {@inheritDoc} */
// comment drop the method altogether,
switch (opc_) {
case VECTOR_OP_NEG: return (v0, m) ->
v0.uOp(m, (i, a) -> (int) -a);
case VECTOR_OP_ABS: return (v0, m) ->
v0.uOp(m, (i, a) -> (int) Math.abs(a));
+ case VECTOR_OP_BIT_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> (int) Integer.bitCount(a));
+ case VECTOR_OP_TZ_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> (int) Integer.numberOfTrailingZeros(a));
+ case VECTOR_OP_LZ_COUNT: return (v0, m) ->
+ v0.uOp(m, (i, a) -> (int) Integer.numberOfLeadingZeros(a));
+ case VECTOR_OP_REVERSE: return (v0, m) ->
+ v0.uOp(m, (i, a) -> (int) Integer.reverse(a));
+ case VECTOR_OP_REVERSE_BYTES: return (v0, m) ->
+ v0.uOp(m, (i, a) -> (int) Integer.reverseBytes(a));
default: return null;
}
}
// Binary lanewise support
v0.bOp(v1, vm, (i, a, n) -> (int)((a & LSHR_SETUP_MASK) >>> n));
case VECTOR_OP_LROTATE: return (v0, v1, vm) ->
v0.bOp(v1, vm, (i, a, n) -> rotateLeft(a, (int)n));
case VECTOR_OP_RROTATE: return (v0, v1, vm) ->
v0.bOp(v1, vm, (i, a, n) -> rotateRight(a, (int)n));
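+ // Integer.compress/expand (bitwise, added in Java 19) treat the second operand as a bit mask.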
+ case VECTOR_OP_COMPRESS_BITS: return (v0, v1, vm) ->
+ v0.bOp(v1, vm, (i, a, n) -> Integer.compress(a, n));
+ case VECTOR_OP_EXPAND_BITS: return (v0, v1, vm) ->
+ v0.bOp(v1, vm, (i, a, n) -> Integer.expand(a, n));
default: return null;
}
}
// FIXME: Maybe all of the public final methods in this file (the
public final
IntVector abs() {
return lanewise(ABS);
}
+
// not (~)
/**
* Computes the bitwise logical complement ({@code ~})
* of this vector.
*
shuffleType, byte.class, length(),
this, vsp,
IntVector::toShuffle0);
}
+ /**
+ * {@inheritDoc} <!--workaround-->
+ * @since 19
+ */
+ @Override
+ public abstract
+ IntVector compress(VectorMask<Integer> m);
+
+ /*package-private*/
+ @ForceInline
+ final
+ <M extends AbstractMask<Integer>>
+ IntVector compressTemplate(Class<M> masktype, M m) {
+ m.check(masktype, this);
+ return (IntVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_COMPRESS, getClass(), masktype,
+ int.class, length(), this, m,
+ (v1, m1) -> compressHelper(v1, m1));
+ }
+
+ /**
+ * {@inheritDoc} <!--workaround-->
+ * @since 19
+ */
+ @Override
+ public abstract
+ IntVector expand(VectorMask<Integer> m);
+
+ /*package-private*/
+ @ForceInline
+ final
+ <M extends AbstractMask<Integer>>
+ IntVector expandTemplate(Class<M> masktype, M m) {
+ m.check(masktype, this);
+ return (IntVector) VectorSupport.comExpOp(VectorSupport.VECTOR_OP_EXPAND, getClass(), masktype,
+ int.class, length(), this, m,
+ (v1, m1) -> expandHelper(v1, m1));
+ }
+
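+ // Example (hypothetical 4-lane species, mask {T,F,T,F}):
+ //   compress([1,2,3,4], m) -> [1,3,0,0]
+ //   expand([1,2,3,4], m)   -> [1,0,2,0]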
+
/**
* {@inheritDoc} <!--workaround-->
*/
@Override
public abstract
res[i] = (double) a[i];
}
return res;
}
- /**
- * Loads a vector from a byte array starting at an offset.
- * Bytes are composed into primitive lane elements according
- * to the specified byte order.
- * The vector is arranged into lanes according to
- * <a href="Vector.html#lane-order">memory ordering</a>.
- * <p>
- * This method behaves as if it returns the result of calling
- * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
- * fromByteBuffer()} as follows:
- * <pre>{@code
- * var bb = ByteBuffer.wrap(a);
- * var m = species.maskAll(true);
- * return fromByteBuffer(species, bb, offset, bo, m);
- * }</pre>
- *
- * @param species species of desired vector
- * @param a the byte array
- * @param offset the offset into the array
- * @param bo the intended byte order
- * @return a vector loaded from a byte array
- * @throws IndexOutOfBoundsException
- * if {@code offset+N*ESIZE < 0}
- * or {@code offset+(N+1)*ESIZE > a.length}
- * for any lane {@code N} in the vector
- */
- @ForceInline
- public static
- IntVector fromByteArray(VectorSpecies<Integer> species,
- byte[] a, int offset,
- ByteOrder bo) {
- offset = checkFromIndexSize(offset, species.vectorByteSize(), a.length);
- IntSpecies vsp = (IntSpecies) species;
- return vsp.dummyVector().fromByteArray0(a, offset).maybeSwap(bo);
- }
-
- /**
- * Loads a vector from a byte array starting at an offset
- * and using a mask.
- * Lanes where the mask is unset are filled with the default
- * value of {@code int} (zero).
- * Bytes are composed into primitive lane elements according
- * to the specified byte order.
- * The vector is arranged into lanes according to
- * <a href="Vector.html#lane-order">memory ordering</a>.
- * <p>
- * This method behaves as if it returns the result of calling
- * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
- * fromByteBuffer()} as follows:
- * <pre>{@code
- * var bb = ByteBuffer.wrap(a);
- * return fromByteBuffer(species, bb, offset, bo, m);
- * }</pre>
- *
- * @param species species of desired vector
- * @param a the byte array
- * @param offset the offset into the array
- * @param bo the intended byte order
- * @param m the mask controlling lane selection
- * @return a vector loaded from a byte array
- * @throws IndexOutOfBoundsException
- * if {@code offset+N*ESIZE < 0}
- * or {@code offset+(N+1)*ESIZE > a.length}
- * for any lane {@code N} in the vector
- * where the mask is set
- */
- @ForceInline
- public static
- IntVector fromByteArray(VectorSpecies<Integer> species,
- byte[] a, int offset,
- ByteOrder bo,
- VectorMask<Integer> m) {
- IntSpecies vsp = (IntSpecies) species;
- if (offset >= 0 && offset <= (a.length - species.vectorByteSize())) {
- return vsp.dummyVector().fromByteArray0(a, offset, m).maybeSwap(bo);
- }
-
- // FIXME: optimize
- checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
- ByteBuffer wb = wrapper(a, bo);
- return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
- (wb_, o, i) -> wb_.getInt(o + i * 4));
- }
-
/**
* Loads a vector from an array of type {@code int[]}
* starting at an offset.
* For each vector lane, where {@code N} is the vector lane index, the
* array element at index {@code offset + N} is placed into the
}
/**
- * Loads a vector from a {@linkplain ByteBuffer byte buffer}
- * starting at an offset into the byte buffer.
+ * Loads a vector from a {@linkplain MemorySegment memory segment}
+ * starting at an offset into the memory segment.
* Bytes are composed into primitive lane elements according
* to the specified byte order.
* The vector is arranged into lanes according to
* <a href="Vector.html#lane-order">memory ordering</a>.
* <p>
* This method behaves as if it returns the result of calling
- * {@link #fromByteBuffer(VectorSpecies,ByteBuffer,int,ByteOrder,VectorMask)
- * fromByteBuffer()} as follows:
+ * {@link #fromMemorySegment(VectorSpecies,MemorySegment,long,ByteOrder,VectorMask)
+ * fromMemorySegment()} as follows:
* <pre>{@code
* var m = species.maskAll(true);
- * return fromByteBuffer(species, bb, offset, bo, m);
+ * return fromMemorySegment(species, ms, offset, bo, m);
* }</pre>
*
* @param species species of desired vector
- * @param bb the byte buffer
- * @param offset the offset into the byte buffer
+ * @param ms the memory segment
+ * @param offset the offset into the memory segment
* @param bo the intended byte order
- * @return a vector loaded from a byte buffer
+ * @return a vector loaded from the memory segment
* @throws IndexOutOfBoundsException
* if {@code offset+N*4 < 0}
- * or {@code offset+N*4 >= bb.limit()}
+ * or {@code offset+N*4 >= ms.byteSize()}
* for any lane {@code N} in the vector
+ * @throws IllegalArgumentException if the memory segment is a heap segment that is
+ * not backed by a {@code byte[]} array.
+ * @throws IllegalStateException if the memory segment's session is not alive,
+ * or if access occurs from a thread other than the thread owning the session.
+ * @since 19
*/
@ForceInline
public static
- IntVector fromByteBuffer(VectorSpecies<Integer> species,
- ByteBuffer bb, int offset,
- ByteOrder bo) {
- offset = checkFromIndexSize(offset, species.vectorByteSize(), bb.limit());
+ IntVector fromMemorySegment(VectorSpecies<Integer> species,
+ MemorySegment ms, long offset,
+ ByteOrder bo) {
+ offset = checkFromIndexSize(offset, species.vectorByteSize(), ms.byteSize());
IntSpecies vsp = (IntSpecies) species;
- return vsp.dummyVector().fromByteBuffer0(bb, offset).maybeSwap(bo);
+ return vsp.dummyVector().fromMemorySegment0(ms, offset).maybeSwap(bo);
}
/**
- * Loads a vector from a {@linkplain ByteBuffer byte buffer}
- * starting at an offset into the byte buffer
+ * Loads a vector from a {@linkplain MemorySegment memory segment}
+ * starting at an offset into the memory segment
* and using a mask.
* Lanes where the mask is unset are filled with the default
* value of {@code int} (zero).
* Bytes are composed into primitive lane elements according
* to the specified byte order.
* The vector is arranged into lanes according to
* <a href="Vector.html#lane-order">memory ordering</a>.
* <p>
* The following pseudocode illustrates the behavior:
* <pre>{@code
- * IntBuffer eb = bb.duplicate()
- * .position(offset)
- * .order(bo).asIntBuffer();
+ * var slice = ms.asSlice(offset);
* int[] ar = new int[species.length()];
* for (int n = 0; n < ar.length; n++) {
* if (m.laneIsSet(n)) {
- * ar[n] = eb.get(n);
* ar[n] = slice.getAtIndex(ValueLayout.JAVA_INT.withBitAlignment(8), n);
* }
* }
* IntVector r = IntVector.fromArray(species, ar, 0);
* }</pre>
* @implNote
* This operation is likely to be more efficient if
* the specified byte order is the same as
* {@linkplain ByteOrder#nativeOrder()
* the platform native order},
* since this method will not need to reorder
* the bytes of lane values.
*
* @param species species of desired vector
- * @param bb the byte buffer
- * @param offset the offset into the byte buffer
+ * @param ms the memory segment
+ * @param offset the offset into the memory segment
* @param bo the intended byte order
* @param m the mask controlling lane selection
- * @return a vector loaded from a byte buffer
+ * @return a vector loaded from the memory segment
* @throws IndexOutOfBoundsException
* if {@code offset+N*4 < 0}
- * or {@code offset+N*4 >= bb.limit()}
+ * or {@code offset+N*4 >= ms.byteSize()}
* for any lane {@code N} in the vector
* where the mask is set
+ * @throws IllegalArgumentException if the memory segment is a heap segment that is
+ * not backed by a {@code byte[]} array.
+ * @throws IllegalStateException if the memory segment's session is not alive,
+ * or if access occurs from a thread other than the thread owning the session.
+ * @since 19
*/
@ForceInline
public static
- IntVector fromByteBuffer(VectorSpecies<Integer> species,
- ByteBuffer bb, int offset,
- ByteOrder bo,
- VectorMask<Integer> m) {
+ IntVector fromMemorySegment(VectorSpecies<Integer> species,
+ MemorySegment ms, long offset,
+ ByteOrder bo,
+ VectorMask<Integer> m) {
IntSpecies vsp = (IntSpecies) species;
- if (offset >= 0 && offset <= (bb.limit() - species.vectorByteSize())) {
- return vsp.dummyVector().fromByteBuffer0(bb, offset, m).maybeSwap(bo);
+ if (offset >= 0 && offset <= (ms.byteSize() - species.vectorByteSize())) {
+ return vsp.dummyVector().fromMemorySegment0(ms, offset, m).maybeSwap(bo);
}
// FIXME: optimize
- checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
- ByteBuffer wb = wrapper(bb, bo);
- return vsp.ldOp(wb, offset, (AbstractMask<Integer>)m,
- (wb_, o, i) -> wb_.getInt(o + i * 4));
+ checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
+ return vsp.ldLongOp(ms, offset, m, IntVector::memorySegmentGet);
}
// Memory store operations
/**
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
this,
a, offset,
(arr, off, v)
- -> v.stOp(arr, off,
+ -> v.stOp(arr, (int) off,
(arr_, off_, i, e) -> arr_[off_ + i] = e));
}
/**
* Stores this vector into an array of type {@code int[]}
/**
* {@inheritDoc} <!--workaround-->
+ * @since 19
*/
@Override
@ForceInline
public final
- void intoByteArray(byte[] a, int offset,
- ByteOrder bo) {
- offset = checkFromIndexSize(offset, byteSize(), a.length);
- maybeSwap(bo).intoByteArray0(a, offset);
- }
-
- /**
- * {@inheritDoc} <!--workaround-->
- */
- @Override
- @ForceInline
- public final
- void intoByteArray(byte[] a, int offset,
- ByteOrder bo,
- VectorMask<Integer> m) {
- if (m.allTrue()) {
- intoByteArray(a, offset, bo);
- } else {
- IntSpecies vsp = vspecies();
- checkMaskFromIndexSize(offset, vsp, m, 4, a.length);
- maybeSwap(bo).intoByteArray0(a, offset, m);
+ void intoMemorySegment(MemorySegment ms, long offset,
+ ByteOrder bo) {
+ if (ms.isReadOnly()) {
+ throw new UnsupportedOperationException("Attempt to write a read-only segment");
}
- }
- /**
- * {@inheritDoc} <!--workaround-->
- */
- @Override
- @ForceInline
- public final
- void intoByteBuffer(ByteBuffer bb, int offset,
- ByteOrder bo) {
- if (ScopedMemoryAccess.isReadOnly(bb)) {
- throw new ReadOnlyBufferException();
- }
- offset = checkFromIndexSize(offset, byteSize(), bb.limit());
- maybeSwap(bo).intoByteBuffer0(bb, offset);
+ offset = checkFromIndexSize(offset, byteSize(), ms.byteSize());
+ maybeSwap(bo).intoMemorySegment0(ms, offset);
}
/**
* {@inheritDoc} <!--workaround-->
+ * @since 19
*/
@Override
@ForceInline
public final
- void intoByteBuffer(ByteBuffer bb, int offset,
- ByteOrder bo,
- VectorMask<Integer> m) {
+ void intoMemorySegment(MemorySegment ms, long offset,
+ ByteOrder bo,
+ VectorMask<Integer> m) {
if (m.allTrue()) {
- intoByteBuffer(bb, offset, bo);
+ intoMemorySegment(ms, offset, bo);
} else {
- if (bb.isReadOnly()) {
- throw new ReadOnlyBufferException();
+ if (ms.isReadOnly()) {
+ throw new UnsupportedOperationException("Attempt to write a read-only segment");
}
IntSpecies vsp = vspecies();
- checkMaskFromIndexSize(offset, vsp, m, 4, bb.limit());
- maybeSwap(bo).intoByteBuffer0(bb, offset, m);
+ checkMaskFromIndexSize(offset, vsp, m, 4, ms.byteSize());
+ maybeSwap(bo).intoMemorySegment0(ms, offset, m);
}
}
// ================================================
IntSpecies vsp = vspecies();
return VectorSupport.load(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
a, offset, vsp,
- (arr, off, s) -> s.ldOp(arr, off,
+ (arr, off, s) -> s.ldOp(arr, (int) off,
(arr_, off_, i) -> arr_[off_ + i]));
}
/*package-private*/
abstract
IntSpecies vsp = vspecies();
return VectorSupport.loadMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset), m,
a, offset, vsp,
- (arr, off, s, vm) -> s.ldOp(arr, off, vm,
+ (arr, off, s, vm) -> s.ldOp(arr, (int) off, vm,
(arr_, off_, i) -> arr_[off_ + i]));
}
/*package-private*/
abstract
s.vOp(vm, n -> c[idx + iMap[idy+n]]));
}
- @Override
abstract
- IntVector fromByteArray0(byte[] a, int offset);
+ IntVector fromMemorySegment0(MemorySegment ms, long offset);
@ForceInline
final
- IntVector fromByteArray0Template(byte[] a, int offset) {
+ IntVector fromMemorySegment0Template(MemorySegment ms, long offset) {
IntSpecies vsp = vspecies();
- return VectorSupport.load(
- vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset),
- a, offset, vsp,
- (arr, off, s) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- return s.ldOp(wb, off,
- (wb_, o, i) -> wb_.getInt(o + i * 4));
- });
- }
-
- abstract
- IntVector fromByteArray0(byte[] a, int offset, VectorMask<Integer> m);
- @ForceInline
- final
- <M extends VectorMask<Integer>>
- IntVector fromByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
- IntSpecies vsp = vspecies();
- m.check(vsp);
- return VectorSupport.loadMasked(
- vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset), m,
- a, offset, vsp,
- (arr, off, s, vm) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- return s.ldOp(wb, off, vm,
- (wb_, o, i) -> wb_.getInt(o + i * 4));
- });
- }
-
- abstract
- IntVector fromByteBuffer0(ByteBuffer bb, int offset);
- @ForceInline
- final
- IntVector fromByteBuffer0Template(ByteBuffer bb, int offset) {
- IntSpecies vsp = vspecies();
- return ScopedMemoryAccess.loadFromByteBuffer(
+ return ScopedMemoryAccess.loadFromMemorySegment(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- bb, offset, vsp,
- (buf, off, s) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- return s.ldOp(wb, off,
- (wb_, o, i) -> wb_.getInt(o + i * 4));
+ (MemorySegmentProxy) ms, offset, vsp,
+ (msp, off, s) -> {
+ return s.ldLongOp((MemorySegment) msp, off, IntVector::memorySegmentGet);
});
}
abstract
- IntVector fromByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
+ IntVector fromMemorySegment0(MemorySegment ms, long offset, VectorMask<Integer> m);
@ForceInline
final
<M extends VectorMask<Integer>>
- IntVector fromByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
+ IntVector fromMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
IntSpecies vsp = vspecies();
m.check(vsp);
- return ScopedMemoryAccess.loadFromByteBufferMasked(
+ return ScopedMemoryAccess.loadFromMemorySegmentMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- bb, offset, m, vsp,
- (buf, off, s, vm) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- return s.ldOp(wb, off, vm,
- (wb_, o, i) -> wb_.getInt(o + i * 4));
+ (MemorySegmentProxy) ms, offset, m, vsp,
+ (msp, off, s, vm) -> {
+ return s.ldLongOp((MemorySegment) msp, off, vm, IntVector::memorySegmentGet);
});
}
// Unchecked storing operations in native byte order.
// Caller is responsible for applying index checks, masking, and
VectorSupport.store(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
this, a, offset,
(arr, off, v)
- -> v.stOp(arr, off,
+ -> v.stOp(arr, (int) off,
(arr_, off_, i, e) -> arr_[off_+i] = e));
}
abstract
void intoArray0(int[] a, int offset, VectorMask<Integer> m);
VectorSupport.storeMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
a, arrayAddress(a, offset),
this, m, a, offset,
(arr, off, v, vm)
- -> v.stOp(arr, off, vm,
+ -> v.stOp(arr, (int) off, vm,
(arr_, off_, i, e) -> arr_[off_ + i] = e));
}
abstract
void intoArray0(int[] a, int offset,
arr[off + j] = e;
}));
}
- abstract
- void intoByteArray0(byte[] a, int offset);
- @ForceInline
- final
- void intoByteArray0Template(byte[] a, int offset) {
- IntSpecies vsp = vspecies();
- VectorSupport.store(
- vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset),
- this, a, offset,
- (arr, off, v) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- v.stOp(wb, off,
- (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
- });
- }
-
- abstract
- void intoByteArray0(byte[] a, int offset, VectorMask<Integer> m);
@ForceInline
final
- <M extends VectorMask<Integer>>
- void intoByteArray0Template(Class<M> maskClass, byte[] a, int offset, M m) {
+ void intoMemorySegment0(MemorySegment ms, long offset) {
IntSpecies vsp = vspecies();
- m.check(vsp);
- VectorSupport.storeMasked(
- vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- a, byteArrayAddress(a, offset),
- this, m, a, offset,
- (arr, off, v, vm) -> {
- ByteBuffer wb = wrapper(arr, NATIVE_ENDIAN);
- v.stOp(wb, off, vm,
- (tb_, o, i, e) -> tb_.putInt(o + i * 4, e));
- });
- }
-
- @ForceInline
- final
- void intoByteBuffer0(ByteBuffer bb, int offset) {
- IntSpecies vsp = vspecies();
- ScopedMemoryAccess.storeIntoByteBuffer(
+ ScopedMemoryAccess.storeIntoMemorySegment(
vsp.vectorType(), vsp.elementType(), vsp.laneCount(),
- this, bb, offset,
- (buf, off, v) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- v.stOp(wb, off,
- (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
+ this,
+ (MemorySegmentProxy) ms, offset,
+ (msp, off, v) -> {
+ v.stLongOp((MemorySegment) msp, off, IntVector::memorySegmentSet);
});
}
abstract
- void intoByteBuffer0(ByteBuffer bb, int offset, VectorMask<Integer> m);
+ void intoMemorySegment0(MemorySegment ms, long offset, VectorMask<Integer> m);
@ForceInline
final
<M extends VectorMask<Integer>>
- void intoByteBuffer0Template(Class<M> maskClass, ByteBuffer bb, int offset, M m) {
+ void intoMemorySegment0Template(Class<M> maskClass, MemorySegment ms, long offset, M m) {
IntSpecies vsp = vspecies();
m.check(vsp);
- ScopedMemoryAccess.storeIntoByteBufferMasked(
+ ScopedMemoryAccess.storeIntoMemorySegmentMasked(
vsp.vectorType(), maskClass, vsp.elementType(), vsp.laneCount(),
- this, m, bb, offset,
- (buf, off, v, vm) -> {
- ByteBuffer wb = wrapper(buf, NATIVE_ENDIAN);
- v.stOp(wb, off, vm,
- (wb_, o, i, e) -> wb_.putInt(o + i * 4, e));
+ this, m,
+ (MemorySegmentProxy) ms, offset,
+ (msp, off, v, vm) -> {
+ v.stLongOp((MemorySegment) msp, off, vm, IntVector::memorySegmentSet);
});
}
// End of low-level memory operations.
int limit) {
((AbstractMask<Integer>)m)
.checkIndexByLane(offset, limit, vsp.iota(), scale);
}
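+ // Long-offset overload of the bounds check above, for MemorySegment accesses.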
+ private static
+ void checkMaskFromIndexSize(long offset,
+ IntSpecies vsp,
+ VectorMask<Integer> m,
+ int scale,
+ long limit) {
+ ((AbstractMask<Integer>)m)
+ .checkIndexByLane(offset, limit, vsp.iota(), scale);
+ }
+
@ForceInline
private void conditionalStoreNYI(int offset,
IntSpecies vsp,
VectorMask<Integer> m,
int scale,
VectorMask<Integer> m,
FLdOp<M> f) {
return dummyVector().ldOp(memory, offset, m, f);
}
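+ // Species-level forwarders: delegate the long-offset load/store fallbacks to the dummy vector.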
+ /*package-private*/
+ @ForceInline
+ IntVector ldLongOp(MemorySegment memory, long offset,
+ FLdLongOp f) {
+ return dummyVector().ldLongOp(memory, offset, f);
+ }
+
+ /*package-private*/
+ @ForceInline
+ IntVector ldLongOp(MemorySegment memory, long offset,
+ VectorMask<Integer> m,
+ FLdLongOp f) {
+ return dummyVector().ldLongOp(memory, offset, m, f);
+ }
+
/*package-private*/
@ForceInline
<M> void stOp(M memory, int offset, FStOp<M> f) {
dummyVector().stOp(memory, offset, f);
}
AbstractMask<Integer> m,
FStOp<M> f) {
dummyVector().stOp(memory, offset, m, f);
}
+ /*package-private*/
+ @ForceInline
+ void stLongOp(MemorySegment memory, long offset, FStLongOp f) {
+ dummyVector().stLongOp(memory, offset, f);
+ }
+
+ /*package-private*/
+ @ForceInline
+ void stLongOp(MemorySegment memory, long offset,
+ AbstractMask<Integer> m,
+ FStLongOp f) {
+ dummyVector().stLongOp(memory, offset, m, f);
+ }
+
// N.B. Make sure these constant vectors and
// masks load up correctly into registers.
//
// Also, see if we can avoid all that switching.
// Could we cache both vectors and both masks in