1 /*
  2  * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.  Oracle designates this
  8  * particular file as subject to the "Classpath" exception as provided
  9  * by Oracle in the LICENSE file that accompanied this code.
 10  *
 11  * This code is distributed in the hope that it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 14  * version 2 for more details (a copy is included in the LICENSE file that
 15  * accompanied this code).
 16  *
 17  * You should have received a copy of the GNU General Public License version
 18  * 2 along with this work; if not, write to the Free Software Foundation,
 19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 20  *
 21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 22  * or visit www.oracle.com if you need additional information or have any
 23  * questions.
 24  */
 25 
 26 package jdk.internal.misc;
 27 
 28 import java.lang.annotation.ElementType;
 29 import java.lang.annotation.Retention;
 30 import java.lang.annotation.RetentionPolicy;
 31 import java.lang.annotation.Target;
 32 import java.lang.ref.Reference;
 33 import java.io.FileDescriptor;
 34 import java.nio.Buffer;
 35 import java.nio.ByteBuffer;
 36 
 37 import jdk.internal.access.JavaNioAccess;
 38 import jdk.internal.access.SharedSecrets;
 39 import jdk.internal.access.foreign.MemorySegmentProxy;
 40 import jdk.internal.util.ArraysSupport;
 41 import jdk.internal.vm.annotation.ForceInline;
 42 import jdk.internal.vm.vector.VectorSupport;
 43 
 44 
 45 /**
 46  * This class defines low-level methods to access on-heap and off-heap memory. The methods in this class
 47  * can be thought of as thin wrappers around methods provided in the {@link Unsafe} class. All the methods in this
 * class accept one or more {@link Scope} parameters, which are used to validate whether access to memory
 49  * can be performed in a safe fashion - more specifically, to ensure that the memory being accessed has not
 50  * already been released (which would result in a hard VM crash).
 51  * <p>
 52  * Accessing and releasing memory from a single thread is not problematic - after all, a given thread cannot,
 53  * at the same time, access a memory region <em>and</em> free it. But ensuring correctness of memory access
 54  * when multiple threads are involved is much trickier, as there can be cases where a thread is accessing
 55  * a memory region while another thread is releasing it.
 56  * <p>
 57  * This class provides tools to manage races when multiple threads are accessing and/or releasing the same memory
 58  * region concurrently. More specifically, when a thread wants to release a memory region, it should call the
 59  * {@link #closeScope(jdk.internal.misc.ScopedMemoryAccess.Scope)} method provided by this class. This method initiates
 60  * thread-local handshakes with all the other VM threads, which are then stopped one by one. If any thread is found
 61  * accessing memory that is associated to the very scope object being closed, that thread execution is asynchronously
 62  * interrupted with a {@link Scope.ScopedAccessError}.
 63  * <p>
 64  * This synchronization strategy relies on the idea that accessing memory is atomic with respect to checking the
 65  * validity of the scope associated with that memory region - that is, a thread that wants to perform memory access will be
 66  * suspended either <em>before</em> a scope check or <em>after</em> the memory access. To ensure this atomicity,
 67  * all methods in this class are marked with the special {@link Scoped} annotation, which is recognized by the VM,
 68  * and used during the thread-local handshake to detect (and stop) threads performing potentially problematic memory access
 69  * operations. Additionally, to make sure that the scope object(s) of the memory being accessed is always
 70  * reachable during an access operation, all the methods in this class add reachability fences around the underlying
 71  * unsafe access.
 72  * <p>
 73  * This form of synchronization allows APIs to use plain memory access without any other form of synchronization
 * which might be deemed too expensive; in other words, this approach prioritizes the performance of memory access over
 75  * that of releasing a shared memory resource.
 76  */
 77 public class ScopedMemoryAccess {
 78 
    private static final Unsafe UNSAFE = Unsafe.getUnsafe();

    // Binds the native entry points (closeScope0); must run before any of them is called.
    private static native void registerNatives();
    static {
        registerNatives();
    }

    /**
     * Closes the given scope. Per the class javadoc, this initiates thread-local
     * handshakes with the other VM threads; any thread found executing a
     * {@link Scoped} method on memory tied to this scope is asynchronously stopped
     * with the shared {@link Scope.ScopedAccessError} instance.
     *
     * @param scope the scope being closed
     * @return the result of the native close operation -- presumably whether the
     *         close succeeded; TODO confirm exact contract of {@code closeScope0}
     */
    public boolean closeScope(Scope scope) {
        return closeScope0(scope, Scope.ScopedAccessError.INSTANCE);
    }

    native boolean closeScope0(Scope scope, Scope.ScopedAccessError exception);

    // Singleton: instantiation is private; clients go through getScopedMemoryAccess().
    private ScopedMemoryAccess() {}

    private static final ScopedMemoryAccess theScopedMemoryAccess = new ScopedMemoryAccess();

    /** Returns the shared {@code ScopedMemoryAccess} singleton. */
    public static ScopedMemoryAccess getScopedMemoryAccess() {
        return theScopedMemoryAccess;
    }
 99 
100     /**
101      * Scope interface used during scoped memory access operations. A scope can be thought of as an object
102      * which embodies the temporal checks associated with a given memory region.
103      */
104     public interface Scope {
105 
106         void checkValidState();
107 
108         Thread ownerThread();
109 
110         void acquire0();
111 
112         void release0();
113 
114         /**
115          * Error thrown when memory access fails because the memory has already been released.
116          * Note: for performance reasons, this exception is never created by client; instead a shared instance
117          * is thrown (sometimes, this instance can be thrown asynchronously inside VM code). For this reason,
118          * it is important for clients to always catch this exception and throw a regular exception instead
119          * (which contains full stack information).
120          */
121         final class ScopedAccessError extends Error {
122             private ScopedAccessError() {
123                 super("Attempt to access an already released memory resource", null, false, false);
124             }
125             static final long serialVersionUID = 1L;
126 
127             public static final ScopedAccessError INSTANCE = new ScopedAccessError();
128         }
129     }
130 
    // Marks methods whose memory access must be atomic with respect to the scope
    // check (see class javadoc): the VM recognizes this annotation during the
    // thread-local handshake initiated by closeScope, to detect (and stop) threads
    // that are inside such a method.
    @Target({ElementType.METHOD, ElementType.CONSTRUCTOR})
    @Retention(RetentionPolicy.RUNTIME)
    @interface Scoped { }
134 
135     // bulk ops
136 
137     @ForceInline
138     public void copyMemory(Scope srcScope, Scope dstScope,
139                                    Object srcBase, long srcOffset,
140                                    Object destBase, long destOffset,
141                                    long bytes) {
142           try {
143               copyMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes);
144           } catch (Scope.ScopedAccessError ex) {
145               throw new IllegalStateException("This segment is already closed");
146           }
147     }
148 
    // Scope-validated bulk copy. @Scoped makes the scope check and the unsafe copy
    // appear atomic to closeScope's thread-local handshake (see class javadoc), so
    // the exact shape of this method (check, access, reachability fences) must be
    // preserved.
    @ForceInline @Scoped
    private void copyMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes) {
        try {
            // A null scope means no liveness validation is required for that side.
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copyMemory(srcBase, srcOffset, destBase, destOffset, bytes);
        } finally {
            // Keep both scope objects reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
167 
168     @ForceInline
169     public void copySwapMemory(Scope srcScope, Scope dstScope,
170                                    Object srcBase, long srcOffset,
171                                    Object destBase, long destOffset,
172                                    long bytes, long elemSize) {
173           try {
174               copySwapMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
175           } catch (Scope.ScopedAccessError ex) {
176               throw new IllegalStateException("This segment is already closed");
177           }
178     }
179 
    // Scope-validated swapping copy. @Scoped makes the scope check and the unsafe
    // access appear atomic to closeScope's handshake (see class javadoc); the
    // check/access/fence structure must be preserved exactly.
    @ForceInline @Scoped
    private void copySwapMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes, long elemSize) {
        try {
            // A null scope means no liveness validation is required for that side.
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copySwapMemory(srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
        } finally {
            // Keep both scope objects reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
198 
199     @ForceInline
200     public void setMemory(Scope scope, Object o, long offset, long bytes, byte value) {
201         try {
202             setMemoryInternal(scope, o, offset, bytes, value);
203         } catch (Scope.ScopedAccessError ex) {
204             throw new IllegalStateException("This segment is already closed");
205         }
206     }
207 
    // Scope-validated memory fill. @Scoped makes the scope check and the unsafe
    // access appear atomic to closeScope's handshake (see class javadoc).
    @ForceInline @Scoped
    private void setMemoryInternal(Scope scope, Object o, long offset, long bytes, byte value) {
        try {
            // A null scope means no liveness validation is required.
            if (scope != null) {
                scope.checkValidState();
            }
            UNSAFE.setMemory(o, offset, bytes, value);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
219 
220     @ForceInline
221     public int vectorizedMismatch(Scope aScope, Scope bScope,
222                                              Object a, long aOffset,
223                                              Object b, long bOffset,
224                                              int length,
225                                              int log2ArrayIndexScale) {
226         try {
227             return vectorizedMismatchInternal(aScope, bScope, a, aOffset, b, bOffset, length, log2ArrayIndexScale);
228         } catch (Scope.ScopedAccessError ex) {
229             throw new IllegalStateException("This segment is already closed");
230         }
231     }
232 
    // Scope-validated mismatch search. @Scoped makes the scope checks and the
    // access appear atomic to closeScope's handshake (see class javadoc).
    @ForceInline @Scoped
    private int vectorizedMismatchInternal(Scope aScope, Scope bScope,
                                             Object a, long aOffset,
                                             Object b, long bOffset,
                                             int length,
                                             int log2ArrayIndexScale) {
        try {
            // A null scope means no liveness validation is required for that side.
            if (aScope != null) {
                aScope.checkValidState();
            }
            if (bScope != null) {
                bScope.checkValidState();
            }
            return ArraysSupport.vectorizedMismatch(a, aOffset, b, bOffset, length, log2ArrayIndexScale);
        } finally {
            // Keep both scope objects reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(aScope);
            Reference.reachabilityFence(bScope);
        }
    }
252 
253     @ForceInline
254     public boolean isLoaded(Scope scope, long address, boolean isSync, long size) {
255         try {
256             return isLoadedInternal(scope, address, isSync, size);
257         } catch (Scope.ScopedAccessError ex) {
258             throw new IllegalStateException("This segment is already closed");
259         }
260     }
261 
    // Scope-validated delegation to JavaNioAccess.isLoaded. @Scoped makes the check
    // and the access appear atomic to closeScope's handshake (see class javadoc).
    // NOTE(review): public, unlike the other *Internal methods which are private --
    // presumably unintended; verify before narrowing.
    @ForceInline @Scoped
    public boolean isLoadedInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            // A null scope means no liveness validation is required.
            if (scope != null) {
                scope.checkValidState();
            }
            return SharedSecrets.getJavaNioAccess().isLoaded(address, isSync, size);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
273 
274     @ForceInline
275     public void load(Scope scope, long address, boolean isSync, long size) {
276         try {
277             loadInternal(scope, address, isSync, size);
278         } catch (Scope.ScopedAccessError ex) {
279             throw new IllegalStateException("This segment is already closed");
280         }
281     }
282 
    // Scope-validated delegation to JavaNioAccess.load. @Scoped makes the check and
    // the access appear atomic to closeScope's handshake (see class javadoc).
    // NOTE(review): public, unlike the other *Internal methods -- verify before narrowing.
    @ForceInline @Scoped
    public void loadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            // A null scope means no liveness validation is required.
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().load(address, isSync, size);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
294 
295     @ForceInline
296     public void unload(Scope scope, long address, boolean isSync, long size) {
297         try {
298             unloadInternal(scope, address, isSync, size);
299         } catch (Scope.ScopedAccessError ex) {
300             throw new IllegalStateException("This segment is already closed");
301         }
302     }
303 
    // Scope-validated delegation to JavaNioAccess.unload. @Scoped makes the check
    // and the access appear atomic to closeScope's handshake (see class javadoc).
    // NOTE(review): public, unlike the other *Internal methods -- verify before narrowing.
    @ForceInline @Scoped
    public void unloadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            // A null scope means no liveness validation is required.
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().unload(address, isSync, size);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
315 
316     @ForceInline
317     public void force(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
318         try {
319             forceInternal(scope, fd, address, isSync, index, length);
320         } catch (Scope.ScopedAccessError ex) {
321             throw new IllegalStateException("This segment is already closed");
322         }
323     }
324 
    // Scope-validated delegation to JavaNioAccess.force. @Scoped makes the check
    // and the access appear atomic to closeScope's handshake (see class javadoc).
    // NOTE(review): public, unlike the other *Internal methods -- verify before narrowing.
    @ForceInline @Scoped
    public void forceInternal(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
        try {
            // A null scope means no liveness validation is required.
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().force(fd, address, isSync, index, length);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
336 
337     // ByteBuffer vector access ops
338 
    // Buffer access constants, to be initialized when required.
    // Avoids a null value for NIO_ACCESS, due to class initialization dependencies
    static final class BufferAccess {
        // Buffer.address
        static final long BUFFER_ADDRESS
                = UNSAFE.objectFieldOffset(Buffer.class, "address");

        // ByteBuffer.hb
        static final long BYTE_BUFFER_HB
                = UNSAFE.objectFieldOffset(ByteBuffer.class, "hb");

        // ByteBuffer.isReadOnly
        static final long BYTE_BUFFER_IS_READ_ONLY
                = UNSAFE.objectFieldOffset(ByteBuffer.class, "isReadOnly");

        // Reads the buffer's backing object (the hb field). Presumably null for
        // direct (off-heap) buffers -- TODO confirm against ByteBuffer internals.
        @ForceInline
        static Object bufferBase(ByteBuffer bb) {
            return UNSAFE.getReference(bb, BYTE_BUFFER_HB);
        }

        // Returns the buffer's raw address field plus the given offset.
        @ForceInline
        static long bufferAddress(ByteBuffer bb, long offset) {
            return UNSAFE.getLong(bb, BUFFER_ADDRESS) + offset;
        }

        static final JavaNioAccess NIO_ACCESS = SharedSecrets.getJavaNioAccess();

        // Returns the scope of the memory segment backing bb, or null when the
        // buffer is not backed by a memory segment.
        @ForceInline
        static ScopedMemoryAccess.Scope scope(ByteBuffer bb) {
            MemorySegmentProxy segmentProxy = NIO_ACCESS.bufferSegment(bb);
            return segmentProxy != null ?
                    segmentProxy.scope() : null;
        }
    }
372 
    // Reads ByteBuffer.isReadOnly directly via its field offset, bypassing the
    // virtual isReadOnly() call.
    @ForceInline
    public static boolean isReadOnly(ByteBuffer bb) {
        return UNSAFE.getBoolean(bb, BufferAccess.BYTE_BUFFER_IS_READ_ONLY);
    }
377 
378     @ForceInline
379     public static
380     <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
381     V loadFromByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
382                           ByteBuffer bb, int offset,
383                           S s,
384                           VectorSupport.LoadOperation<ByteBuffer, V, S> defaultImpl) {
385         try {
386             return loadFromByteBufferScoped(
387                     BufferAccess.scope(bb),
388                     vmClass, e, length,
389                     bb, offset,
390                     s,
391                     defaultImpl);
392         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
393             throw new IllegalStateException("This segment is already closed");
394         }
395     }
396 
    // Scope-validated vector load from a ByteBuffer. @Scoped makes the scope check
    // and the access appear atomic to closeScope's handshake (see class javadoc).
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
    V loadFromByteBufferScoped(ScopedMemoryAccess.Scope scope,
                          Class<? extends V> vmClass, Class<E> e, int length,
                          ByteBuffer bb, int offset,
                          S s,
                          VectorSupport.LoadOperation<ByteBuffer, V, S> defaultImpl) {
        try {
            // A null scope (buffer not segment-backed) needs no liveness validation.
            if (scope != null) {
                scope.checkValidState();
            }

            final byte[] base = (byte[]) BufferAccess.bufferBase(bb);

            return VectorSupport.load(vmClass, e, length,
                      base, BufferAccess.bufferAddress(bb, offset),
                      bb, offset, s,
                      defaultImpl);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
421 
422     @ForceInline
423     public static
424     <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>,
425      M extends VectorSupport.VectorMask<E>>
426     V loadFromByteBufferMasked(Class<? extends V> vmClass, Class<M> maskClass, Class<E> e,
427                                int length, ByteBuffer bb, int offset, M m, S s,
428                                VectorSupport.LoadVectorMaskedOperation<ByteBuffer, V, S, M> defaultImpl) {
429         try {
430             return loadFromByteBufferMaskedScoped(
431                     BufferAccess.scope(bb),
432                     vmClass, maskClass, e, length,
433                     bb, offset, m,
434                     s,
435                     defaultImpl);
436         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
437             throw new IllegalStateException("This segment is already closed");
438         }
439     }
440 
    // Scope-validated masked vector load from a ByteBuffer. @Scoped makes the scope
    // check and the access appear atomic to closeScope's handshake (see class javadoc).
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>,
     M extends VectorSupport.VectorMask<E>>
    V loadFromByteBufferMaskedScoped(ScopedMemoryAccess.Scope scope, Class<? extends V> vmClass,
                                     Class<M> maskClass, Class<E> e, int length,
                                     ByteBuffer bb, int offset, M m,
                                     S s,
                                     VectorSupport.LoadVectorMaskedOperation<ByteBuffer, V, S, M> defaultImpl) {
        try {
            // A null scope (buffer not segment-backed) needs no liveness validation.
            if (scope != null) {
                scope.checkValidState();
            }

            return VectorSupport.loadMasked(vmClass, maskClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset), m,
                    bb, offset, s,
                    defaultImpl);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
464 
465     @ForceInline
466     public static
467     <V extends VectorSupport.Vector<E>, E>
468     void storeIntoByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
469                              V v,
470                              ByteBuffer bb, int offset,
471                              VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
472         try {
473             storeIntoByteBufferScoped(
474                     BufferAccess.scope(bb),
475                     vmClass, e, length,
476                     v,
477                     bb, offset,
478                     defaultImpl);
479         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
480             throw new IllegalStateException("This segment is already closed");
481         }
482     }
483 
    // Scope-validated vector store into a ByteBuffer. @Scoped makes the scope check
    // and the access appear atomic to closeScope's handshake (see class javadoc).
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E>
    void storeIntoByteBufferScoped(ScopedMemoryAccess.Scope scope,
                                   Class<? extends V> vmClass, Class<E> e, int length,
                                   V v,
                                   ByteBuffer bb, int offset,
                                   VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
        try {
            // A null scope (buffer not segment-backed) needs no liveness validation.
            if (scope != null) {
                scope.checkValidState();
            }

            final byte[] base = (byte[]) BufferAccess.bufferBase(bb);

            VectorSupport.store(vmClass, e, length,
                                base, BufferAccess.bufferAddress(bb, offset),
                                v,
                                bb, offset,
                                defaultImpl);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
509 
510     @ForceInline
511     public static
512     <V extends VectorSupport.Vector<E>, E, M extends VectorSupport.VectorMask<E>>
513     void storeIntoByteBufferMasked(Class<? extends V> vmClass, Class<M> maskClass, Class<E> e,
514                                    int length, V v, M m,
515                                    ByteBuffer bb, int offset,
516                                    VectorSupport.StoreVectorMaskedOperation<ByteBuffer, V, M> defaultImpl) {
517         try {
518             storeIntoByteBufferMaskedScoped(
519                     BufferAccess.scope(bb),
520                     vmClass, maskClass, e, length,
521                     v, m,
522                     bb, offset,
523                     defaultImpl);
524         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
525             throw new IllegalStateException("This segment is already closed");
526         }
527     }
528 
    // Scope-validated masked vector store into a ByteBuffer. @Scoped makes the scope
    // check and the access appear atomic to closeScope's handshake (see class javadoc).
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, M extends VectorSupport.VectorMask<E>>
    void storeIntoByteBufferMaskedScoped(ScopedMemoryAccess.Scope scope,
                                         Class<? extends V> vmClass, Class<M> maskClass,
                                         Class<E> e, int length, V v, M m,
                                         ByteBuffer bb, int offset,
                                         VectorSupport.StoreVectorMaskedOperation<ByteBuffer, V, M> defaultImpl) {
        try {
            // A null scope (buffer not segment-backed) needs no liveness validation.
            if (scope != null) {
                scope.checkValidState();
            }

            VectorSupport.storeMasked(vmClass, maskClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset),
                    v, m,
                    bb, offset,
                    defaultImpl);
        } finally {
            // Keep the scope object reachable for the whole access (see class javadoc).
            Reference.reachabilityFence(scope);
        }
    }
552 
553     // typed-ops here
554 
555     // Note: all the accessor methods defined below take advantage of argument type profiling
556     // (see src/hotspot/share/oops/methodData.cpp) which greatly enhances performance when the same accessor
557     // method is used repeatedly with different 'base' objects.