1 /*
  2  * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.  Oracle designates this
  8  * particular file as subject to the "Classpath" exception as provided
  9  * by Oracle in the LICENSE file that accompanied this code.
 10  *
 11  * This code is distributed in the hope that it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 14  * version 2 for more details (a copy is included in the LICENSE file that
 15  * accompanied this code).
 16  *
 17  * You should have received a copy of the GNU General Public License version
 18  * 2 along with this work; if not, write to the Free Software Foundation,
 19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 20  *
 21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 22  * or visit www.oracle.com if you need additional information or have any
 23  * questions.
 24  */
 25 
 26 package jdk.internal.misc;
 27 
 28 import java.lang.annotation.ElementType;
 29 import java.lang.annotation.Retention;
 30 import java.lang.annotation.RetentionPolicy;
 31 import java.lang.annotation.Target;
 32 import java.lang.ref.Reference;
 33 import java.io.FileDescriptor;
 34 import java.nio.Buffer;
 35 import java.nio.ByteBuffer;
 36 
 37 import jdk.internal.access.JavaNioAccess;
 38 import jdk.internal.access.SharedSecrets;
 39 import jdk.internal.access.foreign.MemorySegmentProxy;
 40 import jdk.internal.util.ArraysSupport;
 41 import jdk.internal.vm.annotation.ForceInline;
 42 import jdk.internal.vm.vector.VectorSupport;
 43 
 44 
 45 /**
 46  * This class defines low-level methods to access on-heap and off-heap memory. The methods in this class
 47  * can be thought of as thin wrappers around methods provided in the {@link Unsafe} class. All the methods in this
 * class accept one or more {@link Scope} parameters, which are used to validate whether access to memory
 49  * can be performed in a safe fashion - more specifically, to ensure that the memory being accessed has not
 50  * already been released (which would result in a hard VM crash).
 51  * <p>
 52  * Accessing and releasing memory from a single thread is not problematic - after all, a given thread cannot,
 53  * at the same time, access a memory region <em>and</em> free it. But ensuring correctness of memory access
 54  * when multiple threads are involved is much trickier, as there can be cases where a thread is accessing
 55  * a memory region while another thread is releasing it.
 56  * <p>
 57  * This class provides tools to manage races when multiple threads are accessing and/or releasing the same memory
 58  * region concurrently. More specifically, when a thread wants to release a memory region, it should call the
 59  * {@link #closeScope(jdk.internal.misc.ScopedMemoryAccess.Scope)} method provided by this class. This method initiates
 60  * thread-local handshakes with all the other VM threads, which are then stopped one by one. If any thread is found
 61  * accessing memory that is associated to the very scope object being closed, that thread execution is asynchronously
 62  * interrupted with a {@link Scope.ScopedAccessError}.
 63  * <p>
 64  * This synchronization strategy relies on the idea that accessing memory is atomic with respect to checking the
 65  * validity of the scope associated with that memory region - that is, a thread that wants to perform memory access will be
 66  * suspended either <em>before</em> a scope check or <em>after</em> the memory access. To ensure this atomicity,
 67  * all methods in this class are marked with the special {@link Scoped} annotation, which is recognized by the VM,
 68  * and used during the thread-local handshake to detect (and stop) threads performing potentially problematic memory access
 69  * operations. Additionally, to make sure that the scope object(s) of the memory being accessed is always
 70  * reachable during an access operation, all the methods in this class add reachability fences around the underlying
 71  * unsafe access.
 72  * <p>
 73  * This form of synchronization allows APIs to use plain memory access without any other form of synchronization
 * which might be deemed too expensive; in other words, this approach prioritizes the performance of memory access over
 75  * that of releasing a shared memory resource.
 76  */
 77 public class ScopedMemoryAccess {
 78 
    private static final Unsafe UNSAFE = Unsafe.getUnsafe();

    // Registers the native method implementations of this class (standard JDK
    // registerNatives idiom); must run before any native method is invoked.
    private static native void registerNatives();
    static {
        registerNatives();
    }

    /**
     * Closes the given scope. As described in the class javadoc, this initiates thread-local
     * handshakes with other VM threads; any thread found inside a {@link Scoped} method accessing
     * memory associated with {@code scope} is interrupted with the shared
     * {@link Scope.ScopedAccessError#INSTANCE}.
     *
     * @return the result of {@link #closeScope0} — NOTE(review): presumably whether the close
     *         succeeded; the native side is not visible here, confirm exact semantics.
     */
    public boolean closeScope(Scope scope) {
        return closeScope0(scope, Scope.ScopedAccessError.INSTANCE);
    }

    native boolean closeScope0(Scope scope, Scope.ScopedAccessError exception);

    // Singleton: instances are only obtained via getScopedMemoryAccess().
    private ScopedMemoryAccess() {}

    private static final ScopedMemoryAccess theScopedMemoryAccess = new ScopedMemoryAccess();

    /**
     * Returns the shared {@code ScopedMemoryAccess} singleton.
     */
    public static ScopedMemoryAccess getScopedMemoryAccess() {
        return theScopedMemoryAccess;
    }
 99 
100     /**
101      * Scope interface used during scoped memory access operations. A scope can be thought of as an object
102      * which embodies the temporal checks associated with a given memory region.
103      */
104     public interface Scope {
105 
106        interface Handle {
107             Scope scope();
108         }
109 
110         void checkValidState();
111 
112         Thread ownerThread();
113 
114         boolean isImplicit();
115 
116         Handle acquire();
117 
118         void release(Handle handle);
119 
120         /**
121          * Error thrown when memory access fails because the memory has already been released.
122          * Note: for performance reasons, this exception is never created by client; instead a shared instance
123          * is thrown (sometimes, this instance can be thrown asynchronously inside VM code). For this reason,
124          * it is important for clients to always catch this exception and throw a regular exception instead
125          * (which contains full stack information).
126          */
127         final class ScopedAccessError extends Error {
128             private ScopedAccessError() {
129                 super("Attempt to access an already released memory resource", null, false, false);
130             }
131             static final long serialVersionUID = 1L;
132 
133             public static final ScopedAccessError INSTANCE = new ScopedAccessError();
134         }
135     }
136 
    // Marker annotation recognized by the VM during the thread-local handshake (see class
    // javadoc): a method carrying it is treated as atomic with respect to scope validity checks,
    // so a thread is stopped either before the scope check or after the memory access.
    @Target({ElementType.METHOD, ElementType.CONSTRUCTOR})
    @Retention(RetentionPolicy.RUNTIME)
    @interface Scoped { }
140 
141     // bulk ops
142 
    /**
     * Copies {@code bytes} bytes from the source region to the destination region, validating
     * both scopes first. Either scope may be {@code null}, in which case no check is performed
     * for that side.
     *
     * @throws IllegalStateException if a non-null scope has already been closed
     */
    @ForceInline
    public void copyMemory(Scope srcScope, Scope dstScope,
                                   Object srcBase, long srcOffset,
                                   Object destBase, long destOffset,
                                   long bytes) {
          try {
              copyMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes);
          } catch (Scope.ScopedAccessError ex) {
              // The shared error carries no stack trace; surface a regular exception instead
              // (see ScopedAccessError javadoc).
              throw new IllegalStateException("This segment is already closed");
          }
    }

    // @Scoped: the scope checks and the unsafe copy below must remain together in this single
    // method so the VM handshake (see closeScope) can treat them atomically; the reachability
    // fences keep both scope objects alive for the whole duration of the access.
    @ForceInline @Scoped
    private void copyMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes) {
        try {
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copyMemory(srcBase, srcOffset, destBase, destOffset, bytes);
        } finally {
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
173 
    /**
     * Copies {@code bytes} bytes from the source region to the destination region, byte-swapping
     * each element of {@code elemSize} bytes, validating both scopes first. Either scope may be
     * {@code null}, in which case no check is performed for that side.
     *
     * @throws IllegalStateException if a non-null scope has already been closed
     */
    @ForceInline
    public void copySwapMemory(Scope srcScope, Scope dstScope,
                                   Object srcBase, long srcOffset,
                                   Object destBase, long destOffset,
                                   long bytes, long elemSize) {
          try {
              copySwapMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
          } catch (Scope.ScopedAccessError ex) {
              // The shared error carries no stack trace; surface a regular exception instead.
              throw new IllegalStateException("This segment is already closed");
          }
    }

    // @Scoped: the scope checks and the unsafe copy must remain together in this single method so
    // the VM handshake can treat them atomically; the fences keep both scopes reachable.
    @ForceInline @Scoped
    private void copySwapMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes, long elemSize) {
        try {
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copySwapMemory(srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
        } finally {
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
204 
    /**
     * Sets {@code bytes} bytes of the given region to {@code value}, validating the scope first.
     * The scope may be {@code null}, in which case no check is performed.
     *
     * @throws IllegalStateException if the (non-null) scope has already been closed
     */
    @ForceInline
    public void setMemory(Scope scope, Object o, long offset, long bytes, byte value) {
        try {
            setMemoryInternal(scope, o, offset, bytes, value);
        } catch (Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: the scope check and the unsafe write must remain together in this single method so
    // the VM handshake can treat them atomically; the fence keeps the scope reachable.
    @ForceInline @Scoped
    private void setMemoryInternal(Scope scope, Object o, long offset, long bytes, byte value) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            UNSAFE.setMemory(o, offset, bytes, value);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
225 
    /**
     * Finds the first mismatching index between the two regions (see
     * {@link ArraysSupport#vectorizedMismatch}), validating both scopes first. Either scope may
     * be {@code null}, in which case no check is performed for that side.
     *
     * @throws IllegalStateException if a non-null scope has already been closed
     */
    @ForceInline
    public int vectorizedMismatch(Scope aScope, Scope bScope,
                                             Object a, long aOffset,
                                             Object b, long bOffset,
                                             int length,
                                             int log2ArrayIndexScale) {
        try {
            return vectorizedMismatchInternal(aScope, bScope, a, aOffset, b, bOffset, length, log2ArrayIndexScale);
        } catch (Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: the scope checks and the comparison must remain together in this single method so
    // the VM handshake can treat them atomically; the fences keep both scopes reachable.
    @ForceInline @Scoped
    private int vectorizedMismatchInternal(Scope aScope, Scope bScope,
                                             Object a, long aOffset,
                                             Object b, long bOffset,
                                             int length,
                                             int log2ArrayIndexScale) {
        try {
            if (aScope != null) {
                aScope.checkValidState();
            }
            if (bScope != null) {
                bScope.checkValidState();
            }
            return ArraysSupport.vectorizedMismatch(a, aOffset, b, bOffset, length, log2ArrayIndexScale);
        } finally {
            Reference.reachabilityFence(aScope);
            Reference.reachabilityFence(bScope);
        }
    }
258 
    /**
     * Tells whether the given memory region is resident in physical memory (delegates to
     * {@link JavaNioAccess#isLoaded}), validating the scope first. The scope may be {@code null},
     * in which case no check is performed.
     *
     * @throws IllegalStateException if the (non-null) scope has already been closed
     */
    @ForceInline
    public boolean isLoaded(Scope scope, long address, boolean isSync, long size) {
        try {
            return isLoadedInternal(scope, address, isSync, size);
        } catch (Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: scope check and access must remain together so the VM handshake can treat them
    // atomically; the fence keeps the scope reachable.
    // NOTE(review): declared public, unlike the private *Internal methods above — confirm whether
    // the wider visibility is intentional.
    @ForceInline @Scoped
    public boolean isLoadedInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            return SharedSecrets.getJavaNioAccess().isLoaded(address, isSync, size);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
279 
    /**
     * Loads the given memory region into physical memory (delegates to
     * {@link JavaNioAccess#load}), validating the scope first. The scope may be {@code null},
     * in which case no check is performed.
     *
     * @throws IllegalStateException if the (non-null) scope has already been closed
     */
    @ForceInline
    public void load(Scope scope, long address, boolean isSync, long size) {
        try {
            loadInternal(scope, address, isSync, size);
        } catch (Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: scope check and access must remain together so the VM handshake can treat them
    // atomically; the fence keeps the scope reachable.
    // NOTE(review): declared public, unlike the private *Internal methods above — confirm whether
    // the wider visibility is intentional.
    @ForceInline @Scoped
    public void loadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().load(address, isSync, size);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
300 
    /**
     * Unloads the given memory region from physical memory (delegates to
     * {@link JavaNioAccess#unload}), validating the scope first. The scope may be {@code null},
     * in which case no check is performed.
     *
     * @throws IllegalStateException if the (non-null) scope has already been closed
     */
    @ForceInline
    public void unload(Scope scope, long address, boolean isSync, long size) {
        try {
            unloadInternal(scope, address, isSync, size);
        } catch (Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: scope check and access must remain together so the VM handshake can treat them
    // atomically; the fence keeps the scope reachable.
    // NOTE(review): declared public, unlike the private *Internal methods above — confirm whether
    // the wider visibility is intentional.
    @ForceInline @Scoped
    public void unloadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().unload(address, isSync, size);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
321 
    /**
     * Forces any changes in the given mapped region to be written to its backing file (delegates
     * to {@link JavaNioAccess#force}), validating the scope first. The scope may be {@code null},
     * in which case no check is performed.
     *
     * @throws IllegalStateException if the (non-null) scope has already been closed
     */
    @ForceInline
    public void force(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
        try {
            forceInternal(scope, fd, address, isSync, index, length);
        } catch (Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: scope check and access must remain together so the VM handshake can treat them
    // atomically; the fence keeps the scope reachable.
    // NOTE(review): declared public, unlike the private *Internal methods above — confirm whether
    // the wider visibility is intentional.
    @ForceInline @Scoped
    public void forceInternal(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().force(fd, address, isSync, index, length);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
342 
343     // ByteBuffer vector access ops
344 
    // Buffer access constants, to be initialized when required.
    // Avoids a null value for NIO_ACCESS, due to class initialization dependencies
347     static final class BufferAccess {
348         // Buffer.address
349         static final long BUFFER_ADDRESS
350                 = UNSAFE.objectFieldOffset(Buffer.class, "address");
351 
352         // ByteBuffer.hb
353         static final long BYTE_BUFFER_HB
354                 = UNSAFE.objectFieldOffset(ByteBuffer.class, "hb");
355 
356         static final long BYTE_BUFFER_IS_READ_ONLY
357                 = UNSAFE.objectFieldOffset(ByteBuffer.class, "isReadOnly");
358 
359         @ForceInline
360         static Object bufferBase(ByteBuffer bb) {
361             return UNSAFE.getReference(bb, BYTE_BUFFER_HB);
362         }
363 
364         @ForceInline
365         static long bufferAddress(ByteBuffer bb, long offset) {
366             return UNSAFE.getLong(bb, BUFFER_ADDRESS) + offset;
367         }
368 
369         static final JavaNioAccess NIO_ACCESS = SharedSecrets.getJavaNioAccess();
370 
371         @ForceInline
372         static ScopedMemoryAccess.Scope scope(ByteBuffer bb) {
373             MemorySegmentProxy segmentProxy = NIO_ACCESS.bufferSegment(bb);
374             return segmentProxy != null ?
375                     segmentProxy.scope() : null;
376         }
377     }
378 
    /**
     * Tells whether the given byte buffer is read-only, by reading its {@code isReadOnly}
     * field directly.
     */
    @ForceInline
    public static boolean isReadOnly(ByteBuffer bb) {
        return UNSAFE.getBoolean(bb, BufferAccess.BYTE_BUFFER_IS_READ_ONLY);
    }
383 
    /**
     * Loads a vector of the given species from the byte buffer at the given offset, validating
     * the buffer's memory-segment scope (if any) first.
     *
     * @throws IllegalStateException if the buffer is backed by a segment whose scope is closed
     */
    @ForceInline
    public static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
    V loadFromByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
                          ByteBuffer bb, int offset,
                          S s,
                          VectorSupport.LoadOperation<ByteBuffer, V, S> defaultImpl) {
        try {
            return loadFromByteBufferScoped(
                    BufferAccess.scope(bb),
                    vmClass, e, length,
                    bb, offset,
                    s,
                    defaultImpl);
        } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: the scope check and the vector load must remain together in this single method so
    // the VM handshake can treat them atomically; the fence keeps the scope reachable.
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
    V loadFromByteBufferScoped(ScopedMemoryAccess.Scope scope,
                          Class<? extends V> vmClass, Class<E> e, int length,
                          ByteBuffer bb, int offset,
                          S s,
                          VectorSupport.LoadOperation<ByteBuffer, V, S> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            // Heap buffers expose their backing byte[] via ByteBuffer.hb; direct buffers yield null.
            final byte[] base = (byte[]) BufferAccess.bufferBase(bb);

            return VectorSupport.load(vmClass, e, length,
                      base, BufferAccess.bufferAddress(bb, offset),
                      bb, offset, s,
                      defaultImpl);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
427 
    /**
     * Loads a vector of the given species from the byte buffer at the given offset, under the
     * given mask, validating the buffer's memory-segment scope (if any) first.
     *
     * @throws IllegalStateException if the buffer is backed by a segment whose scope is closed
     */
    @ForceInline
    public static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>,
     M extends VectorSupport.VectorMask<E>>
    V loadFromByteBufferMasked(Class<? extends V> vmClass, Class<M> maskClass, Class<E> e,
                               int length, ByteBuffer bb, int offset, M m, S s,
                               VectorSupport.LoadVectorMaskedOperation<ByteBuffer, V, S, M> defaultImpl) {
        try {
            return loadFromByteBufferMaskedScoped(
                    BufferAccess.scope(bb),
                    vmClass, maskClass, e, length,
                    bb, offset, m,
                    s,
                    defaultImpl);
        } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: the scope check and the masked vector load must remain together in this single
    // method so the VM handshake can treat them atomically; the fence keeps the scope reachable.
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>,
     M extends VectorSupport.VectorMask<E>>
    V loadFromByteBufferMaskedScoped(ScopedMemoryAccess.Scope scope, Class<? extends V> vmClass,
                                     Class<M> maskClass, Class<E> e, int length,
                                     ByteBuffer bb, int offset, M m,
                                     S s,
                                     VectorSupport.LoadVectorMaskedOperation<ByteBuffer, V, S, M> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            return VectorSupport.loadMasked(vmClass, maskClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset), m,
                    bb, offset, s,
                    defaultImpl);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
470 
    /**
     * Stores the given vector into the byte buffer at the given offset, validating the buffer's
     * memory-segment scope (if any) first.
     *
     * @throws IllegalStateException if the buffer is backed by a segment whose scope is closed
     */
    @ForceInline
    public static
    <V extends VectorSupport.Vector<E>, E>
    void storeIntoByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
                             V v,
                             ByteBuffer bb, int offset,
                             VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
        try {
            storeIntoByteBufferScoped(
                    BufferAccess.scope(bb),
                    vmClass, e, length,
                    v,
                    bb, offset,
                    defaultImpl);
        } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: the scope check and the vector store must remain together in this single method so
    // the VM handshake can treat them atomically; the fence keeps the scope reachable.
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E>
    void storeIntoByteBufferScoped(ScopedMemoryAccess.Scope scope,
                                   Class<? extends V> vmClass, Class<E> e, int length,
                                   V v,
                                   ByteBuffer bb, int offset,
                                   VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            // Heap buffers expose their backing byte[] via ByteBuffer.hb; direct buffers yield null.
            final byte[] base = (byte[]) BufferAccess.bufferBase(bb);

            VectorSupport.store(vmClass, e, length,
                                base, BufferAccess.bufferAddress(bb, offset),
                                v,
                                bb, offset,
                                defaultImpl);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
515 
    /**
     * Stores the given vector into the byte buffer at the given offset, under the given mask,
     * validating the buffer's memory-segment scope (if any) first.
     *
     * @throws IllegalStateException if the buffer is backed by a segment whose scope is closed
     */
    @ForceInline
    public static
    <V extends VectorSupport.Vector<E>, E, M extends VectorSupport.VectorMask<E>>
    void storeIntoByteBufferMasked(Class<? extends V> vmClass, Class<M> maskClass, Class<E> e,
                                   int length, V v, M m,
                                   ByteBuffer bb, int offset,
                                   VectorSupport.StoreVectorMaskedOperation<ByteBuffer, V, M> defaultImpl) {
        try {
            storeIntoByteBufferMaskedScoped(
                    BufferAccess.scope(bb),
                    vmClass, maskClass, e, length,
                    v, m,
                    bb, offset,
                    defaultImpl);
        } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
            // The shared error carries no stack trace; surface a regular exception instead.
            throw new IllegalStateException("This segment is already closed");
        }
    }

    // @Scoped: the scope check and the masked vector store must remain together in this single
    // method so the VM handshake can treat them atomically; the fence keeps the scope reachable.
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, M extends VectorSupport.VectorMask<E>>
    void storeIntoByteBufferMaskedScoped(ScopedMemoryAccess.Scope scope,
                                         Class<? extends V> vmClass, Class<M> maskClass,
                                         Class<E> e, int length, V v, M m,
                                         ByteBuffer bb, int offset,
                                         VectorSupport.StoreVectorMaskedOperation<ByteBuffer, V, M> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            VectorSupport.storeMasked(vmClass, maskClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset),
                    v, m,
                    bb, offset,
                    defaultImpl);
        } finally {
            Reference.reachabilityFence(scope);
        }
    }
558 
559     // typed-ops here
560 
561     // Note: all the accessor methods defined below take advantage of argument type profiling
562     // (see src/hotspot/share/oops/methodData.cpp) which greatly enhances performance when the same accessor
563     // method is used repeatedly with different 'base' objects.