1 /*
  2  * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.  Oracle designates this
  8  * particular file as subject to the "Classpath" exception as provided
  9  * by Oracle in the LICENSE file that accompanied this code.
 10  *
 11  * This code is distributed in the hope that it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 14  * version 2 for more details (a copy is included in the LICENSE file that
 15  * accompanied this code).
 16  *
 17  * You should have received a copy of the GNU General Public License version
 18  * 2 along with this work; if not, write to the Free Software Foundation,
 19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 20  *
 21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 22  * or visit www.oracle.com if you need additional information or have any
 23  * questions.
 24  */
 25 
 26 package jdk.internal.misc;
 27 
 28 import java.lang.annotation.ElementType;
 29 import java.lang.annotation.Retention;
 30 import java.lang.annotation.RetentionPolicy;
 31 import java.lang.annotation.Target;
 32 import java.lang.ref.Reference;
 33 import java.io.FileDescriptor;
 34 import java.nio.Buffer;
 35 import java.nio.ByteBuffer;
 36 
 37 import jdk.internal.access.JavaNioAccess;
 38 import jdk.internal.access.SharedSecrets;
 39 import jdk.internal.access.foreign.MemorySegmentProxy;
 40 import jdk.internal.util.ArraysSupport;
 41 import jdk.internal.vm.annotation.ForceInline;
 42 import jdk.internal.vm.vector.VectorSupport;
 43 
 44 
 45 /**
 46  * This class defines low-level methods to access on-heap and off-heap memory. The methods in this class
 47  * can be thought of as thin wrappers around methods provided in the {@link Unsafe} class. All the methods in this
 * class accept one or more {@link Scope} parameters, which are used to validate whether access to memory
 49  * can be performed in a safe fashion - more specifically, to ensure that the memory being accessed has not
 50  * already been released (which would result in a hard VM crash).
 51  * <p>
 52  * Accessing and releasing memory from a single thread is not problematic - after all, a given thread cannot,
 53  * at the same time, access a memory region <em>and</em> free it. But ensuring correctness of memory access
 54  * when multiple threads are involved is much trickier, as there can be cases where a thread is accessing
 55  * a memory region while another thread is releasing it.
 56  * <p>
 57  * This class provides tools to manage races when multiple threads are accessing and/or releasing the same memory
 58  * region concurrently. More specifically, when a thread wants to release a memory region, it should call the
 59  * {@link #closeScope(jdk.internal.misc.ScopedMemoryAccess.Scope)} method provided by this class. This method initiates
 60  * thread-local handshakes with all the other VM threads, which are then stopped one by one. If any thread is found
 61  * accessing memory that is associated to the very scope object being closed, that thread execution is asynchronously
 62  * interrupted with a {@link Scope.ScopedAccessError}.
 63  * <p>
 64  * This synchronization strategy relies on the idea that accessing memory is atomic with respect to checking the
 65  * validity of the scope associated with that memory region - that is, a thread that wants to perform memory access will be
 66  * suspended either <em>before</em> a scope check or <em>after</em> the memory access. To ensure this atomicity,
 67  * all methods in this class are marked with the special {@link Scoped} annotation, which is recognized by the VM,
 68  * and used during the thread-local handshake to detect (and stop) threads performing potentially problematic memory access
 69  * operations. Additionally, to make sure that the scope object(s) of the memory being accessed is always
 70  * reachable during an access operation, all the methods in this class add reachability fences around the underlying
 71  * unsafe access.
 72  * <p>
 73  * This form of synchronization allows APIs to use plain memory access without any other form of synchronization
 * which might be deemed too expensive; in other words, this approach prioritizes the performance of memory access over
 75  * that of releasing a shared memory resource.
 76  */
 77 public class ScopedMemoryAccess {
 78 
    // Single Unsafe instance used for all raw memory operations performed by this class.
    private static final Unsafe UNSAFE = Unsafe.getUnsafe();

    // Binds this class's native methods (closeScope0) in the VM; must run before any
    // instance is used, hence the static initializer.
    private static native void registerNatives();
    static {
        registerNatives();
    }
 85 
    /**
     * Closes the given scope. As described in the class-level javadoc, this initiates
     * thread-local handshakes with all other VM threads; any thread found accessing
     * memory associated with {@code scope} is asynchronously interrupted with the
     * shared {@link Scope.ScopedAccessError} instance.
     *
     * @param scope the scope to be closed
     * @return the result reported by the native implementation — presumably whether the
     *         close succeeded; exact semantics are defined in VM code (TODO confirm)
     */
    public boolean closeScope(Scope scope) {
        return closeScope0(scope, Scope.ScopedAccessError.INSTANCE);
    }

    // Native entry point implementing the handshake-based close described above.
    // The shared error instance is passed in so the VM can throw it asynchronously
    // in threads caught mid-access.
    native boolean closeScope0(Scope scope, Scope.ScopedAccessError exception);

    // Not instantiable by clients; all access goes through the singleton below.
    private ScopedMemoryAccess() {}

    // Shared singleton instance.
    private static final ScopedMemoryAccess theScopedMemoryAccess = new ScopedMemoryAccess();

    /**
     * Returns the shared {@code ScopedMemoryAccess} singleton.
     */
    public static ScopedMemoryAccess getScopedMemoryAccess() {
        return theScopedMemoryAccess;
    }
 99 
100     /**
101      * Scope interface used during scoped memory access operations. A scope can be thought of as an object
102      * which embodies the temporal checks associated with a given memory region.
103      */
104     public interface Scope {
105 
106        interface Handle {
107             Scope scope();
108         }
109 
110         void checkValidState();
111 
112         Thread ownerThread();
113 
114         boolean isImplicit();
115 
116         Handle acquire();
117 
118         void release(Handle handle);
119 
120         /**
121          * Error thrown when memory access fails because the memory has already been released.
122          * Note: for performance reasons, this exception is never created by client; instead a shared instance
123          * is thrown (sometimes, this instance can be thrown asynchronously inside VM code). For this reason,
124          * it is important for clients to always catch this exception and throw a regular exception instead
125          * (which contains full stack information).
126          */
127         final class ScopedAccessError extends Error {
128             private ScopedAccessError() {
129                 super("Attempt to access an already released memory resource", null, false, false);
130             }
131             static final long serialVersionUID = 1L;
132 
133             public static final ScopedAccessError INSTANCE = new ScopedAccessError();
134         }
135     }
136 
    /**
     * Marks methods whose scope check and memory access must be treated as a single
     * atomic unit. The VM recognizes this annotation during thread-local handshakes to
     * detect (and stop) threads performing potentially problematic memory access
     * operations (see class-level javadoc).
     */
    @Target({ElementType.METHOD, ElementType.CONSTRUCTOR})
    @Retention(RetentionPolicy.RUNTIME)
    @interface Scoped { }
140 
141     // bulk ops
142 
143     @ForceInline
144     public void copyMemory(Scope srcScope, Scope dstScope,
145                                    Object srcBase, long srcOffset,
146                                    Object destBase, long destOffset,
147                                    long bytes) {
148           try {
149               copyMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes);
150           } catch (Scope.ScopedAccessError ex) {
151               throw new IllegalStateException("This segment is already closed");
152           }
153     }
154 
    // Checks both scopes, then performs the raw copy. Marked @Scoped so the VM's
    // thread-local handshake treats the scope check and the unsafe access as one
    // atomic unit (see class javadoc) — do not restructure this method.
    @ForceInline @Scoped
    private void copyMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes) {
        try {
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copyMemory(srcBase, srcOffset, destBase, destOffset, bytes);
        } finally {
            // Keep both scope objects reachable for the whole duration of the access,
            // so neither can be GC'd (and its memory freed) mid-copy.
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
173 
174     @ForceInline
175     public void copySwapMemory(Scope srcScope, Scope dstScope,
176                                    Object srcBase, long srcOffset,
177                                    Object destBase, long destOffset,
178                                    long bytes, long elemSize) {
179           try {
180               copySwapMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
181           } catch (Scope.ScopedAccessError ex) {
182               throw new IllegalStateException("This segment is already closed");
183           }
184     }
185 
    // Checks both scopes, then performs the raw byte-swapping copy. Marked @Scoped so
    // the scope check and unsafe access form one atomic unit for the VM handshake
    // (see class javadoc) — do not restructure this method.
    @ForceInline @Scoped
    private void copySwapMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes, long elemSize) {
        try {
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copySwapMemory(srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
        } finally {
            // Keep both scopes reachable until the access completes.
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
204 
205     @ForceInline
206     public void setMemory(Scope scope, Object o, long offset, long bytes, byte value) {
207         try {
208             setMemoryInternal(scope, o, offset, bytes, value);
209         } catch (Scope.ScopedAccessError ex) {
210             throw new IllegalStateException("This segment is already closed");
211         }
212     }
213 
    // Checks the scope, then performs the raw fill. Marked @Scoped so the scope check
    // and unsafe access form one atomic unit for the VM handshake (see class javadoc).
    @ForceInline @Scoped
    private void setMemoryInternal(Scope scope, Object o, long offset, long bytes, byte value) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            UNSAFE.setMemory(o, offset, bytes, value);
        } finally {
            // Keep the scope reachable until the access completes.
            Reference.reachabilityFence(scope);
        }
    }
225 
226     @ForceInline
227     public int vectorizedMismatch(Scope aScope, Scope bScope,
228                                              Object a, long aOffset,
229                                              Object b, long bOffset,
230                                              int length,
231                                              int log2ArrayIndexScale) {
232         try {
233             return vectorizedMismatchInternal(aScope, bScope, a, aOffset, b, bOffset, length, log2ArrayIndexScale);
234         } catch (Scope.ScopedAccessError ex) {
235             throw new IllegalStateException("This segment is already closed");
236         }
237     }
238 
    // Checks both scopes, then delegates the comparison. Marked @Scoped so the scope
    // checks and memory access form one atomic unit for the VM handshake (see class
    // javadoc) — do not restructure this method.
    @ForceInline @Scoped
    private int vectorizedMismatchInternal(Scope aScope, Scope bScope,
                                             Object a, long aOffset,
                                             Object b, long bOffset,
                                             int length,
                                             int log2ArrayIndexScale) {
        try {
            if (aScope != null) {
                aScope.checkValidState();
            }
            if (bScope != null) {
                bScope.checkValidState();
            }
            return ArraysSupport.vectorizedMismatch(a, aOffset, b, bOffset, length, log2ArrayIndexScale);
        } finally {
            // Keep both scopes reachable until the access completes.
            Reference.reachabilityFence(aScope);
            Reference.reachabilityFence(bScope);
        }
    }
258 
259     @ForceInline
260     public boolean isLoaded(Scope scope, long address, boolean isSync, long size) {
261         try {
262             return isLoadedInternal(scope, address, isSync, size);
263         } catch (Scope.ScopedAccessError ex) {
264             throw new IllegalStateException("This segment is already closed");
265         }
266     }
267 
    // Checks the scope, then queries residency via JavaNioAccess. Marked @Scoped so
    // the scope check and memory query form one atomic unit for the VM handshake.
    // NOTE(review): declared public, unlike the private copy/set/mismatch *Internal
    // helpers above — consider aligning the access modifier for consistency.
    @ForceInline @Scoped
    public boolean isLoadedInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            return SharedSecrets.getJavaNioAccess().isLoaded(address, isSync, size);
        } finally {
            // Keep the scope reachable until the access completes.
            Reference.reachabilityFence(scope);
        }
    }
279 
280     @ForceInline
281     public void load(Scope scope, long address, boolean isSync, long size) {
282         try {
283             loadInternal(scope, address, isSync, size);
284         } catch (Scope.ScopedAccessError ex) {
285             throw new IllegalStateException("This segment is already closed");
286         }
287     }
288 
    // Checks the scope, then delegates the load via JavaNioAccess. Marked @Scoped so
    // the scope check and memory operation form one atomic unit for the VM handshake.
    // NOTE(review): public, unlike the private *Internal helpers above — consider
    // aligning the access modifier for consistency.
    @ForceInline @Scoped
    public void loadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().load(address, isSync, size);
        } finally {
            // Keep the scope reachable until the access completes.
            Reference.reachabilityFence(scope);
        }
    }
300 
301     @ForceInline
302     public void unload(Scope scope, long address, boolean isSync, long size) {
303         try {
304             unloadInternal(scope, address, isSync, size);
305         } catch (Scope.ScopedAccessError ex) {
306             throw new IllegalStateException("This segment is already closed");
307         }
308     }
309 
    // Checks the scope, then delegates the unload via JavaNioAccess. Marked @Scoped so
    // the scope check and memory operation form one atomic unit for the VM handshake.
    // NOTE(review): public, unlike the private *Internal helpers above — consider
    // aligning the access modifier for consistency.
    @ForceInline @Scoped
    public void unloadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().unload(address, isSync, size);
        } finally {
            // Keep the scope reachable until the access completes.
            Reference.reachabilityFence(scope);
        }
    }
321 
322     @ForceInline
323     public void force(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
324         try {
325             forceInternal(scope, fd, address, isSync, index, length);
326         } catch (Scope.ScopedAccessError ex) {
327             throw new IllegalStateException("This segment is already closed");
328         }
329     }
330 
    // Checks the scope, then delegates the force via JavaNioAccess. Marked @Scoped so
    // the scope check and memory operation form one atomic unit for the VM handshake.
    // NOTE(review): public, unlike the private *Internal helpers above — consider
    // aligning the access modifier for consistency.
    @ForceInline @Scoped
    public void forceInternal(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().force(fd, address, isSync, index, length);
        } finally {
            // Keep the scope reachable until the access completes.
            Reference.reachabilityFence(scope);
        }
    }
342 
343     // ByteBuffer vector access ops
344 
    // Buffer access constants, to be initialized when required.
    // Avoids a null value for NIO_ACCESS, due to class initialization dependencies
347     static final class BufferAccess {
348         // Buffer.address
349         static final long BUFFER_ADDRESS
350                 = UNSAFE.objectFieldOffset(Buffer.class, "address");
351 
352         // ByteBuffer.hb
353         static final long BYTE_BUFFER_HB
354                 = UNSAFE.objectFieldOffset(ByteBuffer.class, "hb");
355 
356         @ForceInline
357         static Object bufferBase(ByteBuffer bb) {
358             return UNSAFE.getReference(bb, BYTE_BUFFER_HB);
359         }
360 
361         @ForceInline
362         static long bufferAddress(ByteBuffer bb, long offset) {
363             return UNSAFE.getLong(bb, BUFFER_ADDRESS) + offset;
364         }
365 
366         static final JavaNioAccess NIO_ACCESS = SharedSecrets.getJavaNioAccess();
367 
368         @ForceInline
369         static ScopedMemoryAccess.Scope scope(ByteBuffer bb) {
370             MemorySegmentProxy segmentProxy = NIO_ACCESS.bufferSegment(bb);
371             return segmentProxy != null ?
372                     segmentProxy.scope() : null;
373         }
374     }
375 
376     @ForceInline
377     public static
378     <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
379     V loadFromByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
380                           ByteBuffer bb, int offset,
381                           S s,
382                           VectorSupport.LoadOperation<ByteBuffer, V, E, S> defaultImpl) {
383         try {
384             return loadFromByteBufferScoped(
385                     BufferAccess.scope(bb),
386                     vmClass, e, length,
387                     bb, offset,
388                     s,
389                     defaultImpl);
390         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
391             throw new IllegalStateException("This segment is already closed");
392         }
393     }
394 
    // Checks the scope, then performs the vector load. Marked @Scoped so the scope
    // check and buffer access form one atomic unit for the VM handshake (see class
    // javadoc) — do not restructure this method.
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
    V loadFromByteBufferScoped(ScopedMemoryAccess.Scope scope,
                          Class<? extends V> vmClass, Class<E> e, int length,
                          ByteBuffer bb, int offset,
                          S s,
                          VectorSupport.LoadOperation<ByteBuffer, V, E, S> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            return VectorSupport.load(vmClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset),
                    bb, offset, s,
                    defaultImpl);
        } finally {
            // Keep the scope reachable until the access completes.
            Reference.reachabilityFence(scope);
        }
    }
417 
418     @ForceInline
419     public static
420     <V extends VectorSupport.Vector<E>, E>
421     void storeIntoByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
422                              V v,
423                              ByteBuffer bb, int offset,
424                              VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
425         try {
426             storeIntoByteBufferScoped(
427                     BufferAccess.scope(bb),
428                     vmClass, e, length,
429                     v,
430                     bb, offset,
431                     defaultImpl);
432         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
433             throw new IllegalStateException("This segment is already closed");
434         }
435     }
436 
    // Checks the scope, then performs the vector store. Marked @Scoped so the scope
    // check and buffer access form one atomic unit for the VM handshake (see class
    // javadoc) — do not restructure this method.
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E>
    void storeIntoByteBufferScoped(ScopedMemoryAccess.Scope scope,
                                   Class<? extends V> vmClass, Class<E> e, int length,
                                   V v,
                                   ByteBuffer bb, int offset,
                                   VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            VectorSupport.store(vmClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset),
                    v,
                    bb, offset,
                    defaultImpl);
        } finally {
            // Keep the scope reachable until the access completes.
            Reference.reachabilityFence(scope);
        }
    }
460 
461 
462     // typed-ops here
463 
464     // Note: all the accessor methods defined below take advantage of argument type profiling
465     // (see src/hotspot/share/oops/methodData.cpp) which greatly enhances performance when the same accessor
466     // method is used repeatedly with different 'base' objects.