1 /*
  2  * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.  Oracle designates this
  8  * particular file as subject to the "Classpath" exception as provided
  9  * by Oracle in the LICENSE file that accompanied this code.
 10  *
 11  * This code is distributed in the hope that it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 14  * version 2 for more details (a copy is included in the LICENSE file that
 15  * accompanied this code).
 16  *
 17  * You should have received a copy of the GNU General Public License version
 18  * 2 along with this work; if not, write to the Free Software Foundation,
 19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 20  *
 21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 22  * or visit www.oracle.com if you need additional information or have any
 23  * questions.
 24  */
 25 
 26 package jdk.internal.misc;
 27 
 28 import java.lang.annotation.ElementType;
 29 import java.lang.annotation.Retention;
 30 import java.lang.annotation.RetentionPolicy;
 31 import java.lang.annotation.Target;
 32 import java.lang.ref.Reference;
 33 import java.io.FileDescriptor;
 34 import java.nio.Buffer;
 35 import java.nio.ByteBuffer;
 36 
 37 import jdk.internal.access.JavaNioAccess;
 38 import jdk.internal.access.SharedSecrets;
 39 import jdk.internal.access.foreign.MemorySegmentProxy;
 40 import jdk.internal.util.ArraysSupport;
 41 import jdk.internal.vm.annotation.ForceInline;
 42 import jdk.internal.vm.vector.VectorSupport;
 43 
 44 
 45 /**
 46  * This class defines low-level methods to access on-heap and off-heap memory. The methods in this class
 47  * can be thought of as thin wrappers around methods provided in the {@link Unsafe} class. All the methods in this
 * class accept one or more {@link Scope} parameters, which are used to validate whether access to memory
 49  * can be performed in a safe fashion - more specifically, to ensure that the memory being accessed has not
 50  * already been released (which would result in a hard VM crash).
 51  * <p>
 52  * Accessing and releasing memory from a single thread is not problematic - after all, a given thread cannot,
 53  * at the same time, access a memory region <em>and</em> free it. But ensuring correctness of memory access
 54  * when multiple threads are involved is much trickier, as there can be cases where a thread is accessing
 55  * a memory region while another thread is releasing it.
 56  * <p>
 57  * This class provides tools to manage races when multiple threads are accessing and/or releasing the same memory
 58  * region concurrently. More specifically, when a thread wants to release a memory region, it should call the
 59  * {@link #closeScope(jdk.internal.misc.ScopedMemoryAccess.Scope)} method provided by this class. This method initiates
 60  * thread-local handshakes with all the other VM threads, which are then stopped one by one. If any thread is found
 61  * accessing memory that is associated to the very scope object being closed, that thread execution is asynchronously
 62  * interrupted with a {@link Scope.ScopedAccessError}.
 63  * <p>
 64  * This synchronization strategy relies on the idea that accessing memory is atomic with respect to checking the
 65  * validity of the scope associated with that memory region - that is, a thread that wants to perform memory access will be
 66  * suspended either <em>before</em> a scope check or <em>after</em> the memory access. To ensure this atomicity,
 67  * all methods in this class are marked with the special {@link Scoped} annotation, which is recognized by the VM,
 68  * and used during the thread-local handshake to detect (and stop) threads performing potentially problematic memory access
 69  * operations. Additionally, to make sure that the scope object(s) of the memory being accessed is always
 70  * reachable during an access operation, all the methods in this class add reachability fences around the underlying
 71  * unsafe access.
 72  * <p>
 73  * This form of synchronization allows APIs to use plain memory access without any other form of synchronization
 * which might be deemed too expensive; in other words, this approach prioritizes the performance of memory access over
 75  * that of releasing a shared memory resource.
 76  */
 77 public class ScopedMemoryAccess {
 78 
    private static final Unsafe UNSAFE = Unsafe.getUnsafe();

    // Registers this class's native methods (closeScope0) with the VM.
    private static native void registerNatives();
    static {
        registerNatives();
    }

    /**
     * Closes the given scope. This initiates thread-local handshakes with the other
     * VM threads (see class javadoc); a thread found accessing memory associated
     * with {@code scope} is asynchronously thrown the shared
     * {@link Scope.ScopedAccessError} instance.
     *
     * @param scope the scope to close
     * @return the result reported by the VM for the close operation
     */
    public boolean closeScope(Scope scope) {
        return closeScope0(scope, Scope.ScopedAccessError.INSTANCE);
    }

    native boolean closeScope0(Scope scope, Scope.ScopedAccessError exception);

    // Suppresses instantiation: clients obtain the shared singleton below
    // via getScopedMemoryAccess().
    private ScopedMemoryAccess() {}

    private static final ScopedMemoryAccess theScopedMemoryAccess = new ScopedMemoryAccess();

    /**
     * Returns the shared {@code ScopedMemoryAccess} singleton.
     */
    public static ScopedMemoryAccess getScopedMemoryAccess() {
        return theScopedMemoryAccess;
    }
 99 
100     /**
101      * Scope interface used during scoped memory access operations. A scope can be thought of as an object
102      * which embodies the temporal checks associated with a given memory region.
103      */
104     public interface Scope {
105 
106         void checkValidState();
107 
108         Thread ownerThread();
109 
110         void acquire0();
111 
112         void release0();
113 
114         /**
115          * Error thrown when memory access fails because the memory has already been released.
116          * Note: for performance reasons, this exception is never created by client; instead a shared instance
117          * is thrown (sometimes, this instance can be thrown asynchronously inside VM code). For this reason,
118          * it is important for clients to always catch this exception and throw a regular exception instead
119          * (which contains full stack information).
120          */
121         final class ScopedAccessError extends Error {
122             private ScopedAccessError() {
123                 super("Attempt to access an already released memory resource", null, false, false);
124             }
125             static final long serialVersionUID = 1L;
126 
127             public static final ScopedAccessError INSTANCE = new ScopedAccessError();
128         }
129     }
130 
    /**
     * Marks a method whose execution the VM must treat as an atomic scoped memory
     * access: during the thread-local handshake initiated by {@link #closeScope(Scope)},
     * threads found executing a method carrying this annotation are detected (and
     * stopped) as performing potentially problematic memory access (see class javadoc).
     */
    @Target({ElementType.METHOD, ElementType.CONSTRUCTOR})
    @Retention(RetentionPolicy.RUNTIME)
    @interface Scoped { }
134 
135     // bulk ops
136 
137     @ForceInline
138     public void copyMemory(Scope srcScope, Scope dstScope,
139                                    Object srcBase, long srcOffset,
140                                    Object destBase, long destOffset,
141                                    long bytes) {
142           try {
143               copyMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes);
144           } catch (Scope.ScopedAccessError ex) {
145               throw new IllegalStateException("This segment is already closed");
146           }
147     }
148 
    // @Scoped: the VM treats the scope checks and the unsafe copy below as atomic
    // with respect to the closeScope handshake (see class javadoc), so a closing
    // thread can never observe this method between a passed check and the access.
    @ForceInline @Scoped
    private void copyMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes) {
        try {
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copyMemory(srcBase, srcOffset, destBase, destOffset, bytes);
        } finally {
            // keep both scope objects reachable until the access has completed
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
167 
168     @ForceInline
169     public void copySwapMemory(Scope srcScope, Scope dstScope,
170                                    Object srcBase, long srcOffset,
171                                    Object destBase, long destOffset,
172                                    long bytes, long elemSize) {
173           try {
174               copySwapMemoryInternal(srcScope, dstScope, srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
175           } catch (Scope.ScopedAccessError ex) {
176               throw new IllegalStateException("This segment is already closed");
177           }
178     }
179 
    // @Scoped: scope checks + unsafe access must appear atomic to the closeScope
    // handshake (see class javadoc).
    @ForceInline @Scoped
    private void copySwapMemoryInternal(Scope srcScope, Scope dstScope,
                               Object srcBase, long srcOffset,
                               Object destBase, long destOffset,
                               long bytes, long elemSize) {
        try {
            if (srcScope != null) {
                srcScope.checkValidState();
            }
            if (dstScope != null) {
                dstScope.checkValidState();
            }
            UNSAFE.copySwapMemory(srcBase, srcOffset, destBase, destOffset, bytes, elemSize);
        } finally {
            // keep both scope objects reachable until the access has completed
            Reference.reachabilityFence(srcScope);
            Reference.reachabilityFence(dstScope);
        }
    }
198 
199     @ForceInline
200     public void setMemory(Scope scope, Object o, long offset, long bytes, byte value) {
201         try {
202             setMemoryInternal(scope, o, offset, bytes, value);
203         } catch (Scope.ScopedAccessError ex) {
204             throw new IllegalStateException("This segment is already closed");
205         }
206     }
207 
    // @Scoped: scope check + unsafe access must appear atomic to the closeScope
    // handshake (see class javadoc).
    @ForceInline @Scoped
    private void setMemoryInternal(Scope scope, Object o, long offset, long bytes, byte value) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            UNSAFE.setMemory(o, offset, bytes, value);
        } finally {
            // keep the scope object reachable until the access has completed
            Reference.reachabilityFence(scope);
        }
    }
219 
220     @ForceInline
221     public int vectorizedMismatch(Scope aScope, Scope bScope,
222                                              Object a, long aOffset,
223                                              Object b, long bOffset,
224                                              int length,
225                                              int log2ArrayIndexScale) {
226         try {
227             return vectorizedMismatchInternal(aScope, bScope, a, aOffset, b, bOffset, length, log2ArrayIndexScale);
228         } catch (Scope.ScopedAccessError ex) {
229             throw new IllegalStateException("This segment is already closed");
230         }
231     }
232 
    // @Scoped: scope checks + the mismatch scan must appear atomic to the
    // closeScope handshake (see class javadoc).
    @ForceInline @Scoped
    private int vectorizedMismatchInternal(Scope aScope, Scope bScope,
                                             Object a, long aOffset,
                                             Object b, long bOffset,
                                             int length,
                                             int log2ArrayIndexScale) {
        try {
            if (aScope != null) {
                aScope.checkValidState();
            }
            if (bScope != null) {
                bScope.checkValidState();
            }
            return ArraysSupport.vectorizedMismatch(a, aOffset, b, bOffset, length, log2ArrayIndexScale);
        } finally {
            // keep both scope objects reachable until the access has completed
            Reference.reachabilityFence(aScope);
            Reference.reachabilityFence(bScope);
        }
    }
252 
253     @ForceInline
254     public boolean isLoaded(Scope scope, long address, boolean isSync, long size) {
255         try {
256             return isLoadedInternal(scope, address, isSync, size);
257         } catch (Scope.ScopedAccessError ex) {
258             throw new IllegalStateException("This segment is already closed");
259         }
260     }
261 
    // @Scoped: scope check + the delegated NIO query must appear atomic to the
    // closeScope handshake (see class javadoc).
    @ForceInline @Scoped
    public boolean isLoadedInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            return SharedSecrets.getJavaNioAccess().isLoaded(address, isSync, size);
        } finally {
            // keep the scope object reachable until the access has completed
            Reference.reachabilityFence(scope);
        }
    }
273 
274     @ForceInline
275     public void load(Scope scope, long address, boolean isSync, long size) {
276         try {
277             loadInternal(scope, address, isSync, size);
278         } catch (Scope.ScopedAccessError ex) {
279             throw new IllegalStateException("This segment is already closed");
280         }
281     }
282 
    // @Scoped: scope check + the delegated NIO operation must appear atomic to
    // the closeScope handshake (see class javadoc).
    @ForceInline @Scoped
    public void loadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().load(address, isSync, size);
        } finally {
            // keep the scope object reachable until the access has completed
            Reference.reachabilityFence(scope);
        }
    }
294 
295     @ForceInline
296     public void unload(Scope scope, long address, boolean isSync, long size) {
297         try {
298             unloadInternal(scope, address, isSync, size);
299         } catch (Scope.ScopedAccessError ex) {
300             throw new IllegalStateException("This segment is already closed");
301         }
302     }
303 
    // @Scoped: scope check + the delegated NIO operation must appear atomic to
    // the closeScope handshake (see class javadoc).
    @ForceInline @Scoped
    public void unloadInternal(Scope scope, long address, boolean isSync, long size) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().unload(address, isSync, size);
        } finally {
            // keep the scope object reachable until the access has completed
            Reference.reachabilityFence(scope);
        }
    }
315 
316     @ForceInline
317     public void force(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
318         try {
319             forceInternal(scope, fd, address, isSync, index, length);
320         } catch (Scope.ScopedAccessError ex) {
321             throw new IllegalStateException("This segment is already closed");
322         }
323     }
324 
    // @Scoped: scope check + the delegated NIO operation must appear atomic to
    // the closeScope handshake (see class javadoc).
    @ForceInline @Scoped
    public void forceInternal(Scope scope, FileDescriptor fd, long address, boolean isSync, long index, long length) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }
            SharedSecrets.getJavaNioAccess().force(fd, address, isSync, index, length);
        } finally {
            // keep the scope object reachable until the access has completed
            Reference.reachabilityFence(scope);
        }
    }
336 
337     // ByteBuffer vector access ops
338 
    // Buffer access constants, to be initialized when required.
    // Avoids a null value for NIO_ACCESS, due to class initialization dependencies
    static final class BufferAccess {
        // Field offset of Buffer.address (start address of a direct buffer,
        // or array-relative address of a heap buffer).
        static final long BUFFER_ADDRESS
                = UNSAFE.objectFieldOffset(Buffer.class, "address");

        // Field offset of ByteBuffer.hb (the backing heap array; presumably
        // null for direct buffers — confirm against ByteBuffer internals).
        static final long BYTE_BUFFER_HB
                = UNSAFE.objectFieldOffset(ByteBuffer.class, "hb");

        // Returns the base object for unsafe access: the buffer's hb field.
        @ForceInline
        static Object bufferBase(ByteBuffer bb) {
            return UNSAFE.getReference(bb, BYTE_BUFFER_HB);
        }

        // Returns the buffer's address field plus the given offset.
        @ForceInline
        static long bufferAddress(ByteBuffer bb, long offset) {
            return UNSAFE.getLong(bb, BUFFER_ADDRESS) + offset;
        }

        static final JavaNioAccess NIO_ACCESS = SharedSecrets.getJavaNioAccess();

        // Returns the scope of the memory segment backing the given buffer,
        // or null if the buffer is not segment-backed.
        @ForceInline
        static ScopedMemoryAccess.Scope scope(ByteBuffer bb) {
            MemorySegmentProxy segmentProxy = NIO_ACCESS.bufferSegment(bb);
            return segmentProxy != null ?
                    segmentProxy.scope() : null;
        }
    }
369 
370     @ForceInline
371     public static
372     <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
373     V loadFromByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
374                           ByteBuffer bb, int offset,
375                           S s,
376                           VectorSupport.LoadOperation<ByteBuffer, V, E, S> defaultImpl) {
377         try {
378             return loadFromByteBufferScoped(
379                     BufferAccess.scope(bb),
380                     vmClass, e, length,
381                     bb, offset,
382                     s,
383                     defaultImpl);
384         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
385             throw new IllegalStateException("This segment is already closed");
386         }
387     }
388 
    // @Scoped: scope check + vector load must appear atomic to the closeScope
    // handshake (see class javadoc).
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E, S extends VectorSupport.VectorSpecies<E>>
    V loadFromByteBufferScoped(ScopedMemoryAccess.Scope scope,
                          Class<? extends V> vmClass, Class<E> e, int length,
                          ByteBuffer bb, int offset,
                          S s,
                          VectorSupport.LoadOperation<ByteBuffer, V, E, S> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            return VectorSupport.load(vmClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset),
                    bb, offset, s,
                    defaultImpl);
        } finally {
            // keep the scope object reachable until the access has completed
            Reference.reachabilityFence(scope);
        }
    }
411 
412     @ForceInline
413     public static
414     <V extends VectorSupport.Vector<E>, E>
415     void storeIntoByteBuffer(Class<? extends V> vmClass, Class<E> e, int length,
416                              V v,
417                              ByteBuffer bb, int offset,
418                              VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
419         try {
420             storeIntoByteBufferScoped(
421                     BufferAccess.scope(bb),
422                     vmClass, e, length,
423                     v,
424                     bb, offset,
425                     defaultImpl);
426         } catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
427             throw new IllegalStateException("This segment is already closed");
428         }
429     }
430 
    // @Scoped: scope check + vector store must appear atomic to the closeScope
    // handshake (see class javadoc).
    @Scoped
    @ForceInline
    private static
    <V extends VectorSupport.Vector<E>, E>
    void storeIntoByteBufferScoped(ScopedMemoryAccess.Scope scope,
                                   Class<? extends V> vmClass, Class<E> e, int length,
                                   V v,
                                   ByteBuffer bb, int offset,
                                   VectorSupport.StoreVectorOperation<ByteBuffer, V> defaultImpl) {
        try {
            if (scope != null) {
                scope.checkValidState();
            }

            VectorSupport.store(vmClass, e, length,
                    BufferAccess.bufferBase(bb), BufferAccess.bufferAddress(bb, offset),
                    v,
                    bb, offset,
                    defaultImpl);
        } finally {
            // keep the scope object reachable until the access has completed
            Reference.reachabilityFence(scope);
        }
    }
454 
455 
456     // typed-ops here
457 
458     // Note: all the accessor methods defined below take advantage of argument type profiling
459     // (see src/hotspot/share/oops/methodData.cpp) which greatly enhances performance when the same accessor
460     // method is used repeatedly with different 'base' objects.