< prev index next >

src/hotspot/share/gc/shared/barrierSet.hpp

Print this page

 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
 26 #define SHARE_GC_SHARED_BARRIERSET_HPP
 27 
 28 #include "gc/shared/barrierSetConfig.hpp"
 29 #include "memory/memRegion.hpp"
 30 #include "oops/access.hpp"
 31 #include "oops/accessBackend.hpp"
 32 #include "oops/oopsHierarchy.hpp"

 33 #include "utilities/fakeRttiSupport.hpp"
 34 #include "utilities/macros.hpp"
 35 
 36 class BarrierSetAssembler;
 37 class BarrierSetC1;
 38 class BarrierSetC2;
 39 class BarrierSetNMethod;
 40 class BarrierSetStackChunk;
 41 class JavaThread;
 42 
 43 // This class provides the interface between a barrier implementation and
 44 // the rest of the system.
 45 
 46 class BarrierSet: public CHeapObj<mtGC> {
 47   friend class VMStructs;
 48 
 49   static BarrierSet* _barrier_set;
 50 
 51 public:
 52   enum Name {

102              BarrierSetNMethod* barrier_set_nmethod,
103              BarrierSetStackChunk* barrier_set_stack_chunk,
104              const FakeRtti& fake_rtti);
105   ~BarrierSet() { }
106 
// Instantiates the platform-specific BarrierSetAssembler. Zero (interpreter-only)
// builds have no assembler support, so they get nullptr via ZERO_ONLY.
107   template <class BarrierSetAssemblerT>
108   static BarrierSetAssembler* make_barrier_set_assembler() {
109     return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(nullptr);
110   }
111 
// Instantiates the C1 barrier support object when the C1 compiler is built in;
// builds without COMPILER1 get nullptr.
112   template <class BarrierSetC1T>
113   static BarrierSetC1* make_barrier_set_c1() {
114     return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(nullptr);
115   }
116 
// Instantiates the C2 barrier support object when the C2 compiler is built in;
// builds without COMPILER2 get nullptr.
117   template <class BarrierSetC2T>
118   static BarrierSetC2* make_barrier_set_c2() {
119     return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(nullptr);
120   }
121 



122 public:
123   // Support for optimizing compilers to call the barrier set on slow path allocations
124   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
125   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
126   // is redone until it succeeds. This can e.g. prevent allocations from the slow path
127   // to be in old.
128   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
129   virtual void on_thread_create(Thread* thread) {}
130   virtual void on_thread_destroy(Thread* thread) {}
131 
132   // These perform BarrierSet-related initialization/cleanup before the thread
133   // is added to or removed from the corresponding set of threads. The
134   // argument thread is the current thread. These are called either holding
135   // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
136   // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
137   // caller. That locking ensures the operation is "atomic" with the list
138   // modification wrto operations that hold the NJTList_lock and either also
139   // hold the Threads_lock or are at a safepoint.
140   virtual void on_thread_attach(Thread* thread);
141   virtual void on_thread_detach(Thread* thread) {}

264 
// In-heap oop compare-and-swap: adds no barrier work of its own, just
// delegates to the Raw access backend.
265     template <typename T>
266     static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
267       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
268     }
269 
// Field-addressed (base object + byte offset) oop compare-and-swap;
// delegates to the Raw backend's _at variant.
270     static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
271       return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
272     }
273 
// In-heap atomic oop exchange; pure pass-through to the Raw backend.
274     template <typename T>
275     static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
276       return Raw::oop_atomic_xchg(addr, new_value);
277     }
278 
// Field-addressed (base object + byte offset) atomic oop exchange;
// delegates to the Raw backend's _at variant.
279     static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
280       return Raw::oop_atomic_xchg_at(base, offset, new_value);
281     }
282 
283     template <typename T>
284     static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
285                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
286                                       size_t length);
287 
288     // Off-heap oop accesses. These accessors get resolved when
289     // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
290     // an oop* overload, and the barrier strength is AS_NORMAL.
// Off-heap oop load; forwards to Raw::oop_load. The 'template' keyword is
// required because oop_load is a dependent template member of Raw.
291     template <typename T>
292     static oop oop_load_not_in_heap(T* addr) {
293       return Raw::template oop_load<oop>(addr);
294     }
295 
// Off-heap oop store; performs only the Raw store — no pre/post barrier
// work is applied at this level.
296     template <typename T>
297     static void oop_store_not_in_heap(T* addr, oop value) {
298       Raw::oop_store(addr, value);
299     }
300 
// Off-heap oop compare-and-swap; delegates directly to the Raw backend.
301     template <typename T>
302     static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
303       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
304     }
305 
// Off-heap atomic oop exchange; delegates directly to the Raw backend.
306     template <typename T>
307     static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
308       return Raw::oop_atomic_xchg(addr, new_value);
309     }
310 
311     // Clone barrier support
// Default clone: a plain copy via Raw::clone with no extra barrier work.
// NOTE(review): presumably overridden by barrier sets that need post-copy
// processing — confirm in concrete subclasses.
312     static void clone_in_heap(oop src, oop dst, size_t size) {
313       Raw::clone(src, dst, size);
314     }





315   };
316 };
317 
// Checked downcast from BarrierSet* to a concrete barrier set type T:
// asserts that the FakeRtti-recorded name (BarrierSet::GetName<T>) matches
// before performing the static_cast.
318 template<typename T>
319 inline T* barrier_set_cast(BarrierSet* bs) {
320   assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
321   return static_cast<T*>(bs);
322 }
323 
324 #endif // SHARE_GC_SHARED_BARRIERSET_HPP

 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
 26 #define SHARE_GC_SHARED_BARRIERSET_HPP
 27 
 28 #include "gc/shared/barrierSetConfig.hpp"
 29 #include "memory/memRegion.hpp"
 30 #include "oops/access.hpp"
 31 #include "oops/accessBackend.hpp"
 32 #include "oops/oopsHierarchy.hpp"
 33 #include "utilities/exceptions.hpp"
 34 #include "utilities/fakeRttiSupport.hpp"
 35 #include "utilities/macros.hpp"
 36 
 37 class BarrierSetAssembler;
 38 class BarrierSetC1;
 39 class BarrierSetC2;
 40 class BarrierSetNMethod;
 41 class BarrierSetStackChunk;
 42 class JavaThread;
 43 
 44 // This class provides the interface between a barrier implementation and
 45 // the rest of the system.
 46 
 47 class BarrierSet: public CHeapObj<mtGC> {
 48   friend class VMStructs;
 49 
 50   static BarrierSet* _barrier_set;
 51 
 52 public:
 53   enum Name {

103              BarrierSetNMethod* barrier_set_nmethod,
104              BarrierSetStackChunk* barrier_set_stack_chunk,
105              const FakeRtti& fake_rtti);
106   ~BarrierSet() { }
107 
// Instantiates the platform-specific BarrierSetAssembler. Zero (interpreter-only)
// builds have no assembler support, so they get nullptr via ZERO_ONLY.
108   template <class BarrierSetAssemblerT>
109   static BarrierSetAssembler* make_barrier_set_assembler() {
110     return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(nullptr);
111   }
112 
// Instantiates the C1 barrier support object when the C1 compiler is built in;
// builds without COMPILER1 get nullptr.
113   template <class BarrierSetC1T>
114   static BarrierSetC1* make_barrier_set_c1() {
115     return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(nullptr);
116   }
117 
// Instantiates the C2 barrier support object when the C2 compiler is built in;
// builds without COMPILER2 get nullptr.
118   template <class BarrierSetC2T>
119   static BarrierSetC2* make_barrier_set_c2() {
120     return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(nullptr);
121   }
122 
123   static void throw_array_null_pointer_store_exception(arrayOop src, arrayOop dst, TRAPS);
124   static void throw_array_store_exception(arrayOop src, arrayOop dst, TRAPS);
125 
126 public:
127   // Support for optimizing compilers to call the barrier set on slow path allocations
128   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
129   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
130   // is redone until it succeeds. This can e.g. prevent allocations from the slow path
131   // to be in old.
132   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
133   virtual void on_thread_create(Thread* thread) {}
134   virtual void on_thread_destroy(Thread* thread) {}
135 
136   // These perform BarrierSet-related initialization/cleanup before the thread
137   // is added to or removed from the corresponding set of threads. The
138   // argument thread is the current thread. These are called either holding
139   // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
140   // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
141   // caller. That locking ensures the operation is "atomic" with the list
142   // modification wrto operations that hold the NJTList_lock and either also
143   // hold the Threads_lock or are at a safepoint.
144   virtual void on_thread_attach(Thread* thread);
145   virtual void on_thread_detach(Thread* thread) {}

268 
// In-heap oop compare-and-swap: adds no barrier work of its own, just
// delegates to the Raw access backend.
269     template <typename T>
270     static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
271       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
272     }
273 
// Field-addressed (base object + byte offset) oop compare-and-swap;
// delegates to the Raw backend's _at variant.
274     static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
275       return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
276     }
277 
// In-heap atomic oop exchange; pure pass-through to the Raw backend.
278     template <typename T>
279     static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
280       return Raw::oop_atomic_xchg(addr, new_value);
281     }
282 
// Field-addressed (base object + byte offset) atomic oop exchange;
// delegates to the Raw backend's _at variant.
283     static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
284       return Raw::oop_atomic_xchg_at(base, offset, new_value);
285     }
286 
287     template <typename T>
288     static void oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
289                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
290                                       size_t length);
291 
292     // Off-heap oop accesses. These accessors get resolved when
293     // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
294     // an oop* overload, and the barrier strength is AS_NORMAL.
// Off-heap oop load; forwards to Raw::oop_load. The 'template' keyword is
// required because oop_load is a dependent template member of Raw.
295     template <typename T>
296     static oop oop_load_not_in_heap(T* addr) {
297       return Raw::template oop_load<oop>(addr);
298     }
299 
// Off-heap oop store; performs only the Raw store — no pre/post barrier
// work is applied at this level.
300     template <typename T>
301     static void oop_store_not_in_heap(T* addr, oop value) {
302       Raw::oop_store(addr, value);
303     }
304 
// Off-heap oop compare-and-swap; delegates directly to the Raw backend.
305     template <typename T>
306     static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
307       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
308     }
309 
// Off-heap atomic oop exchange; delegates directly to the Raw backend.
310     template <typename T>
311     static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
312       return Raw::oop_atomic_xchg(addr, new_value);
313     }
314 
315     // Clone barrier support
// Default clone: a plain copy via Raw::clone with no extra barrier work.
// NOTE(review): presumably overridden by barrier sets that need post-copy
// processing — confirm in concrete subclasses.
316     static void clone_in_heap(oop src, oop dst, size_t size) {
317       Raw::clone(src, dst, size);
318     }
319 
// Copies an inline-type (flat) value whose layout is described by the
// InlineKlass 'md' and LayoutKind 'lk'; delegates to Raw::value_copy.
// NOTE(review): src/dst are raw payload addresses, not oops — confirm
// against the Raw backend's value_copy contract.
320     static void value_copy_in_heap(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
321       Raw::value_copy(src, dst, md, lk);
322     }
323 
324   };
325 };
326 
// Checked downcast from BarrierSet* to a concrete barrier set type T:
// asserts that the FakeRtti-recorded name (BarrierSet::GetName<T>) matches
// before performing the static_cast.
327 template<typename T>
328 inline T* barrier_set_cast(BarrierSet* bs) {
329   assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
330   return static_cast<T*>(bs);
331 }
332 
333 #endif // SHARE_GC_SHARED_BARRIERSET_HPP
< prev index next >