< prev index next >

src/hotspot/share/gc/shared/barrierSet.hpp

Print this page

 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
 26 #define SHARE_GC_SHARED_BARRIERSET_HPP
 27 
 28 #include "gc/shared/barrierSetConfig.hpp"
 29 #include "memory/memRegion.hpp"
 30 #include "oops/access.hpp"
 31 #include "oops/accessBackend.hpp"
 32 #include "oops/oopsHierarchy.hpp"

 33 #include "utilities/fakeRttiSupport.hpp"
 34 #include "utilities/macros.hpp"
 35 
 36 class BarrierSetAssembler;
 37 class BarrierSetC1;
 38 class BarrierSetC2;
 39 class BarrierSetNMethod;
 40 class JavaThread;
 41 
 42 // This class provides the interface between a barrier implementation and
 43 // the rest of the system.
 44 
 45 class BarrierSet: public CHeapObj<mtGC> {
 46   friend class VMStructs;
 47 
 48   static BarrierSet* _barrier_set;
 49 
 50 public:
 51   enum Name {
 52 #define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,

104     _barrier_set_c1(barrier_set_c1),
105     _barrier_set_c2(barrier_set_c2),
106     _barrier_set_nmethod(barrier_set_nmethod) {}
107   ~BarrierSet() { }
108 
109   template <class BarrierSetAssemblerT>
      // Factory helper for the platform BarrierSetAssembler. Zero
      // (interpreter-only) builds have no assembler support, so the
      // ZERO_ONLY branch makes this return NULL there.
110   static BarrierSetAssembler* make_barrier_set_assembler() {
111     return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
112   }
113 
114   template <class BarrierSetC1T>
      // Factory helper for the C1 (client compiler) barrier support object;
      // returns NULL when C1 is not compiled in (NOT_COMPILER1 branch).
115   static BarrierSetC1* make_barrier_set_c1() {
116     return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
117   }
118 
119   template <class BarrierSetC2T>
      // Factory helper for the C2 (server compiler) barrier support object;
      // returns NULL when C2 is not compiled in (NOT_COMPILER2 branch).
120   static BarrierSetC2* make_barrier_set_c2() {
121     return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
122   }
123 



124 public:
125   // Support for optimizing compilers to call the barrier set on slow path allocations
126   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
127   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
128   // is redone until it succeeds. This can e.g. prevent allocations from the slow path
129   // to be in old.
130   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
      // Lifecycle hooks — default no-ops for barrier sets that need no
      // per-thread state. NOTE(review): presumably invoked at Thread
      // construction/destruction; confirm against the Thread lifecycle code.
131   virtual void on_thread_create(Thread* thread) {}
132   virtual void on_thread_destroy(Thread* thread) {}
133 
134   // These perform BarrierSet-related initialization/cleanup before the thread
135   // is added to or removed from the corresponding set of threads. The
136   // argument thread is the current thread. These are called either holding
137   // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
138   // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
139   // caller. That locking ensures the operation is "atomic" with the list
140   // modification w.r.t. operations that hold the NJTList_lock and either also
141   // hold the Threads_lock or are at a safepoint.
142   virtual void on_thread_attach(Thread* thread) {}
143   virtual void on_thread_detach(Thread* thread) {}

262 
263     template <typename T>
        // In-heap oop compare-and-swap. This base implementation applies no
        // GC barrier and simply forwards to the raw access backend.
264     static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
265       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
266     }
267 
268     static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
        // Field-addressed variant: base object + byte offset, resolved by the
        // raw backend's *_at overload. No barrier in this base implementation.
269       return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
270     }
271 
272     template <typename T>
        // In-heap atomic oop exchange; forwards to the raw backend (no barrier).
273     static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
274       return Raw::oop_atomic_xchg(addr, new_value);
275     }
276 
277     static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
        // Field-addressed variant of the in-heap atomic oop exchange.
278       return Raw::oop_atomic_xchg_at(base, offset, new_value);
279     }
280 
281     template <typename T>
        // Oop array copy; defined out of line. NOTE(review): the bool return
        // presumably indicates whether the copy succeeded (e.g. all elements
        // were storable) — confirm against the out-of-line definition.
282     static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
283                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
284                                       size_t length);
285 
286     // Off-heap oop accesses. These accessors get resolved when
287     // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
288     // an oop* overload, and the barrier strength is AS_NORMAL.
289     template <typename T>
        // Load of an oop from native (off-heap) memory; no barrier applied here.
290     static oop oop_load_not_in_heap(T* addr) {
291       return Raw::template oop_load<oop>(addr);
292     }
293 
294     template <typename T>
        // Store of an oop to native (off-heap) memory; forwards to the raw backend.
295     static void oop_store_not_in_heap(T* addr, oop value) {
296       Raw::oop_store(addr, value);
297     }
298 
299     template <typename T>
        // Off-heap oop compare-and-swap; no barrier in this base implementation.
300     static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
301       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
302     }
303 
304     template <typename T>
        // Off-heap atomic oop exchange; no barrier in this base implementation.
305     static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
306       return Raw::oop_atomic_xchg(addr, new_value);
307     }
308 
309     // Clone barrier support
        // Bulk copy of an object's payload for cloning; forwards to the raw
        // backend. NOTE(review): units of 'size' (bytes vs. heap words) are
        // not visible here — confirm against Raw::clone.
310     static void clone_in_heap(oop src, oop dst, size_t size) {
311       Raw::clone(src, dst, size);
312     }





313   };
314 };
315 
316 template<typename T>
317 inline T* barrier_set_cast(BarrierSet* bs) {
    // Checked downcast using the fake-RTTI Name tag (fakeRttiSupport.hpp):
    // asserts in debug builds that bs really is a T, then static_casts.
318   assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
319   return static_cast<T*>(bs);
320 }
321 
322 #endif // SHARE_GC_SHARED_BARRIERSET_HPP

 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
 26 #define SHARE_GC_SHARED_BARRIERSET_HPP
 27 
 28 #include "gc/shared/barrierSetConfig.hpp"
 29 #include "memory/memRegion.hpp"
 30 #include "oops/access.hpp"
 31 #include "oops/accessBackend.hpp"
 32 #include "oops/oopsHierarchy.hpp"
 33 #include "utilities/exceptions.hpp"
 34 #include "utilities/fakeRttiSupport.hpp"
 35 #include "utilities/macros.hpp"
 36 
 37 class BarrierSetAssembler;
 38 class BarrierSetC1;
 39 class BarrierSetC2;
 40 class BarrierSetNMethod;
 41 class JavaThread;
 42 
 43 // This class provides the interface between a barrier implementation and
 44 // the rest of the system.
 45 
 46 class BarrierSet: public CHeapObj<mtGC> {
 47   friend class VMStructs;
 48 
 49   static BarrierSet* _barrier_set;
 50 
 51 public:
 52   enum Name {
 53 #define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,

105     _barrier_set_c1(barrier_set_c1),
106     _barrier_set_c2(barrier_set_c2),
107     _barrier_set_nmethod(barrier_set_nmethod) {}
108   ~BarrierSet() { }
109 
110   template <class BarrierSetAssemblerT>
      // Factory helper for the platform BarrierSetAssembler. Zero
      // (interpreter-only) builds have no assembler support, so the
      // ZERO_ONLY branch makes this return NULL there.
111   static BarrierSetAssembler* make_barrier_set_assembler() {
112     return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
113   }
114 
115   template <class BarrierSetC1T>
      // Factory helper for the C1 (client compiler) barrier support object;
      // returns NULL when C1 is not compiled in (NOT_COMPILER1 branch).
116   static BarrierSetC1* make_barrier_set_c1() {
117     return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
118   }
119 
120   template <class BarrierSetC2T>
      // Factory helper for the C2 (server compiler) barrier support object;
      // returns NULL when C2 is not compiled in (NOT_COMPILER2 branch).
121   static BarrierSetC2* make_barrier_set_c2() {
122     return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
123   }
124 
125   static void throw_array_null_pointer_store_exception(arrayOop src, arrayOop dst, TRAPS);
      // Both helpers are declarations only (defined out of line): TRAPS-style
      // exception throwers for oop array-copy failures.
126   static void throw_array_store_exception(arrayOop src, arrayOop dst, TRAPS);
127 
128 public:
129   // Support for optimizing compilers to call the barrier set on slow path allocations
130   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
131   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
132   // is redone until it succeeds. This can e.g. prevent allocations from the slow path
133   // to be in old.
134   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
      // Lifecycle hooks — default no-ops for barrier sets that need no
      // per-thread state. NOTE(review): presumably invoked at Thread
      // construction/destruction; confirm against the Thread lifecycle code.
135   virtual void on_thread_create(Thread* thread) {}
136   virtual void on_thread_destroy(Thread* thread) {}
137 
138   // These perform BarrierSet-related initialization/cleanup before the thread
139   // is added to or removed from the corresponding set of threads. The
140   // argument thread is the current thread. These are called either holding
141   // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
142   // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
143   // caller. That locking ensures the operation is "atomic" with the list
144   // modification w.r.t. operations that hold the NJTList_lock and either also
145   // hold the Threads_lock or are at a safepoint.
146   virtual void on_thread_attach(Thread* thread) {}
147   virtual void on_thread_detach(Thread* thread) {}

266 
267     template <typename T>
        // In-heap oop compare-and-swap. This base implementation applies no
        // GC barrier and simply forwards to the raw access backend.
268     static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
269       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
270     }
271 
272     static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
        // Field-addressed variant: base object + byte offset, resolved by the
        // raw backend's *_at overload. No barrier in this base implementation.
273       return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
274     }
275 
276     template <typename T>
        // In-heap atomic oop exchange; forwards to the raw backend (no barrier).
277     static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
278       return Raw::oop_atomic_xchg(addr, new_value);
279     }
280 
281     static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
        // Field-addressed variant of the in-heap atomic oop exchange.
282       return Raw::oop_atomic_xchg_at(base, offset, new_value);
283     }
284 
285     template <typename T>
        // Oop array copy; defined out of line. Returns void in this version —
        // NOTE(review): failures are presumably reported by throwing (see the
        // throw_array_* helpers) rather than via a bool result; confirm
        // against the out-of-line definition.
286     static void oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
287                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
288                                       size_t length);
289 
290     // Off-heap oop accesses. These accessors get resolved when
291     // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
292     // an oop* overload, and the barrier strength is AS_NORMAL.
293     template <typename T>
        // Load of an oop from native (off-heap) memory; no barrier applied here.
294     static oop oop_load_not_in_heap(T* addr) {
295       return Raw::template oop_load<oop>(addr);
296     }
297 
298     template <typename T>
        // Store of an oop to native (off-heap) memory; forwards to the raw backend.
299     static void oop_store_not_in_heap(T* addr, oop value) {
300       Raw::oop_store(addr, value);
301     }
302 
303     template <typename T>
        // Off-heap oop compare-and-swap; no barrier in this base implementation.
304     static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
305       return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
306     }
307 
308     template <typename T>
        // Off-heap atomic oop exchange; no barrier in this base implementation.
309     static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
310       return Raw::oop_atomic_xchg(addr, new_value);
311     }
312 
313     // Clone barrier support
        // Bulk copy of an object's payload for cloning; forwards to the raw
        // backend. NOTE(review): units of 'size' (bytes vs. heap words) are
        // not visible here — confirm against Raw::clone.
314     static void clone_in_heap(oop src, oop dst, size_t size) {
315       Raw::clone(src, dst, size);
316     }
317 
318     static void value_copy_in_heap(void* src, void* dst, InlineKlass* md) {
        // Copies a flattened inline-type payload whose layout is described by
        // md; forwards to the raw backend. NOTE(review): exact layout/metadata
        // contract lives in InlineKlass and Raw::value_copy — confirm there.
319       Raw::value_copy(src, dst, md);
320     }
321 
322   };
323 };
324 
325 template<typename T>
326 inline T* barrier_set_cast(BarrierSet* bs) {
    // Checked downcast using the fake-RTTI Name tag (fakeRttiSupport.hpp):
    // asserts in debug builds that bs really is a T, then static_casts.
327   assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
328   return static_cast<T*>(bs);
329 }
330 
331 #endif // SHARE_GC_SHARED_BARRIERSET_HPP
< prev index next >