src/hotspot/share/gc/shared/barrierSet.hpp

Old version (before the change):
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
  26 #define SHARE_GC_SHARED_BARRIERSET_HPP
  27 
  28 #include "gc/shared/barrierSetConfig.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "oops/access.hpp"
  31 #include "oops/accessBackend.hpp"
  32 #include "oops/oopsHierarchy.hpp"
  33 #include "utilities/fakeRttiSupport.hpp"
  34 #include "utilities/macros.hpp"
  35 
  36 class BarrierSetAssembler;
  37 class BarrierSetC1;
  38 class BarrierSetC2;
  39 class BarrierSetNMethod;
  40 class JavaThread;
  41 
  42 // This class provides the interface between a barrier implementation and
  43 // the rest of the system.
  44 
  45 class BarrierSet: public CHeapObj<mtGC> {
  46   friend class VMStructs;
  47 
  48   static BarrierSet* _barrier_set;
  49 
  50 public:
  51   enum Name {
  52 #define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,


 104     _barrier_set_c1(barrier_set_c1),
 105     _barrier_set_c2(barrier_set_c2),
 106     _barrier_set_nmethod(barrier_set_nmethod) {}
 107   ~BarrierSet() { }
 108 
 109   template <class BarrierSetAssemblerT>
 110   static BarrierSetAssembler* make_barrier_set_assembler() {
 111     return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
 112   }
 113 
 114   template <class BarrierSetC1T>
 115   static BarrierSetC1* make_barrier_set_c1() {
 116     return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
 117   }
 118 
 119   template <class BarrierSetC2T>
 120   static BarrierSetC2* make_barrier_set_c2() {
 121     return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
 122   }
 123 
 124 public:
  125   // Support for optimizing compilers to call the barrier set on slow-path allocations
  126   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  127   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  128   // is redone until it succeeds. This can e.g. prevent slow-path allocations from
  129   // ending up in the old generation.
 130   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
 131   virtual void on_thread_create(Thread* thread) {}
 132   virtual void on_thread_destroy(Thread* thread) {}
 133 
  134   // These perform BarrierSet-related initialization/cleanup before the thread
  135   // is added to or removed from the corresponding set of threads. The
  136   // argument thread is the current thread. They are called either with the
  137   // Threads_lock held (for a JavaThread), and so not at a safepoint, or with
  138   // the NonJavaThreadsList_lock held by the caller (for a NonJavaThread).
  139   // That locking ensures the operation is "atomic" with the list modification
  140   // with respect to operations that hold the NJTList_lock and either also
  141   // hold the Threads_lock or are at a safepoint.
 142   virtual void on_thread_attach(Thread* thread) {}
 143   virtual void on_thread_detach(Thread* thread) {}


 266 
 267     template <typename T>
 268     static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
 269       return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
 270     }
 271 
 272     static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
 273       return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
 274     }
 275 
 276     template <typename T>
 277     static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
 278       return Raw::oop_atomic_xchg(new_value, addr);
 279     }
 280 
 281     static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
 282       return Raw::oop_atomic_xchg_at(new_value, base, offset);
 283     }
 284 
 285     template <typename T>
 286     static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 287                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 288                                       size_t length);
 289 
  290     // Off-heap oop accesses. These accessors are resolved when
  291     // IN_HEAP is not set (e.g. when using the NativeAccess API), the
  292     // access is an oop* overload, and the barrier strength is AS_NORMAL.
 293     template <typename T>
 294     static oop oop_load_not_in_heap(T* addr) {
 295       return Raw::template oop_load<oop>(addr);
 296     }
 297 
 298     template <typename T>
 299     static void oop_store_not_in_heap(T* addr, oop value) {
 300       Raw::oop_store(addr, value);
 301     }
 302 
 303     template <typename T>
 304     static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
 305       return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
 306     }

New version (after the change):

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
  26 #define SHARE_GC_SHARED_BARRIERSET_HPP
  27 
  28 #include "gc/shared/barrierSetConfig.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "oops/access.hpp"
  31 #include "oops/accessBackend.hpp"
  32 #include "oops/oopsHierarchy.hpp"
  33 #include "utilities/exceptions.hpp"
  34 #include "utilities/fakeRttiSupport.hpp"
  35 #include "utilities/macros.hpp"
  36 
  37 class BarrierSetAssembler;
  38 class BarrierSetC1;
  39 class BarrierSetC2;
  40 class BarrierSetNMethod;
  41 class JavaThread;
  42 
  43 // This class provides the interface between a barrier implementation and
  44 // the rest of the system.
  45 
  46 class BarrierSet: public CHeapObj<mtGC> {
  47   friend class VMStructs;
  48 
  49   static BarrierSet* _barrier_set;
  50 
  51 public:
  52   enum Name {
  53 #define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
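
For context, the Name values generated by this macro are normally consumed together with the barrier_set() accessor, is_a() query and barrier_set_cast<>() helper declared later in this header (elided from this hunk); a minimal hedged sketch, assuming the CardTableBarrierSet Name value and class exist as in mainline HotSpot:

    // Hedged sketch, not part of this file: querying and downcasting the
    // globally installed barrier set via the generated Name enum.
    BarrierSet* bs = BarrierSet::barrier_set();
    if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
      CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
      // ... use card-table-specific state on ctbs ...
    }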


 105     _barrier_set_c1(barrier_set_c1),
 106     _barrier_set_c2(barrier_set_c2),
 107     _barrier_set_nmethod(barrier_set_nmethod) {}
 108   ~BarrierSet() { }
 109 
 110   template <class BarrierSetAssemblerT>
 111   static BarrierSetAssembler* make_barrier_set_assembler() {
 112     return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
 113   }
 114 
 115   template <class BarrierSetC1T>
 116   static BarrierSetC1* make_barrier_set_c1() {
 117     return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
 118   }
 119 
 120   template <class BarrierSetC2T>
 121   static BarrierSetC2* make_barrier_set_c2() {
 122     return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
 123   }
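
These factory helpers are normally invoked from a concrete barrier set's constructor; the following is a minimal hedged sketch with a hypothetical MyBarrierSet, where the parameter order is assumed to match the BarrierSet constructor whose initializer list is shown above:

    // Hedged sketch, not part of this file. The MyBarrierSet* names and the
    // MyBarrierSetName enum value are hypothetical. Each factory collapses to
    // NULL on interpreter-only (Zero) builds or when the respective compiler
    // is not built in.
    MyBarrierSet::MyBarrierSet() :
      BarrierSet(make_barrier_set_assembler<MyBarrierSetAssembler>(),
                 make_barrier_set_c1<MyBarrierSetC1>(),
                 make_barrier_set_c2<MyBarrierSetC2>(),
                 NULL /* no BarrierSetNMethod */,
                 FakeRtti(BarrierSet::MyBarrierSetName /* hypothetical Name value */)) {}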
 124 
 125   static void throw_array_null_pointer_store_exception(arrayOop src, arrayOop dst, TRAPS);
 126   static void throw_array_store_exception(arrayOop src, arrayOop dst, TRAPS);
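
The two new static helpers above centralize the exceptions raised when an element-wise (checkcasting) arraycopy rejects a value; a hedged usage sketch, in which load_source_element(), destination_rejects_null(), element_allowed_in() and store_destination_element() are hypothetical helpers and the code is assumed to run inside a BarrierSet subclass that can reach these statics:

    // Hedged sketch, not part of this file: a hypothetical checkcasting
    // arraycopy loop that reports failures through the helpers above.
    template <typename T>
    void copy_element_wise(arrayOop src, T* src_raw,
                           arrayOop dst, T* dst_raw,
                           size_t length, TRAPS) {
      for (size_t i = 0; i < length; i++) {
        oop elem = load_source_element(src_raw, i);                // hypothetical
        if (elem == NULL && destination_rejects_null(dst)) {       // hypothetical
          BarrierSet::throw_array_null_pointer_store_exception(src, dst, CHECK);
        }
        if (elem != NULL && !element_allowed_in(elem, dst)) {      // hypothetical store check
          BarrierSet::throw_array_store_exception(src, dst, CHECK);
        }
        store_destination_element(dst_raw, i, elem);               // hypothetical
      }
    }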
 127 
 128 public:
  129   // Support for optimizing compilers to call the barrier set on slow-path allocations
  130   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  131   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  132   // is redone until it succeeds. This can e.g. prevent slow-path allocations from
  133   // ending up in the old generation.
 134   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
 135   virtual void on_thread_create(Thread* thread) {}
 136   virtual void on_thread_destroy(Thread* thread) {}
 137 
  138   // These perform BarrierSet-related initialization/cleanup before the thread
  139   // is added to or removed from the corresponding set of threads. The
  140   // argument thread is the current thread. They are called either with the
  141   // Threads_lock held (for a JavaThread), and so not at a safepoint, or with
  142   // the NonJavaThreadsList_lock held by the caller (for a NonJavaThread).
  143   // That locking ensures the operation is "atomic" with the list modification
  144   // with respect to operations that hold the NJTList_lock and either also
  145   // hold the Threads_lock or are at a safepoint.
 146   virtual void on_thread_attach(Thread* thread) {}
 147   virtual void on_thread_detach(Thread* thread) {}
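
To show what these hooks are for, here is a hedged sketch of a hypothetical concurrently-marking barrier set activating per-thread state on attach; MyBarrierSet, MyThreadLocalData and marking_is_active() are illustrative names only:

    // Hedged sketch, not part of this file. Runs under Threads_lock or
    // NonJavaThreadsList_lock as described above, before the thread becomes
    // visible to thread-list iteration.
    void MyBarrierSet::on_thread_attach(Thread* thread) {
      MyThreadLocalData* data = MyThreadLocalData::data(thread);   // hypothetical accessor
      data->satb_queue().set_active(marking_is_active());          // hypothetical per-thread queue
    }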


 270 
 271     template <typename T>
 272     static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
 273       return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
 274     }
 275 
 276     static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
 277       return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
 278     }
 279 
 280     template <typename T>
 281     static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
 282       return Raw::oop_atomic_xchg(new_value, addr);
 283     }
 284 
 285     static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
 286       return Raw::oop_atomic_xchg_at(new_value, base, offset);
 287     }
 288 
 289     template <typename T>
 290     static void oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 291                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 292                                       size_t length);
 293 
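
The accessors in this Raw-forwarding base are what concrete collectors override to insert their barriers; a hedged sketch of how a subclass's AccessBarrier might decorate the in-heap cmpxchg, where post_write_barrier() is a hypothetical hook and not a function in this file:

    // Hedged sketch, not part of this file: layering a write barrier over the
    // Raw forward shown above, inside a concrete BarrierSet's AccessBarrier.
    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
      oop result = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
      if (result == compare_value) {            // the store actually happened
        post_write_barrier(addr, new_value);    // hypothetical barrier hook
      }
      return result;
    }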
  294     // Off-heap oop accesses. These accessors are resolved when
  295     // IN_HEAP is not set (e.g. when using the NativeAccess API), the
  296     // access is an oop* overload, and the barrier strength is AS_NORMAL.
 297     template <typename T>
 298     static oop oop_load_not_in_heap(T* addr) {
 299       return Raw::template oop_load<oop>(addr);
 300     }
 301 
 302     template <typename T>
 303     static void oop_store_not_in_heap(T* addr, oop value) {
 304       Raw::oop_store(addr, value);
 305     }
 306 
 307     template <typename T>
 308     static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
 309       return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
 310     }
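
To illustrate when these not_in_heap accessors are selected, a hedged sketch of an off-heap oop root accessed through the Access API; my_native_root, store_root() and load_root() are illustrative names:

    // Hedged sketch, not part of this file: with no IN_HEAP decorator, the
    // NativeAccess API dispatches to the *_not_in_heap accessors above.
    static oop my_native_root = NULL;

    void store_root(oop obj) {
      NativeAccess<>::oop_store(&my_native_root, obj);     // -> oop_store_not_in_heap
    }

    oop load_root() {
      return NativeAccess<>::oop_load(&my_native_root);    // -> oop_load_not_in_heap
    }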

