13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
26 #define SHARE_GC_SHARED_BARRIERSET_HPP
27
28 #include "gc/shared/barrierSetConfig.hpp"
29 #include "memory/memRegion.hpp"
30 #include "oops/access.hpp"
31 #include "oops/accessBackend.hpp"
32 #include "oops/oopsHierarchy.hpp"
33 #include "utilities/fakeRttiSupport.hpp"
34 #include "utilities/macros.hpp"
35
36 class BarrierSetAssembler;
37 class BarrierSetC1;
38 class BarrierSetC2;
39 class BarrierSetNMethod;
40 class BarrierSetStackChunk;
41 class JavaThread;
42
43 // This class provides the interface between a barrier implementation and
44 // the rest of the system.
45
46 class BarrierSet: public CHeapObj<mtGC> {
47 friend class VMStructs;
48
49 static BarrierSet* _barrier_set;
50
51 public:
52 enum Name {
102 BarrierSetNMethod* barrier_set_nmethod,
103 BarrierSetStackChunk* barrier_set_stack_chunk,
104 const FakeRtti& fake_rtti);
105 ~BarrierSet() { }
106
  // Factory for the platform assembler support object of a concrete barrier set.
  // On the Zero (interpreter-only) port there is no assembler, so the
  // NOT_ZERO/ZERO_ONLY macro pair (utilities/macros.hpp) selects nullptr instead.
  template <class BarrierSetAssemblerT>
  static BarrierSetAssembler* make_barrier_set_assembler() {
    return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(nullptr);
  }
111
  // Factory for the C1 (client compiler) barrier support object.
  // In builds without C1 the COMPILER1_PRESENT/NOT_COMPILER1 macro pair
  // reduces this to returning nullptr.
  template <class BarrierSetC1T>
  static BarrierSetC1* make_barrier_set_c1() {
    return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(nullptr);
  }
116
  // Factory for the C2 (server compiler) barrier support object.
  // In builds without C2 the COMPILER2_PRESENT/NOT_COMPILER2 macro pair
  // reduces this to returning nullptr.
  template <class BarrierSetC2T>
  static BarrierSetC2* make_barrier_set_c2() {
    return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(nullptr);
  }
121
public:
  // Support for optimizing compilers to call the barrier set on slow path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  // is redone until it succeeds. This can e.g. prevent allocations from the slow path
  // to be in old.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}

  // Lifecycle hooks invoked when a Thread object is created or destroyed.
  // No-ops by default; barrier sets with per-thread state provide overrides.
  virtual void on_thread_create(Thread* thread) {}
  virtual void on_thread_destroy(Thread* thread) {}

  // These perform BarrierSet-related initialization/cleanup before the thread
  // is added to or removed from the corresponding set of threads. The
  // argument thread is the current thread. These are called either holding
  // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
  // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
  // caller. That locking ensures the operation is "atomic" with the list
  // modification wrto operations that hold the NJTList_lock and either also
  // hold the Threads_lock or are at a safepoint.
  // on_thread_attach has a non-trivial default implementation defined out-of-line.
  virtual void on_thread_attach(Thread* thread);
  virtual void on_thread_detach(Thread* thread) {}
265
    // In-heap atomic oop accesses. These defaults simply delegate to the Raw
    // (unbarriered) access backend; barrier sets that need pre/post barriers
    // provide their own versions, selected via the Access template dispatch.

    // Atomically compare-and-exchange an oop at *addr; returns the value
    // previously stored there.
    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    // Same operation, addressed as (base object, byte offset) instead of a raw pointer.
    static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
    }

    // Atomically exchange the oop at *addr; returns the previous value.
    template <typename T>
    static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    // Same operation, addressed as (base object, byte offset).
    static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
      return Raw::oop_atomic_xchg_at(base, offset, new_value);
    }

    // Bulk oop array copy; defined out-of-line. The bool result presumably
    // reports whether the copy completed — confirm against the definition.
    template <typename T>
    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                      size_t length);
288
    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop* overload, and the barrier strength is AS_NORMAL.
    // All defaults delegate to the Raw (unbarriered) backend.
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    // Returns the value previously stored at *addr.
    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    // Returns the value previously stored at *addr.
    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    // Clone barrier support: raw bitwise copy of src into dst.
    // NOTE(review): the unit of 'size' (bytes vs HeapWords) is determined by
    // Raw::clone, which is not visible here — confirm before relying on it.
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }
316 };
317 };
318
// Checked downcast of the global BarrierSet to a concrete barrier set type T.
// The fake-RTTI is_a() test guards the static_cast (assert-only, so the check
// is active in debug builds).
template<typename T>
inline T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
  return static_cast<T*>(bs);
}
324
325 #endif // SHARE_GC_SHARED_BARRIERSET_HPP
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
26 #define SHARE_GC_SHARED_BARRIERSET_HPP
27
28 #include "gc/shared/barrierSetConfig.hpp"
29 #include "memory/memRegion.hpp"
30 #include "oops/access.hpp"
31 #include "oops/accessBackend.hpp"
32 #include "oops/oopsHierarchy.hpp"
33 #include "utilities/exceptions.hpp"
34 #include "utilities/fakeRttiSupport.hpp"
35 #include "utilities/macros.hpp"
36
37 class BarrierSetAssembler;
38 class BarrierSetC1;
39 class BarrierSetC2;
40 class BarrierSetNMethod;
41 class BarrierSetStackChunk;
42 class JavaThread;
43
44 // This class provides the interface between a barrier implementation and
45 // the rest of the system.
46
47 class BarrierSet: public CHeapObj<mtGC> {
48 friend class VMStructs;
49
50 static BarrierSet* _barrier_set;
51
52 public:
53 enum Name {
103 BarrierSetNMethod* barrier_set_nmethod,
104 BarrierSetStackChunk* barrier_set_stack_chunk,
105 const FakeRtti& fake_rtti);
106 ~BarrierSet() { }
107
  // Factory for the platform assembler support object of a concrete barrier set.
  // On the Zero (interpreter-only) port there is no assembler, so the
  // NOT_ZERO/ZERO_ONLY macro pair (utilities/macros.hpp) selects nullptr instead.
  template <class BarrierSetAssemblerT>
  static BarrierSetAssembler* make_barrier_set_assembler() {
    return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(nullptr);
  }
112
  // Factory for the C1 (client compiler) barrier support object.
  // In builds without C1 the COMPILER1_PRESENT/NOT_COMPILER1 macro pair
  // reduces this to returning nullptr.
  template <class BarrierSetC1T>
  static BarrierSetC1* make_barrier_set_c1() {
    return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(nullptr);
  }
117
  // Factory for the C2 (server compiler) barrier support object.
  // In builds without C2 the COMPILER2_PRESENT/NOT_COMPILER2 macro pair
  // reduces this to returning nullptr.
  template <class BarrierSetC2T>
  static BarrierSetC2* make_barrier_set_c2() {
    return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(nullptr);
  }
122
  // Out-of-line helpers for arraycopy barriers to raise the appropriate Java
  // exception when an element store is rejected (presumably a null stored into
  // a null-restricted array vs. an element type mismatch — confirm in the
  // definitions). TRAPS marks them as potentially throwing into the current thread.
  static void throw_array_null_pointer_store_exception(arrayOop src, arrayOop dst, TRAPS);
  static void throw_array_store_exception(arrayOop src, arrayOop dst, TRAPS);

public:
  // Support for optimizing compilers to call the barrier set on slow path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  // is redone until it succeeds. This can e.g. prevent allocations from the slow path
  // to be in old.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}

  // Lifecycle hooks invoked when a Thread object is created or destroyed.
  // No-ops by default; barrier sets with per-thread state provide overrides.
  virtual void on_thread_create(Thread* thread) {}
  virtual void on_thread_destroy(Thread* thread) {}

  // These perform BarrierSet-related initialization/cleanup before the thread
  // is added to or removed from the corresponding set of threads. The
  // argument thread is the current thread. These are called either holding
  // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
  // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
  // caller. That locking ensures the operation is "atomic" with the list
  // modification wrto operations that hold the NJTList_lock and either also
  // hold the Threads_lock or are at a safepoint.
  // on_thread_attach has a non-trivial default implementation defined out-of-line.
  virtual void on_thread_attach(Thread* thread);
  virtual void on_thread_detach(Thread* thread) {}
269
    // In-heap atomic oop accesses. These defaults simply delegate to the Raw
    // (unbarriered) access backend; barrier sets that need pre/post barriers
    // provide their own versions, selected via the Access template dispatch.

    // Atomically compare-and-exchange an oop at *addr; returns the value
    // previously stored there.
    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    // Same operation, addressed as (base object, byte offset) instead of a raw pointer.
    static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
    }

    // Atomically exchange the oop at *addr; returns the previous value.
    template <typename T>
    static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    // Same operation, addressed as (base object, byte offset).
    static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
      return Raw::oop_atomic_xchg_at(base, offset, new_value);
    }

    // Bulk oop array copy; defined out-of-line.
    template <typename T>
    static void oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                      size_t length);
292
    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop* overload, and the barrier strength is AS_NORMAL.
    // All defaults delegate to the Raw (unbarriered) backend.
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    // Returns the value previously stored at *addr.
    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    // Returns the value previously stored at *addr.
    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    // Clone barrier support: raw bitwise copy of src into dst.
    // NOTE(review): the unit of 'size' (bytes vs HeapWords) is determined by
    // Raw::clone, which is not visible here — confirm before relying on it.
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }

    // Copies a flat value (inline type) payload from src to dst.
    // md is the InlineKlass presumably describing the value's layout for the
    // copy — confirm against Raw::value_copy.
    static void value_copy_in_heap(void* src, void* dst, InlineKlass* md) {
      Raw::value_copy(src, dst, md);
    }

324
325 };
326 };
327
// Checked downcast of the global BarrierSet to a concrete barrier set type T.
// The fake-RTTI is_a() test guards the static_cast (assert-only, so the check
// is active in debug builds).
template<typename T>
inline T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
  return static_cast<T*>(bs);
}
333
334 #endif // SHARE_GC_SHARED_BARRIERSET_HPP
|