1 /*
2 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
26 #define SHARE_GC_SHARED_BARRIERSET_HPP
27
28 #include "gc/shared/barrierSetConfig.hpp"
29 #include "memory/memRegion.hpp"
30 #include "oops/access.hpp"
31 #include "oops/accessBackend.hpp"
32 #include "oops/oopsHierarchy.hpp"
33 #include "utilities/fakeRttiSupport.hpp"
34 #include "utilities/macros.hpp"
35
36 class BarrierSetAssembler;
37 class BarrierSetC1;
38 class BarrierSetC2;
39 class BarrierSetNMethod;
40 class BarrierSetStackChunk;
41 class JavaThread;
42
43 // This class provides the interface between a barrier implementation and
44 // the rest of the system.
45
46 class BarrierSet: public CHeapObj<mtGC> {
47 friend class VMStructs;
48
49 static BarrierSet* _barrier_set;
50
51 public:
52 enum Name {
53 #define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
54 FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
55 #undef BARRIER_SET_DECLARE_BS_ENUM
56 UnknownBS
57 };
58
59 protected:
60 // Fake RTTI support. For a derived class T to participate
61 // - T must have a corresponding Name entry.
62 // - GetName<T> must be specialized to return the corresponding Name
63 // entry.
64 // - If T is a base class, the constructor must have a FakeRtti
65 // parameter and pass it up to its base class, with the tag set
66 // augmented with the corresponding Name entry.
67 // - If T is a concrete class, the constructor must create a
68 // FakeRtti object whose tag set includes the corresponding Name
69 // entry, and pass it up to its base class.
70 typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;
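
  // A minimal sketch of the two cases above, using hypothetical
  // FooBaseBarrierSet / FooBarrierSet classes (illustrative names only,
  // not part of this header):
  //
  //   // Intermediate base class: augment the received tag set and pass it up.
  //   FooBaseBarrierSet(/* ... */ const FakeRtti& fake_rtti)
  //     : BarrierSet(/* ... */ fake_rtti.add_tag(BarrierSet::FooBase)) {}
  //
  //   // Concrete class: create the FakeRtti with its own tag and pass it up.
  //   FooBarrierSet(/* ... */)
  //     : FooBaseBarrierSet(/* ... */ FakeRtti(BarrierSet::FooBarrierSet)) {}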
71
72 private:
73 FakeRtti _fake_rtti;
74 BarrierSetAssembler* _barrier_set_assembler;
75 BarrierSetC1* _barrier_set_c1;
76 BarrierSetC2* _barrier_set_c2;
77 BarrierSetNMethod* _barrier_set_nmethod;
78 BarrierSetStackChunk* _barrier_set_stack_chunk;
79
80 public:
81 // Metafunction mapping a class derived from BarrierSet to the
82 // corresponding Name enum tag.
83 template<typename T> struct GetName;
84
  // Metafunction mapping a Name enum value to the corresponding
  // class derived from BarrierSet.
87 template<BarrierSet::Name T> struct GetType;
88
89 // Note: This is not presently the Name corresponding to the
90 // concrete class of this object.
91 BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }
92
93 // Test whether this object is of the type corresponding to bsn.
94 bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }
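  //
  // For example, assuming the card table barrier set is in use:
  //   BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet)  // true
  //   BarrierSet::barrier_set()->is_a(BarrierSet::ModRef)               // also true, via the tag set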
95
96 // End of fake RTTI support.
97
98 protected:
99 BarrierSet(BarrierSetAssembler* barrier_set_assembler,
100 BarrierSetC1* barrier_set_c1,
101 BarrierSetC2* barrier_set_c2,
102 BarrierSetNMethod* barrier_set_nmethod,
103 BarrierSetStackChunk* barrier_set_stack_chunk,
104 const FakeRtti& fake_rtti);
105 ~BarrierSet() { }
106
107 template <class BarrierSetAssemblerT>
108 static BarrierSetAssembler* make_barrier_set_assembler() {
109 return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(nullptr);
110 }
111
112 template <class BarrierSetC1T>
113 static BarrierSetC1* make_barrier_set_c1() {
114 return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(nullptr);
115 }
116
117 template <class BarrierSetC2T>
118 static BarrierSetC2* make_barrier_set_c2() {
119 return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(nullptr);
120 }
121
122 public:
  // Support for optimizing compilers to call the barrier set on slow-path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  // is redone until it succeeds. This can e.g. prevent slow-path allocations
  // from ending up in the old generation.
128 virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
129 virtual void on_thread_create(Thread* thread) {}
130 virtual void on_thread_destroy(Thread* thread) {}
131
132 // These perform BarrierSet-related initialization/cleanup before the thread
133 // is added to or removed from the corresponding set of threads. The
  // argument thread is the current thread. These are called either while
  // holding the Threads_lock (for a JavaThread), and so not at a safepoint,
  // or while holding the NonJavaThreadsList_lock (for a NonJavaThread), which
  // is locked by the caller. That locking ensures the operation is "atomic"
  // with the list modification, with respect to operations that hold the
  // NJTList_lock and either also hold the Threads_lock or are at a safepoint.
140 virtual void on_thread_attach(Thread* thread);
141 virtual void on_thread_detach(Thread* thread) {}
142
143 virtual void make_parsable(JavaThread* thread) {}
144
145 // Print a description of the memory for the barrier set
146 virtual void print_on(outputStream* st) const = 0;
147
148 static BarrierSet* barrier_set() { return _barrier_set; }
149 static void set_barrier_set(BarrierSet* barrier_set);
150
151 BarrierSetAssembler* barrier_set_assembler() {
152 assert(_barrier_set_assembler != nullptr, "should be set");
153 return _barrier_set_assembler;
154 }
155
156 BarrierSetC1* barrier_set_c1() {
157 assert(_barrier_set_c1 != nullptr, "should be set");
158 return _barrier_set_c1;
159 }
160
161 BarrierSetC2* barrier_set_c2() {
162 assert(_barrier_set_c2 != nullptr, "should be set");
163 return _barrier_set_c2;
164 }
165
166 BarrierSetNMethod* barrier_set_nmethod() {
167 return _barrier_set_nmethod;
168 }
169
170 BarrierSetStackChunk* barrier_set_stack_chunk() {
171 assert(_barrier_set_stack_chunk != nullptr, "should be set");
172 return _barrier_set_stack_chunk;
173 }
174
  // The AccessBarrier of a BarrierSet subclass is called by the Access API
  // (cf. oops/access.hpp) to perform decorated accesses. A GC implementation
  // may override these default access operations by declaring an
  // AccessBarrier class in its BarrierSet; that class's accessors are then
  // resolved automatically at runtime.
180 //
181 // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
182 // the following steps should be taken:
183 // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
184 // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
185 // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
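  //
  // For a hypothetical FooBarrierSet (illustrative names only), step 3
  // might look roughly like this, typically placed in its own header:
  //
  //   template<>
  //   struct BarrierSet::GetName<FooBarrierSet> {
  //     static const BarrierSet::Name value = BarrierSet::FooBarrierSet;
  //   };
  //
  //   template<>
  //   struct BarrierSet::GetType<BarrierSet::FooBarrierSet> {
  //     typedef ::FooBarrierSet type;
  //   };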
186 template <DecoratorSet decorators, typename BarrierSetT>
187 class AccessBarrier: protected RawAccessBarrier<decorators> {
188 private:
189 typedef RawAccessBarrier<decorators> Raw;
190
191 public:
192 // Primitive heap accesses. These accessors get resolved when
193 // IN_HEAP is set (e.g. when using the HeapAccess API), it is
194 // not an oop_* overload, and the barrier strength is AS_NORMAL.
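    //
    // For example (a rough mapping, assuming default decorators):
    //   HeapAccess<>::load_at(obj, offset) resolves here as
    //   load_in_heap_at<T>(obj, offset) for a non-oop T.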
195 template <typename T>
196 static T load_in_heap(T* addr) {
197 return Raw::template load<T>(addr);
198 }
199
200 template <typename T>
201 static T load_in_heap_at(oop base, ptrdiff_t offset) {
202 return Raw::template load_at<T>(base, offset);
203 }
204
205 template <typename T>
206 static void store_in_heap(T* addr, T value) {
207 Raw::store(addr, value);
208 }
209
210 template <typename T>
211 static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
212 Raw::store_at(base, offset, value);
213 }
214
215 template <typename T>
216 static T atomic_cmpxchg_in_heap(T* addr, T compare_value, T new_value) {
217 return Raw::atomic_cmpxchg(addr, compare_value, new_value);
218 }
219
220 template <typename T>
221 static T atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
222 return Raw::atomic_cmpxchg_at(base, offset, compare_value, new_value);
223 }
224
225 template <typename T>
226 static T atomic_xchg_in_heap(T* addr, T new_value) {
227 return Raw::atomic_xchg(addr, new_value);
228 }
229
230 template <typename T>
231 static T atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, T new_value) {
232 return Raw::atomic_xchg_at(base, offset, new_value);
233 }
234
235 template <typename T>
236 static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
237 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
238 size_t length) {
239 Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
240 dst_obj, dst_offset_in_bytes, dst_raw,
241 length);
242 }
243
244 // Heap oop accesses. These accessors get resolved when
245 // IN_HEAP is set (e.g. when using the HeapAccess API), it is
246 // an oop_* overload, and the barrier strength is AS_NORMAL.
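    //
    // For example (a rough mapping, assuming default decorators):
    //   HeapAccess<>::oop_store_at(obj, offset, value) resolves here as
    //   oop_store_in_heap_at(obj, offset, value).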
247 template <typename T>
248 static oop oop_load_in_heap(T* addr) {
249 return Raw::template oop_load<oop>(addr);
250 }
251
252 static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
253 return Raw::template oop_load_at<oop>(base, offset);
254 }
255
256 template <typename T>
257 static void oop_store_in_heap(T* addr, oop value) {
258 Raw::oop_store(addr, value);
259 }
260
261 static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
262 Raw::oop_store_at(base, offset, value);
263 }
264
265 template <typename T>
266 static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
267 return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
268 }
269
270 static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
271 return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
272 }
273
274 template <typename T>
275 static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
276 return Raw::oop_atomic_xchg(addr, new_value);
277 }
278
279 static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
280 return Raw::oop_atomic_xchg_at(base, offset, new_value);
281 }
282
283 template <typename T>
284 static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
285 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
286 size_t length);
287
288 // Off-heap oop accesses. These accessors get resolved when
289 // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
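    //
    // For example (a rough mapping, assuming default decorators):
    //   NativeAccess<>::oop_load(addr) resolves here as
    //   oop_load_not_in_heap<T>(addr).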
291 template <typename T>
292 static oop oop_load_not_in_heap(T* addr) {
293 return Raw::template oop_load<oop>(addr);
294 }
295
296 template <typename T>
297 static void oop_store_not_in_heap(T* addr, oop value) {
298 Raw::oop_store(addr, value);
299 }
300
301 template <typename T>
302 static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
303 return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
304 }
305
306 template <typename T>
307 static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
308 return Raw::oop_atomic_xchg(addr, new_value);
309 }
310
311 // Clone barrier support
312 static void clone_in_heap(oop src, oop dst, size_t size) {
313 Raw::clone(src, dst, size);
314 }
315 };
316 };
317
318 template<typename T>
319 inline T* barrier_set_cast(BarrierSet* bs) {
320 assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
321 return static_cast<T*>(bs);
322 }
323
324 #endif // SHARE_GC_SHARED_BARRIERSET_HPP