/*
 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_BARRIERSET_HPP
#define SHARE_GC_SHARED_BARRIERSET_HPP

#include "gc/shared/barrierSetConfig.hpp"
#include "memory/memRegion.hpp"
#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/fakeRttiSupport.hpp"
#include "utilities/macros.hpp"

class BarrierSetAssembler;
class BarrierSetC1;
class BarrierSetC2;
class BarrierSetNMethod;
class BarrierSetStackChunk;
class JavaThread;

// This class provides the interface between a barrier implementation and
// the rest of the system.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;

  static BarrierSet* _barrier_set;

public:
  enum Name {
#define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
    FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
#undef BARRIER_SET_DECLARE_BS_ENUM
    UnknownBS
  };

protected:
  // Fake RTTI support. For a derived class T to participate
  // - T must have a corresponding Name entry.
  // - GetName<T> must be specialized to return the corresponding Name
  //   entry.
  // - If T is a base class, the constructor must have a FakeRtti
  //   parameter and pass it up to its base class, with the tag set
  //   augmented with the corresponding Name entry.
  // - If T is a concrete class, the constructor must create a
  //   FakeRtti object whose tag set includes the corresponding Name
  //   entry, and pass it up to its base class.
  typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;
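
  // A minimal sketch of the pattern above ("MyBaseBS", "MyConcreteBS" and
  // their Name entries are hypothetical, not part of this header):
  //
  //   // Intermediate base class: forward the FakeRtti upwards with its own
  //   // tag added (remaining constructor arguments elided here).
  //   MyBaseBS::MyBaseBS(const FakeRtti& fake_rtti)
  //     : BarrierSet(/* assembler, c1, c2, nmethod, stack chunk, */
  //                  fake_rtti.add_tag(BarrierSet::MyBaseBSName)) {}
  //
  //   // Concrete class: seed the tag set with its own Name entry.
  //   MyConcreteBS::MyConcreteBS()
  //     : MyBaseBS(FakeRtti(BarrierSet::MyConcreteBSName)) {}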

private:
  FakeRtti _fake_rtti;
  BarrierSetAssembler* _barrier_set_assembler;
  BarrierSetC1* _barrier_set_c1;
  BarrierSetC2* _barrier_set_c2;
  BarrierSetNMethod* _barrier_set_nmethod;
  BarrierSetStackChunk* _barrier_set_stack_chunk;

public:
  // Metafunction mapping a class derived from BarrierSet to the
  // corresponding Name enum tag.
  template<typename T> struct GetName;

  // Metafunction mapping a Name enum tag to the corresponding
  // class derived from BarrierSet.
  template<BarrierSet::Name T> struct GetType;
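
  // For example (a sketch; "MyBS" and its Name entry "MyBSName" are
  // hypothetical), the two metafunctions are specialized as:
  //
  //   template<> struct BarrierSet::GetName<MyBS> {
  //     static const BarrierSet::Name value = BarrierSet::MyBSName;
  //   };
  //   template<> struct BarrierSet::GetType<BarrierSet::MyBSName> {
  //     typedef MyBS type;
  //   };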

  // Note: This is not presently the Name corresponding to the
  // concrete class of this object.
  BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }

  // Test whether this object is of the type corresponding to bsn.
  bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }

  // End of fake RTTI support.

protected:
  BarrierSet(BarrierSetAssembler* barrier_set_assembler,
             BarrierSetC1* barrier_set_c1,
             BarrierSetC2* barrier_set_c2,
             BarrierSetNMethod* barrier_set_nmethod,
             BarrierSetStackChunk* barrier_set_stack_chunk,
             const FakeRtti& fake_rtti);
  ~BarrierSet() { }

  template <class BarrierSetAssemblerT>
  static BarrierSetAssembler* make_barrier_set_assembler() {
    return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(nullptr);
  }

  template <class BarrierSetC1T>
  static BarrierSetC1* make_barrier_set_c1() {
    return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(nullptr);
  }

  template <class BarrierSetC2T>
  static BarrierSetC2* make_barrier_set_c2() {
    return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(nullptr);
  }
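
  // These helpers let a concrete barrier set drop the assembler- and
  // compiler-specific objects in builds where they do not exist (Zero, or
  // builds without C1/C2). A hypothetical constructor might use them as
  // follows (sketch only; all "MyBS*" names are illustrative):
  //
  //   MyBS::MyBS()
  //     : BarrierSet(make_barrier_set_assembler<MyBSAssembler>(),
  //                  make_barrier_set_c1<MyBSC1>(),
  //                  make_barrier_set_c2<MyBSC2>(),
  //                  new MyBSNMethod(),      // a BarrierSetNMethod subclass
  //                  new MyBSStackChunk(),   // a BarrierSetStackChunk subclass
  //                  FakeRtti(BarrierSet::MyBSName)) {}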

public:
  // Support for optimizing compilers to call the barrier set on slow-path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  // is redone until it succeeds. This can e.g. prevent slow-path allocations from
  // being placed in the old generation.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
  virtual void on_thread_create(Thread* thread) {}
  virtual void on_thread_destroy(Thread* thread) {}

  // These perform BarrierSet-related initialization/cleanup before the thread
  // is added to or removed from the corresponding set of threads. The
  // argument thread is the current thread. These are called either holding
  // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
  // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
  // caller. That locking ensures the operation is "atomic" with the list
  // modification with respect to operations that hold the NJTList_lock and
  // either also hold the Threads_lock or are at a safepoint.
  virtual void on_thread_attach(Thread* thread);
  virtual void on_thread_detach(Thread* thread) {}

  virtual void make_parsable(JavaThread* thread) {}

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;

  static BarrierSet* barrier_set() { return _barrier_set; }
  static void set_barrier_set(BarrierSet* barrier_set);

  BarrierSetAssembler* barrier_set_assembler() {
    assert(_barrier_set_assembler != nullptr, "should be set");
    return _barrier_set_assembler;
  }

  BarrierSetC1* barrier_set_c1() {
    assert(_barrier_set_c1 != nullptr, "should be set");
    return _barrier_set_c1;
  }

  BarrierSetC2* barrier_set_c2() {
    assert(_barrier_set_c2 != nullptr, "should be set");
    return _barrier_set_c2;
  }

  BarrierSetNMethod* barrier_set_nmethod() {
    return _barrier_set_nmethod;
  }

  BarrierSetStackChunk* barrier_set_stack_chunk() {
    assert(_barrier_set_stack_chunk != nullptr, "should be set");
    return _barrier_set_stack_chunk;
  }

  // The AccessBarrier of a BarrierSet subclass is called by the Access API
  // (cf. oops/access.hpp) to perform decorated accesses. GC implementations
  // may override these default access operations by declaring an
  // AccessBarrier class in their BarrierSet. Its accessors will then be
  // automatically resolved at runtime.
  //
  // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
  // the following steps should be taken:
  // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
  // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
  // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
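  //
  // For example (sketch only; FooBarrierSet and its write_ref_field_post()
  // are hypothetical), a GC needing a post-write barrier on oop stores could
  // shadow the corresponding accessor in an AccessBarrier declared inside
  // FooBarrierSet:
  //
  //   template <DecoratorSet decorators, typename BarrierSetT = FooBarrierSet>
  //   class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
  //     typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Parent;
  //    public:
  //     template <typename T>
  //     static void oop_store_in_heap(T* addr, oop value) {
  //       Parent::oop_store_in_heap(addr, value);
  //       barrier_set_cast<FooBarrierSet>(barrier_set())->write_ref_field_post(addr);
  //     }
  //   };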
  template <DecoratorSet decorators, typename BarrierSetT>
  class AccessBarrier: protected RawAccessBarrier<decorators> {
  private:
    typedef RawAccessBarrier<decorators> Raw;

  public:
    // Primitive heap accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // not an oop_* overload, and the barrier strength is AS_NORMAL.
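    //
    // For example, HeapAccess<>::store_at(obj, offset, value) on a primitive
    // field dispatches to store_in_heap_at() below, unless a GC's own
    // AccessBarrier overrides it.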
    template <typename T>
    static T load_in_heap(T* addr) {
      return Raw::template load<T>(addr);
    }

    template <typename T>
    static T load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template load_at<T>(base, offset);
    }

    template <typename T>
    static void store_in_heap(T* addr, T value) {
      Raw::store(addr, value);
    }

    template <typename T>
    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
      Raw::store_at(base, offset, value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap(T* addr, T compare_value, T new_value) {
      return Raw::atomic_cmpxchg(addr, compare_value, new_value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return Raw::atomic_cmpxchg_at(base, offset, compare_value, new_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap(T* addr, T new_value) {
      return Raw::atomic_xchg(addr, new_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, T new_value) {
      return Raw::atomic_xchg_at(base, offset, new_value);
    }

    template <typename T>
    static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                  size_t length) {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }

    // Heap oop accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
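    //
    // For example, HeapAccess<>::oop_store_at(base, offset, value) dispatches
    // to oop_store_in_heap_at() below; GCs with write barriers typically
    // override these oop accessors.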
    template <typename T>
    static oop oop_load_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template oop_load_at<oop>(base, offset);
    }

    template <typename T>
    static void oop_store_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
      Raw::oop_store_at(base, offset, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
      return Raw::oop_atomic_xchg_at(base, offset, new_value);
    }

    template <typename T>
    static OopCopyResult oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                               size_t length);

    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
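    //
    // For example, NativeAccess<>::oop_load(addr) on an oop slot outside the
    // Java heap (e.g. a handle) dispatches to oop_load_not_in_heap() below.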
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    // Clone barrier support
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }

    static void value_copy_in_heap(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
      Raw::value_copy(src, dst, md, lk);
    }
  };
};

template<typename T>
inline T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
  return static_cast<T*>(bs);
}
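
// For example (sketch; "MyBS" stands for any BarrierSet subclass with a
// registered GetName specialization):
//
//   MyBS* bs = barrier_set_cast<MyBS>(BarrierSet::barrier_set());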

#endif // SHARE_GC_SHARED_BARRIERSET_HPP