/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_BARRIERSET_HPP
#define SHARE_GC_SHARED_BARRIERSET_HPP

#include "gc/shared/barrierSetConfig.hpp"
#include "memory/memRegion.hpp"
#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/fakeRttiSupport.hpp"
#include "utilities/macros.hpp"

class BarrierSetAssembler;
class BarrierSetC1;
class BarrierSetC2;
class BarrierSetNMethod;
class BarrierSetStackChunk;
class JavaThread;

// This class provides the interface between a barrier implementation and
// the rest of the system.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;

  static BarrierSet* _barrier_set;

public:
  enum Name {
#define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
    FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
#undef BARRIER_SET_DECLARE_BS_ENUM
    UnknownBS
  };

protected:
  // Fake RTTI support.  For a derived class T to participate:
  // - T must have a corresponding Name entry.
  // - GetName<T> must be specialized to return the corresponding Name
  //   entry.
  // - If T is a base class, the constructor must have a FakeRtti
  //   parameter and pass it up to its base class, with the tag set
  //   augmented with the corresponding Name entry.
  // - If T is a concrete class, the constructor must create a
  //   FakeRtti object whose tag set includes the corresponding Name
  //   entry, and pass it up to its base class.
  typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;
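
  // An illustrative sketch of these rules (FooBarrierSet, FooBaseBarrierSet and
  // the FooBS/FooBaseBS Name entries are hypothetical placeholders, not
  // existing barrier sets):
  //
  //   // Concrete class: build a FakeRtti carrying its own Name entry and
  //   // pass it up to its base class.
  //   FooBarrierSet::FooBarrierSet()
  //     : FooBaseBarrierSet(/* ... */, FakeRtti(BarrierSet::FooBS)) {}
  //
  //   // Intermediate base class: take a FakeRtti parameter, augment the tag
  //   // set with its own Name entry, and pass it up.
  //   FooBaseBarrierSet::FooBaseBarrierSet(/* ... */, const FakeRtti& fake_rtti)
  //     : BarrierSet(/* ... */, fake_rtti.add_tag(BarrierSet::FooBaseBS)) {}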

private:
  FakeRtti _fake_rtti;
  BarrierSetAssembler* _barrier_set_assembler;
  BarrierSetC1* _barrier_set_c1;
  BarrierSetC2* _barrier_set_c2;
  BarrierSetNMethod* _barrier_set_nmethod;
  BarrierSetStackChunk* _barrier_set_stack_chunk;

public:
  // Metafunction mapping a class derived from BarrierSet to the
  // corresponding Name enum tag.
  template<typename T> struct GetName;

  // Metafunction mapping a Name enum tag to the corresponding
  // class derived from BarrierSet.
  template<BarrierSet::Name T> struct GetType;

  // Note: This is not presently the Name corresponding to the
  // concrete class of this object.
  BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }

  // Test whether this object is of the type corresponding to bsn.
  bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }
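
  // For example (illustrative; CardTableBarrierSet is one of the Name tags
  // generated from barrierSetConfig.hpp, and barrier_set() is the global
  // accessor declared below):
  //   if (BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet)) {
  //     // The active barrier set can safely be treated as a CardTableBarrierSet.
  //   }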

  // End of fake RTTI support.

protected:
  BarrierSet(BarrierSetAssembler* barrier_set_assembler,
             BarrierSetC1* barrier_set_c1,
             BarrierSetC2* barrier_set_c2,
             BarrierSetNMethod* barrier_set_nmethod,
             BarrierSetStackChunk* barrier_set_stack_chunk,
             const FakeRtti& fake_rtti);
  ~BarrierSet() { }

  template <class BarrierSetAssemblerT>
  static BarrierSetAssembler* make_barrier_set_assembler() {
    return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(nullptr);
  }

  template <class BarrierSetC1T>
  static BarrierSetC1* make_barrier_set_c1() {
    return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(nullptr);
  }

  template <class BarrierSetC2T>
  static BarrierSetC2* make_barrier_set_c2() {
    return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(nullptr);
  }
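
  // For example, a concrete barrier set's constructor (the Foo* names are
  // hypothetical placeholders) might use these helpers roughly as follows;
  // each helper returns null in builds that lack the corresponding component
  // (Zero builds for the assembler, builds without C1/C2 for the compiler
  // barriers):
  //
  //   FooBarrierSet::FooBarrierSet()
  //     : BarrierSet(make_barrier_set_assembler<FooBarrierSetAssembler>(),
  //                  make_barrier_set_c1<FooBarrierSetC1>(),
  //                  make_barrier_set_c2<FooBarrierSetC2>(),
  //                  nullptr /* barrier_set_nmethod */,
  //                  nullptr /* barrier_set_stack_chunk */,
  //                  FakeRtti(BarrierSet::FooBS)) {}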

public:
  // Support for optimizing compilers to call the barrier set on slow path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  // is redone until it succeeds. This can e.g. prevent slow-path allocations from
  // ending up in the old generation.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
  virtual void on_thread_create(Thread* thread) {}
  virtual void on_thread_destroy(Thread* thread) {}

  // These perform BarrierSet-related initialization/cleanup before the thread
  // is added to or removed from the corresponding set of threads. The
  // argument thread is the current thread. These are called either holding
  // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
  // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
  // caller. That locking ensures the operation is "atomic" with the list
  // modification with respect to operations that hold the NonJavaThreadsList_lock
  // and either also hold the Threads_lock or are at a safepoint.
  virtual void on_thread_attach(Thread* thread);
  virtual void on_thread_detach(Thread* thread) {}

  virtual void make_parsable(JavaThread* thread) {}

public:
  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;

  static BarrierSet* barrier_set() { return _barrier_set; }
  static void set_barrier_set(BarrierSet* barrier_set);

  BarrierSetAssembler* barrier_set_assembler() {
    assert(_barrier_set_assembler != nullptr, "should be set");
    return _barrier_set_assembler;
  }

  BarrierSetC1* barrier_set_c1() {
    assert(_barrier_set_c1 != nullptr, "should be set");
    return _barrier_set_c1;
  }

  BarrierSetC2* barrier_set_c2() {
    assert(_barrier_set_c2 != nullptr, "should be set");
    return _barrier_set_c2;
  }

  BarrierSetNMethod* barrier_set_nmethod() {
    return _barrier_set_nmethod;
  }

  BarrierSetStackChunk* barrier_set_stack_chunk() {
    assert(_barrier_set_stack_chunk != nullptr, "should be set");
    return _barrier_set_stack_chunk;
  }

  // The AccessBarrier of a BarrierSet subclass is called by the Access API
  // (cf. oops/access.hpp) to perform decorated accesses. GC implementations
  // may override these default access operations by declaring an
  // AccessBarrier class in their BarrierSet. Its accessors will then be
  // automatically resolved at runtime.
  //
  // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
  // the following steps should be taken:
  // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
  // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
  // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
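  //
  // An illustrative sketch of the resulting pieces (FooBarrierSet and the FooBS
  // Name entry are hypothetical placeholders; the concrete barrier set headers
  // contain the real versions):
  //
  //   class FooBarrierSet : public BarrierSet {
  //     // ...
  //   public:
  //     template <DecoratorSet decorators, typename BarrierSetT = FooBarrierSet>
  //     class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
  //       typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Parent;
  //     public:
  //       template <typename T>
  //       static void oop_store_in_heap(T* addr, oop value) {
  //         // GC-specific pre-/post-write barrier work would go here.
  //         Parent::oop_store_in_heap(addr, value);
  //       }
  //     };
  //   };
  //
  //   // In the hypothetical fooBarrierSet.hpp, which step 2) makes reachable
  //   // from barrierSetConfig.inline.hpp:
  //   template<> struct BarrierSet::GetName<FooBarrierSet> {
  //     static const BarrierSet::Name value = BarrierSet::FooBS;
  //   };
  //   template<> struct BarrierSet::GetType<BarrierSet::FooBS> {
  //     typedef ::FooBarrierSet type;
  //   };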
  template <DecoratorSet decorators, typename BarrierSetT>
  class AccessBarrier: protected RawAccessBarrier<decorators> {
  private:
    typedef RawAccessBarrier<decorators> Raw;

  public:
    // Primitive heap accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // not an oop_* overload, and the barrier strength is AS_NORMAL.
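    //
    // For example (illustrative; HeapAccess is the dispatching front end in
    // oops/access.hpp, and obj/offset/value are placeholder names):
    //   jint v = HeapAccess<>::load_at<jint>(obj, offset);  // -> load_in_heap_at
    //   HeapAccess<>::store_at(obj, offset, value);         // -> store_in_heap_at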
    template <typename T>
    static T load_in_heap(T* addr) {
      return Raw::template load<T>(addr);
    }

    template <typename T>
    static T load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template load_at<T>(base, offset);
    }

    template <typename T>
    static void store_in_heap(T* addr, T value) {
      Raw::store(addr, value);
    }

    template <typename T>
    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
      Raw::store_at(base, offset, value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap(T* addr, T compare_value, T new_value) {
      return Raw::atomic_cmpxchg(addr, compare_value, new_value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return Raw::atomic_cmpxchg_at(base, offset, compare_value, new_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap(T* addr, T new_value) {
      return Raw::atomic_xchg(addr, new_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, T new_value) {
      return Raw::atomic_xchg_at(base, offset, new_value);
    }

    template <typename T>
    static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                  size_t length) {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }

    // Heap oop accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template oop_load_at<oop>(base, offset);
    }

    template <typename T>
    static void oop_store_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
      Raw::oop_store_at(base, offset, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
      return Raw::oop_atomic_xchg_at(base, offset, new_value);
    }

    template <typename T>
    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                      size_t length);

    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
      return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
      return Raw::oop_atomic_xchg(addr, new_value);
    }

    // Clone barrier support
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }
  };
};

template<typename T>
inline T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
  return static_cast<T*>(bs);
}
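
// Example use (illustrative; assumes the active barrier set is, say, a
// CardTableBarrierSet, which the assert above verifies in debug builds):
//   CardTableBarrierSet* ctbs =
//       barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());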

#endif // SHARE_GC_SHARED_BARRIERSET_HPP