/*
 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1BARRIERSET_HPP
#define SHARE_GC_G1_G1BARRIERSET_HPP

#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/atomic.hpp"

class G1CardTable;
class Thread;

// This barrier set is specialized to manage two card tables:
// * one the mutator is currently working on ("card table")
// * one the refinement threads or the GC during a pause are working on
//   ("refinement table")
//
// The card table acts like a regular card table: the mutator dirties cards
// containing potentially interesting references.
//
// When the number of dirty cards on the card table exceeds a threshold, G1 swaps
// the two card tables and has the refinement threads reduce the dirty cards by
// "refining" them: refinement looks at every dirty card on the refinement table,
// updates the remembered sets accordingly, and clears the cards on the
// refinement table.
//
// Meanwhile the mutator continues dirtying the now empty card table.
//
// This separation of the data the mutator and the refinement threads work on
// removes the need for any fine-grained (per mutator write) synchronization
// between them, keeping the write barrier simple.
//
// The refinement threads specially mark cards in the current collection set on
// the card table - this is fine with respect to synchronization with the mutator,
// because at most the mutator overwrites that mark again if there is a race, and
// G1 scans the entire card during the GC pause either way.
//
// During garbage collection, if the refinement table is known to be non-empty, G1
// merges it back into the card table (clearing the refinement table in the
// process); the card table is then scanned for dirty cards.
//
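// To make the interaction concrete, a minimal conceptual sketch of the mutator's
// post-write barrier follows (illustration only; "byte_map_base", "card_shift"
// and "dirty_card_val" are stand-ins for the real card table machinery, and this
// is not the code the interpreter or JIT compilers actually emit):
//
//   void post_write_barrier(Thread* thread, void* field_addr) {
//     // Dirty the card covering the updated field on the card table this
//     // thread currently sees; a plain store suffices, since refinement only
//     // ever reads the other (refinement) table.
//     CardValue* card = thread->byte_map_base() +
//                       (uintptr_t(field_addr) >> card_shift);
//     *card = dirty_card_val();
//   }
//
// Swapping the tables (swap_global_card_table()) and re-pointing each thread's
// card table base (update_card_table_base()) is what hands a full card table
// over to the refinement threads.
//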
class G1BarrierSet: public CardTableBarrierSet {
 private:
  BufferNode::Allocator _satb_mark_queue_buffer_allocator;
  G1SATBMarkQueueSet _satb_mark_queue_set;

  Atomic<G1CardTable*> _refinement_table;

 public:
  G1BarrierSet(G1CardTable* card_table, G1CardTable* refinement_table);
  virtual ~G1BarrierSet();

  static G1BarrierSet* g1_barrier_set() {
    return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
  }

  G1CardTable* refinement_table() const { return _refinement_table.load_relaxed(); }

  // Swap the global card table references, without synchronization.
  void swap_global_card_table();

  // Update the given thread's card table (byte map) base to the current card table's.
  void update_card_table_base(Thread* thread);

  // Add "pre_val" to a set of objects that may have been disconnected from the
  // pre-marking object graph. Prefer the version that takes a location, as it
  // can avoid touching the heap unnecessarily.
  template <class T> static void enqueue(T* dst);
  static void enqueue_preloaded(oop pre_val);
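
  // A minimal conceptual sketch of how these enqueue operations are used by the
  // SATB pre-write barrier (illustration only; "satb_is_active()" and
  // "load_old_value()" are stand-ins for the per-thread SATB queue's active flag
  // and the raw field load, not actual functions of this class):
  //
  //   template <class T>
  //   void pre_write_barrier(T* field) {
  //     if (satb_is_active()) {                 // only while concurrent marking runs
  //       oop pre_val = load_old_value(field);  // the reference about to be overwritten
  //       if (pre_val != nullptr) {
  //         enqueue_preloaded(pre_val);         // preserve the marking snapshot
  //       }
  //     }
  //   }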

  static void enqueue_preloaded_if_weak(DecoratorSet decorators, oop value);

  template <class T> void write_ref_array_pre_work(T* dst, size_t count);
  virtual void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized);
  virtual void write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized);

  template <DecoratorSet decorators, typename T>
  void write_ref_field_pre(T* field);

  virtual void write_region(MemRegion mr);

  template <DecoratorSet decorators = DECORATORS_NONE, typename T>
  void write_ref_field_post(T* field);

  virtual void on_thread_create(Thread* thread);
  virtual void on_thread_destroy(Thread* thread);
  virtual void on_thread_attach(Thread* thread);
  virtual void on_thread_detach(Thread* thread);

  static G1SATBMarkQueueSet& satb_mark_queue_set() {
    return g1_barrier_set()->_satb_mark_queue_set;
  }

  virtual void print_on(outputStream* st) const;

  virtual uint grain_shift() { return G1HeapRegion::LogOfHRGrainBytes; }

  // Callbacks for runtime accesses.
  template <DecoratorSet decorators, typename BarrierSetT = G1BarrierSet>
  class AccessBarrier: public CardTableBarrierSet::AccessBarrier<decorators, BarrierSetT> {
    typedef CardTableBarrierSet::AccessBarrier<decorators, BarrierSetT> CardTableBS;
    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;

   public:
    // Needed for loads on non-heap weak references
    template <typename T>
    static oop oop_load_not_in_heap(T* addr);

    // Needed for non-heap stores
    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop new_value);

    // Needed for weak references
    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);

    // Defensive: will catch weak oops at addresses in heap
    template <typename T>
    static oop oop_load_in_heap(T* addr);

    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value);
    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
  };
};

template<>
struct BarrierSet::GetName<G1BarrierSet> {
  static const BarrierSet::Name value = BarrierSet::G1BarrierSet;
};

template<>
struct BarrierSet::GetType<BarrierSet::G1BarrierSet> {
  typedef ::G1BarrierSet type;
};

#endif // SHARE_GC_G1_G1BARRIERSET_HPP