/*
 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP

#include "gc/g1/g1ConcurrentMark.hpp"

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/checkedCast.hpp"

inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
  // Check whether the passed in object is null. During discovery the referent
  // may be cleared between the initial check and being passed in here.
  if (obj == nullptr) {
    // Return true to avoid discovery when the referent is null.
    return true;
  }

  // All objects allocated since the start of marking are considered live.
  if (_cm->obj_allocated_since_mark_start(obj)) {
    return true;
  }

  // All objects that are marked are live.
  return _cm->is_marked_in_bitmap(obj);
}

inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
  assert(obj != nullptr, "precondition");
  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));

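  // Discovery is limited to old and humongous regions: referents in the
  // remaining (young) regions are treated as live for the duration of the
  // marking cycle anyway, so there is nothing for reference processing to
  // decide there.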
  return _g1h->heap_region_containing(obj)->is_old_or_humongous();
}

inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, oop const obj) {
  if (obj_allocated_since_mark_start(obj)) {
    return false;
  }

  // Some callers may have stale objects to mark above TAMS after humongous reclaim.
  // Can't assert that this is a valid object at this point, since it might be in
  // the process of being copied by another thread.
  DEBUG_ONLY(G1HeapRegion* const hr = _g1h->heap_region_containing(obj);)
  assert(!hr->is_continues_humongous(),
         "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above TAMS " PTR_FORMAT,
         p2i(obj), hr->hrm_index(), p2i(top_at_mark_start(hr)));

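  // par_mark() sets the bit with a CAS and returns true for exactly one of any
  // racing threads, so the liveness update below happens once per object.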
  bool success = _mark_bitmap.par_mark(obj);
  if (success) {
    add_to_liveness(worker_id, obj, obj->size());
  }
  return success;
}

#ifndef PRODUCT
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
  assert_at_safepoint_on_vm_thread();

  size_t num_chunks = 0;

  TaskQueueEntryChunk* cur = _chunk_list;
  while (cur != nullptr) {
    guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);

    for (size_t i = 0; i < EntriesPerChunk; ++i) {
      if (cur->data[i].is_null()) {
        break;
      }
      fn(cur->data[i]);
    }
    cur = cur->next;
    num_chunks++;
  }
}
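
// A minimal usage sketch for iterate() (hypothetical caller, for illustration
// only): counting the entries currently on the global mark stack while at a
// safepoint.
//
//   size_t entries = 0;
//   _global_mark_stack.iterate([&](G1TaskQueueEntry entry) {
//     entries++;
//   });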
#endif

// Scans an object and visits its children.
inline void G1CMTask::process_entry(G1TaskQueueEntry task_entry, bool stolen) {
  process_grey_task_entry<true>(task_entry, stolen);
}

inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
  assert(task_entry.is_partial_array_state() || _g1h->is_in_reserved(task_entry.to_oop()), "invariant");
  assert(task_entry.is_partial_array_state() || !_g1h->is_on_master_free_list(
             _g1h->heap_region_containing(task_entry.to_oop())), "invariant");
  assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())), "invariant");

  if (!_task_queue->push(task_entry)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // This push should succeed: even if we overflowed the global stack, we
    // have definitely removed some entries from the local queue, so there
    // must be space on it now.
    bool success = _task_queue->push(task_entry);
    assert(success, "invariant");
  }
}

inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed. Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed. The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
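  //
  // Sketch of the four cases (addresses increase to the right; the asserts
  // below guarantee _region_limit <= global_finger):
  //
  //   bottom       _finger       _region_limit        global_finger
  //     |---[A]-------|-----[B]--------|-------[C]----------|---[D]---
  //
  //   [A] objAddr < _finger:       push needed (local scan already passed it).
  //   [B] objAddr < _region_limit: no push (local scan will still reach it).
  //   [C] objAddr < global_finger: push needed (already claimed by the scan).
  //   [D] otherwise:               no push (bitmap scan will reach it).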
  HeapWord* objAddr = cast_from_oop<HeapWord*>(obj);
  if (_finger != nullptr) {
    // We have a current region.

    // Finger and region values are all null or all non-null. We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != nullptr, "invariant");
    assert(_region_limit != nullptr, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}

template<bool scan>
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen) {
  assert(scan || (!task_entry.is_partial_array_state() && task_entry.to_oop()->is_typeArray()), "Skipping scan of grey non-typeArray");
  assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())),
         "Any stolen object should be a slice or marked");

  if (scan) {
    if (task_entry.is_partial_array_state()) {
      _words_scanned += process_partial_array(task_entry, stolen);
    } else {
      oop obj = task_entry.to_oop();
      if (should_be_sliced(obj)) {
        _words_scanned += start_partial_array_processing(obj);
      } else {
        _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
      }
    }
  }
  check_limits();
}

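// Reference arrays with at least ObjArrayMarkingStride elements are not
// scanned in one go but split into slices (see start_partial_array_processing()
// and process_partial_array() above). This bounds the amount of work done
// between two check_limits() calls and lets other tasks steal outstanding
// slices instead of idling behind one very large array.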
inline bool G1CMTask::should_be_sliced(oop obj) {
  return obj->is_refArray() && refArrayOop(obj)->length() >= (int)ObjArrayMarkingStride;
}

inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
  assert(obj->is_refArray(), "Must be");
  refArrayOop(obj)->oop_iterate_elements_range(_cm_oop_closure,
                                               checked_cast<int>(start),
                                               checked_cast<int>(end));
}

inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
  uint const region = r->hrm_index();
  assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
  _top_at_mark_starts[region] = r->top();
}

inline void G1ConcurrentMark::reset_top_at_mark_start(G1HeapRegion* r) {
  _top_at_mark_starts[r->hrm_index()] = r->bottom();
}

inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const {
  return top_at_mark_start(r->hrm_index());
}

inline HeapWord* G1ConcurrentMark::top_at_mark_start(uint region) const {
  assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
  return _top_at_mark_starts[region];
}

inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
  uint const region = _g1h->addr_to_region(obj);
  assert(region < _g1h->max_num_regions(), "obj " PTR_FORMAT " outside heap %u", p2i(obj), region);
  return cast_from_oop<HeapWord*>(obj) >= top_at_mark_start(region);
}
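
// The TAMS (top-at-mark-start) invariant behind the predicate above:
//
//   bottom              TAMS                top            end
//     |-------------------|-------------------|-------------|
//     | liveness decided  | allocated since   |    free     |
//     | by the bitmap     | mark start:       |
//     |                   | implicitly live   |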

inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(G1HeapRegion* r) const {
  return _top_at_rebuild_starts[r->hrm_index()];
}

inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
  assert(r->is_old() || r->is_humongous(), "precondition");

  uint const region = r->hrm_index();
  assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
  assert(_top_at_rebuild_starts[region] == nullptr,
         "TARS for region %u has already been set to " PTR_FORMAT " but should be null",
         region, p2i(_top_at_rebuild_starts[region]));
  _top_at_rebuild_starts[region] = r->top();
}

inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
  _mark_stats_cache.add_live_words(_g1h->addr_to_region(obj), obj_size);
}

inline void G1CMTask::inc_incoming_refs(oop const obj) {
  _mark_stats_cache.inc_incoming_refs(_g1h->addr_to_region(obj));
}

inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
  task(worker_id)->update_liveness(obj, size);
}

inline void G1CMTask::abort_marking_if_regular_check_fail() {
  if (!regular_clock_call()) {
    set_has_aborted();
  }
}

inline bool G1CMTask::make_reference_grey(oop obj) {
  if (!_cm->mark_in_bitmap(_worker_id, obj)) {
    return false;
  }

  // No explicit OrderAccess::storeload() is needed here. It is implicit in the
  // CAS done by G1CMBitMap::par_mark() in the mark_in_bitmap() call above.
  HeapWord* global_finger = _cm->finger();

  // We only need to push a newly grey object on the mark
  // stack if it is in a section of memory the mark bitmap
  // scan has already examined. Mark bitmap scanning
  // maintains progress "fingers" for determining that.
  //
  // Notice that the global finger might be moving forward
  // concurrently. This is not a problem. In the worst case, we
  // mark the object while it is above the global finger and, by
  // the time we read the global finger, it has moved forward
  // past this object. In this case, the object will probably
  // be visited when a task is scanning the region and will also
  // be pushed on the stack. So, some duplicate work, but no
  // correctness problems.
  if (is_below_finger(obj, global_finger)) {
    G1TaskQueueEntry entry(obj);
    if (obj->is_typeArray()) {
      // Immediately process arrays of primitive types, rather
      // than pushing on the mark stack. This keeps us from
      // adding humongous objects to the mark stack that might
      // be reclaimed before the entry is processed - see
      // selection of candidates for eager reclaim of humongous
      // objects. The cost of the additional type test is
      // mitigated by avoiding a trip through the mark stack,
      // by only doing a bookkeeping update and avoiding the
      // actual scan of the object - a typeArray contains no
      // references, and the metadata is built-in.
      process_grey_task_entry<false>(entry, false /* stolen */);
    } else {
      push(entry);
    }
  }
  return true;
}

template <class T>
inline bool G1CMTask::deal_with_reference(T* p) {
  increment_refs_reached();
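  // A relaxed load is sufficient: under SATB, concurrent mutator updates to
  // this field are intercepted by the write barrier, so tracing whichever
  // value we observe here is correct.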
  oop const obj = RawAccess<MO_RELAXED>::oop_load(p);
  if (obj == nullptr) {
    return false;
  }

  if (!G1HeapRegion::is_in_same_region(p, obj)) {
    inc_incoming_refs(obj);
  }
  return make_reference_grey(obj);
}

inline void G1ConcurrentMark::raw_mark_in_bitmap(oop obj) {
  _mark_bitmap.par_mark(obj);
}

inline bool G1ConcurrentMark::is_marked_in_bitmap(oop p) const {
  assert(p != nullptr && oopDesc::is_oop(p), "expected an oop");
  return _mark_bitmap.is_marked(cast_from_oop<HeapWord*>(p));
}

inline bool G1ConcurrentMark::do_yield_check() {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#endif // SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP