/*
 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP

#include "gc/g1/g1ConcurrentMark.hpp"

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/checkedCast.hpp"

inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
  // Check whether the passed-in object is null. During reference discovery
  // the referent may be cleared between the initial null check and being
  // passed in here.
  if (obj == nullptr) {
    // Return true to avoid discovery when the referent is null.
    return true;
  }

  // All objects allocated since the start of marking are considered live.
  if (_cm->obj_allocated_since_mark_start(obj)) {
    return true;
  }

  // All objects that are marked are live.
  return _cm->is_marked_in_bitmap(obj);
}

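// Limit concurrent reference discovery to objects in old or humongous
// regions; references in regions of other types are processed during
// evacuation pauses instead.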
inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
  assert(obj != nullptr, "precondition");
  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));

  return _g1h->heap_region_containing(obj)->is_old_or_humongous();
}

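// Attempt to mark obj in the marking bitmap, updating the per-worker
// liveness statistics on success. Returns false if the object is implicitly
// live (allocated since the start of marking) or has already been marked.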
inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, oop const obj) {
  if (obj_allocated_since_mark_start(obj)) {
    return false;
  }

  // Some callers may have stale objects to mark above TAMS after humongous
  // reclaim. Can't assert that this is a valid object at this point, since
  // it might be in the process of being copied by another thread.
  DEBUG_ONLY(G1HeapRegion* const hr = _g1h->heap_region_containing(obj);)
  assert(!hr->is_continues_humongous(),
         "Should not try to mark object " PTR_FORMAT " in continues humongous region %u above TAMS " PTR_FORMAT,
         p2i(obj), hr->hrm_index(), p2i(top_at_mark_start(hr)));

  bool success = _mark_bitmap.par_mark(obj);
  if (success) {
    add_to_liveness(worker_id, obj, obj->size());
  }
  return success;
}

#ifndef PRODUCT
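// Debug-only iteration over the global mark stack: applies fn to every
// entry in the chunk list, stopping at the first null entry within a chunk
// since chunks are filled from index 0.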
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
  assert_at_safepoint_on_vm_thread();

  size_t num_chunks = 0;

  TaskQueueEntryChunk* cur = _chunk_list;
  while (cur != nullptr) {
    guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);

    for (size_t i = 0; i < EntriesPerChunk; ++i) {
      if (cur->data[i].is_null()) {
        break;
      }
      fn(cur->data[i]);
    }
    cur = cur->next;
    num_chunks++;
  }
}
#endif

// Process the given task entry: scan its object and visit the object's children.
inline void G1CMTask::process_entry(G1TaskQueueEntry task_entry, bool stolen) {
  process_grey_task_entry<true>(task_entry, stolen);
}

inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
  assert(task_entry.is_partial_array_state() || _g1h->is_in_reserved(task_entry.to_oop()), "invariant");
  assert(task_entry.is_partial_array_state() || !_g1h->is_on_master_free_list(
              _g1h->heap_region_containing(task_entry.to_oop())), "invariant");
  assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())), "invariant");

  if (!_task_queue->push(task_entry)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // This push should succeed: even if we overflowed the global stack, we
    // definitely removed some entries from the local queue, so there must
    // be space on it now.
    bool success = _task_queue->push(task_entry);
    assert(success, "invariant");
  }
}

inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed.  Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed.  The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = cast_from_oop<HeapWord*>(obj);
  if (_finger != nullptr) {
    // We have a current region.

    // Finger and region values are all null or all non-null.  We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != nullptr, "invariant");
    assert(_region_limit != nullptr, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}

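// Process a grey task entry. The template parameter controls whether the
// object is actually scanned for references: callers pass scan == false for
// typeArrays, which contain none. Large objArrays are processed in slices,
// with the remaining work recorded as a partial array entry.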
template<bool scan>
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen) {
  assert(scan || (!task_entry.is_partial_array_state() && task_entry.to_oop()->is_typeArray()), "Skipping scan of grey non-typeArray");
  assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())),
         "Any stolen object should be a slice or marked");

  if (scan) {
    if (task_entry.is_partial_array_state()) {
      _words_scanned += process_partial_array(task_entry, stolen);
    } else {
      oop obj = task_entry.to_oop();
      if (should_be_sliced(obj)) {
        _words_scanned += start_partial_array_processing(obj);
      } else {
        _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
      }
    }
  }
  check_limits();
}

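// An objArray is sliced if it has at least ObjArrayMarkingStride elements,
// so that scanning a large array can be split up and interleaved with other
// marking work.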
inline bool G1CMTask::should_be_sliced(oop obj) {
  return obj->is_objArray() && ((objArrayOop)obj)->length() >= (int)ObjArrayMarkingStride;
}

inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
  obj->oop_iterate_elements_range(_cm_oop_closure,
                                  checked_cast<int>(start),
                                  checked_cast<int>(end));
}

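// Record the region's current top as its top-at-mark-start (TAMS). Objects
// allocated at or above TAMS are considered implicitly live; see
// obj_allocated_since_mark_start().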
inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
  uint const region = r->hrm_index();
  assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
  _top_at_mark_starts[region] = r->top();
}

inline void G1ConcurrentMark::reset_top_at_mark_start(G1HeapRegion* r) {
  _top_at_mark_starts[r->hrm_index()] = r->bottom();
}

inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const {
  return top_at_mark_start(r->hrm_index());
}

inline HeapWord* G1ConcurrentMark::top_at_mark_start(uint region) const {
  assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
  return _top_at_mark_starts[region];
}

inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
  uint const region = _g1h->addr_to_region(obj);
  assert(region < _g1h->max_num_regions(), "obj " PTR_FORMAT " maps to region %u outside the heap", p2i(obj), region);
  return cast_from_oop<HeapWord*>(obj) >= top_at_mark_start(region);
}

inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(G1HeapRegion* r) const {
  return _top_at_rebuild_starts[r->hrm_index()];
}

inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
  assert(r->is_old() || r->is_humongous(), "precondition");

  uint const region = r->hrm_index();
  assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
  assert(_top_at_rebuild_starts[region] == nullptr,
         "TARS for region %u has already been set to " PTR_FORMAT " but should be null",
         region, p2i(_top_at_rebuild_starts[region]));
  _top_at_rebuild_starts[region] = r->top();
}

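// Add the object's size to the cached live-word count of its region.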
inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
  _mark_stats_cache.add_live_words(_g1h->addr_to_region(obj), obj_size);
}

inline void G1CMTask::inc_incoming_refs(oop const obj) {
  _mark_stats_cache.inc_incoming_refs(_g1h->addr_to_region(obj));
}

inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
  task(worker_id)->update_liveness(obj, size);
}

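// Abort marking for this task if the periodic clock check signals that it
// should stop.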
inline void G1CMTask::abort_marking_if_regular_check_fail() {
  if (!regular_clock_call()) {
    set_has_aborted();
  }
}

inline bool G1CMTask::make_reference_grey(oop obj) {
  if (!_cm->mark_in_bitmap(_worker_id, obj)) {
    return false;
  }

  // No OrderAccess::storeload() is needed. It is implicit in the
  // CAS done in the G1CMBitMap::par_mark() call in mark_in_bitmap() above.
  HeapWord* global_finger = _cm->finger();

  // We only need to push a newly grey object on the mark
  // stack if it is in a section of memory the mark bitmap
  // scan has already examined.  Mark bitmap scanning
  // maintains progress "fingers" for determining that.
  //
  // Notice that the global finger might be moving forward
  // concurrently. This is not a problem. In the worst case, we
  // mark the object while it is above the global finger and, by
  // the time we read the global finger, it has moved forward
  // past this object. In this case, the object will probably
  // be visited when a task is scanning the region and will also
  // be pushed on the stack. So, some duplicate work, but no
  // correctness problems.
  if (is_below_finger(obj, global_finger)) {
    G1TaskQueueEntry entry(obj);
    if (obj->is_typeArray()) {
      // Immediately process arrays of primitive types, rather
      // than pushing on the mark stack.  This keeps us from
      // adding humongous objects to the mark stack that might
      // be reclaimed before the entry is processed - see
      // selection of candidates for eager reclaim of humongous
      // objects.  The cost of the additional type test is
      // mitigated by avoiding a trip through the mark stack,
      // by only doing a bookkeeping update and avoiding the
      // actual scan of the object - a typeArray contains no
      // references, and the metadata is built-in.
      process_grey_task_entry<false>(entry, false /* stolen */);
    } else {
      push(entry);
    }
  }
  return true;
}

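// Handle the reference at p: count it, record cross-region incoming
// references in the per-region statistics (same-region references are not
// of interest there), and attempt to grey the referenced object. Returns
// whether the object was newly marked.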
template <class T>
inline bool G1CMTask::deal_with_reference(T* p) {
  increment_refs_reached();
  oop const obj = RawAccess<MO_RELAXED>::oop_load(p);
  if (obj == nullptr) {
    return false;
  }

  if (!G1HeapRegion::is_in_same_region(p, obj)) {
    inc_incoming_refs(obj);
  }
  return make_reference_grey(obj);
}

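// Mark obj in the bitmap without any liveness accounting; see
// mark_in_bitmap() for the variant that also updates liveness statistics.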
inline void G1ConcurrentMark::raw_mark_in_bitmap(oop obj) {
  _mark_bitmap.par_mark(obj);
}

inline bool G1ConcurrentMark::is_marked_in_bitmap(oop p) const {
  assert(p != nullptr && oopDesc::is_oop(p), "expected an oop");
  return _mark_bitmap.is_marked(cast_from_oop<HeapWord*>(p));
}

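// Yield to the suspendible thread set if a yield (e.g. for a safepoint) has
// been requested. Returns true if we actually yielded.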
inline bool G1ConcurrentMark::do_yield_check() {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#endif // SHARE_GC_G1_G1CONCURRENTMARK_INLINE_HPP