/*
 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
#include "gc/g1/g1HeapRegionSet.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"

inline bool G1STWIsAliveClosure::do_object_b(oop p) {
  // An object is reachable if it is outside the collection set and not a
  // humongous candidate, or is inside and copied.
  return !_g1h->is_in_cset_or_humongous_candidate(p) || p->is_forwarded();
}

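// Claims a chunk of up to _claim_step consecutive threads from the thread
// list. Returns a pointer to the first claimed thread and sets count to the
// number of threads actually claimed; returns nullptr (with count 0) once the
// list is exhausted. The claim index is advanced atomically, so multiple
// workers may claim chunks concurrently.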
inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
  count = 0;
  if (_cur_claim.load_relaxed() >= _list.length()) {
    return nullptr;
  }
  uint claim = _cur_claim.fetch_then_add(_claim_step);
  if (claim >= _list.length()) {
    return nullptr;
  }
  count = MIN2(_list.length() - claim, _claim_step);
  return _list.list()->threads() + claim;
}

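// Applies the given closure to every thread in the list, claiming chunks
// until the list is exhausted.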
inline void G1JavaThreadsListClaimer::apply(ThreadClosure* cl) {
  JavaThread* const* list;
  uint count;

  while ((list = claim(count)) != nullptr) {
    for (uint i = 0; i < count; i++) {
      cl->do_thread(list[i]);
    }
  }
}

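// Arrays without oop fields (i.e. typeArrays) contain no references to
// trace, so such objects can be marked through immediately instead of being
// scanned.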
bool G1CollectedHeap::can_be_marked_through_immediately(oop obj) const {
  return obj->is_array() && !obj->is_array_with_oops();
}

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

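// Returns the PLAB allocation statistics for the given evacuation
// destination: survivor statistics for young regions, old statistics
// otherwise.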
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return nullptr; // Keep some compilers happy
  }
}

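// Computes the desired PLAB size for the given destination from the recorded
// allocation statistics and the current number of active workers, clamped to
// the range allowed by clamp_plab_size() below.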
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_size(workers()->active_workers());
  return clamp_plab_size(gclab_word_size);
}

inline size_t G1CollectedHeap::clamp_plab_size(size_t value) const {
  return clamp(value, PLAB::min_size(), _humongous_object_threshold_in_words);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline G1HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or null if unmapped. It assumes the index is valid.
inline G1HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

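// Applies f to every region of the humongous object starting at the given
// region. The next region is looked up before f is applied, so f may modify
// or even free the current region.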
template <typename Func>
inline void G1CollectedHeap::humongous_obj_regions_iterate(G1HeapRegion* start, const Func& f) {
  assert(start->is_starts_humongous(), "must be");

  do {
    G1HeapRegion* next = _hrm.next_region_in_humongous(start);
    f(start);
    start = next;
  } while (start != nullptr);
}

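// Translates a heap address into its region index by dividing the address's
// offset from the start of the reserved heap by the region size.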
inline uint G1CollectedHeap::addr_to_region(const void* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
  return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> G1HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * G1HeapRegion::GrainWords;
}

inline G1HeapRegion* G1CollectedHeap::heap_region_containing(const void* addr) const {
  uint const region_idx = addr_to_region(addr);
  return region_at(region_idx);
}

inline G1HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const void* addr) const {
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(G1HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(G1HeapRegion* hr) {
  _old_set.remove(hr);
}

inline G1ScannerTasksQueueSet* G1CollectedHeap::task_queues() const {
  return _task_queues;
}

inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::is_marked(oop obj) const {
  return _cm->mark_bitmap()->is_marked(obj);
}

inline bool G1CollectedHeap::is_in_cset(oop obj) const {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) const {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const G1HeapRegion* hr) const {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous_candidate(const oop obj) {
  return _region_attr.is_in_cset_or_humongous_candidate(cast_from_oop<HeapWord*>(obj));
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

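// The register_*_with_region_attr methods below record per-region state in
// the region attribute table that is consulted on the GC fast paths during
// evacuation.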
void G1CollectedHeap::register_humongous_candidate_region_with_region_attr(uint index) {
  assert(!region_at(index)->has_pinned_objects(), "must be");
  assert(region_at(index)->rem_set()->is_complete(), "must be");
  _region_attr.set_humongous_candidate(index);
}

void G1CollectedHeap::register_young_region_with_region_attr(G1HeapRegion* r) {
  assert(!is_in_cset(r), "should not already be registered as in collection set");
  _region_attr.set_in_young(r->hrm_index(), r->has_pinned_objects());
}

void G1CollectedHeap::register_new_survivor_region_with_region_attr(G1HeapRegion* r) {
  assert(!is_in_cset(r), "should not already be registered as in collection set");
  _region_attr.set_new_survivor_region(r->hrm_index(), r->has_pinned_objects());
}

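// Refreshes the remembered set tracking and pinned state of the given region
// in the region attribute table.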
void G1CollectedHeap::update_region_attr(G1HeapRegion* r) {
  _region_attr.set_is_remset_tracked(r->hrm_index(), r->rem_set()->is_tracked());
  _region_attr.set_is_pinned(r->hrm_index(), r->has_pinned_objects());
}

void G1CollectedHeap::register_old_collection_set_region_with_region_attr(G1HeapRegion* r) {
  assert(!is_in_cset(r), "should not already be registered as in collection set");
  assert(r->is_old(), "must be");
  assert(r->rem_set()->is_complete(), "must be");
  _region_attr.set_in_old(r->hrm_index(), true, r->has_pinned_objects());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(G1HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

inline bool G1CollectedHeap::is_in_young(const oop obj) const {
  if (obj == nullptr) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
  assert(obj != nullptr, "");
  return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
  assert(!hr->is_free(), "looking up obj " PTR_FORMAT " in Free region %u", p2i(obj), hr->hrm_index());
  if (hr->is_in_parsable_area(obj)) {
    // This object is in the parsable part of the heap, live unless scrubbed.
    return is_filler_object(obj);
  } else {
    // From Remark until a region has been concurrently scrubbed, parts of the
    // region are not guaranteed to be parsable. Use the bitmap for liveness.
    return !concurrent_mark()->mark_bitmap()->is_marked(obj);
  }
}

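// Object pinning is implemented with per-region pin counts cached per
// thread: pinning increments and unpinning decrements the cache entry for
// the region containing the object.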
inline void G1CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  assert(obj != nullptr, "obj must not be null");
  assert(!is_stw_gc_active(), "must not pin objects during a GC pause");
  assert(obj->is_typeArray(), "must be typeArray");

  uint obj_region_idx = heap_region_containing(obj)->hrm_index();
  G1ThreadLocalData::pin_count_cache(thread).inc_count(obj_region_idx);
}

inline void G1CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  assert(obj != nullptr, "obj must not be null");
  assert(!is_stw_gc_active(), "must not unpin objects during a GC pause");

  uint obj_region_idx = heap_region_containing(obj)->hrm_index();
  G1ThreadLocalData::pin_count_cache(thread).dec_count(obj_region_idx);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  assert(obj != nullptr, "precondition");

  return is_obj_dead(obj, heap_region_containing(obj));
}

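// During a full GC, liveness is determined solely by the mark bitmap; the
// region argument is unused.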
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const G1HeapRegion* hr) const {
  return !is_marked(obj);
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  return _region_attr.is_humongous_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(obj);
  // Reset the entry in the region attribute table so that subsequent
  // references to the same humongous object do not go into the slow path
  // again. This is racy, as multiple threads may enter here at the same
  // time, but it is benign: the transition is unidirectional, from
  // humongous candidate to not a candidate, and the write (in evacuation)
  // is separated from the read (in post-evacuation).
  if (_region_attr.is_humongous_candidate(region)) {
    _region_attr.clear_humongous_candidate(region);
  }
}

inline bool G1CollectedHeap::is_collection_set_candidate(const G1HeapRegion* r) const {
  const G1CollectionSetCandidates* candidates = collection_set()->candidates();
  return candidates->contains(r);
}

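// The eden target length is the young generation target length minus the
// current number of survivor regions.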
inline uint G1CollectedHeap::eden_target_length() const {
  return _policy->young_list_target_length() - survivor_regions_count();
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP