/*
 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1CollectedHeap.hpp"

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
#include "gc/g1/g1HeapRegionSet.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"

inline bool G1STWIsAliveClosure::do_object_b(oop p) {
  // An object is reachable if it is outside the collection set and not a
  // humongous candidate, or is inside and copied.
  return !_g1h->is_in_cset_or_humongous_candidate(p) || p->is_forwarded();
}

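// Claims a chunk of at most _claim_step consecutive JavaThreads from the
// thread list. Returns a pointer to the first claimed thread and sets count
// to the number of threads claimed, or returns nullptr (with count == 0)
// once the list is exhausted.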
inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
  count = 0;
  if (_cur_claim.load_relaxed() >= _list.length()) {
    return nullptr;
  }
  uint claim = _cur_claim.fetch_then_add(_claim_step);
  if (claim >= _list.length()) {
    return nullptr;
  }
  count = MIN2(_list.length() - claim, _claim_step);
  return _list.list()->threads() + claim;
}

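// Applies cl to every thread in the list. Safe to call from multiple worker
// threads concurrently; each call to claim() hands out a disjoint chunk.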
inline void G1JavaThreadsListClaimer::apply(ThreadClosure* cl) {
  JavaThread* const* list;
  uint count;

  while ((list = claim(count)) != nullptr) {
    for (uint i = 0; i < count; i++) {
      cl->do_thread(list[i]);
    }
  }
}

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

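// Returns the PLAB allocation statistics for the given destination: survivor
// statistics for Young, old generation statistics for Old.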
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return nullptr; // Keep some compilers happy
  }
}

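// Computes the desired PLAB size in words for the given destination from the
// recorded allocation statistics and the current number of active workers.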
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_size(workers()->active_workers());
  return clamp_plab_size(gclab_word_size);
}

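// Clamps a PLAB size to the valid range: at least the minimum PLAB size and
// at most the humongous object threshold.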
inline size_t G1CollectedHeap::clamp_plab_size(size_t value) const {
  return clamp(value, PLAB::min_size(), _humongous_object_threshold_in_words);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline G1HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or null if unmapped. It assumes the index is valid.
inline G1HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

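// Applies f to each region of the humongous object starting at start, e.g.
//   humongous_obj_regions_iterate(r, [&](G1HeapRegion* hr) { /* per-region work */ });
// The next region is looked up before f is applied, so f is free to modify
// or reclaim the current region.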
template <typename Func>
inline void G1CollectedHeap::humongous_obj_regions_iterate(G1HeapRegion* start, const Func& f) {
  assert(start->is_starts_humongous(), "must be");

  do {
    G1HeapRegion* next = _hrm.next_region_in_humongous(start);
    f(start);
    start = next;
  } while (start != nullptr);
}

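// Returns the index of the region containing addr. addr must be within the
// reserved heap.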
inline uint G1CollectedHeap::addr_to_region(const void* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved().start()), p2i(reserved().end()));
  return (uint)(pointer_delta(addr, reserved().start(), sizeof(uint8_t)) >> G1HeapRegion::LogOfHRGrainBytes);
}

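// Returns the bottom address of the region with the given index.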
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * G1HeapRegion::GrainWords;
}

inline G1HeapRegion* G1CollectedHeap::heap_region_containing(const void* addr) const {
  uint const region_idx = addr_to_region(addr);
  return region_at(region_idx);
}

inline G1HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const void* addr) const {
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(G1HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(G1HeapRegion* hr) {
  _old_set.remove(hr);
}

inline G1ScannerTasksQueueSet* G1CollectedHeap::task_queues() const {
  return _task_queues;
}

inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

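// Returns whether obj is marked in the concurrent marking bitmap.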
inline bool G1CollectedHeap::is_marked(oop obj) const {
  return _cm->mark_bitmap()->is_marked(obj);
}

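// Collection set membership tests and region attribute lookups below are
// backed by the region attribute table.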
inline bool G1CollectedHeap::is_in_cset(oop obj) const {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) const {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const G1HeapRegion* hr) const {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous_candidate(const oop obj) {
  return _region_attr.is_in_cset_or_humongous_candidate(cast_from_oop<HeapWord*>(obj));
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

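// The register_*/update_* functions below keep the region attribute table in
// sync as regions are added to the collection set or identified as humongous
// candidates.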
void G1CollectedHeap::register_humongous_candidate_region_with_region_attr(uint index) {
  assert(!region_at(index)->has_pinned_objects(), "must be");
  assert(region_at(index)->rem_set()->is_complete(), "must be");
  _region_attr.set_humongous_candidate(index);
}

void G1CollectedHeap::register_young_region_with_region_attr(G1HeapRegion* r) {
  assert(!is_in_cset(r), "should not already be registered as in collection set");
  _region_attr.set_in_young(r->hrm_index(), r->has_pinned_objects());
}

void G1CollectedHeap::register_new_survivor_region_with_region_attr(G1HeapRegion* r) {
  assert(!is_in_cset(r), "should not already be registered as in collection set");
  _region_attr.set_new_survivor_region(r->hrm_index(), r->has_pinned_objects());
}

void G1CollectedHeap::update_region_attr(G1HeapRegion* r) {
  _region_attr.set_is_remset_tracked(r->hrm_index(), r->rem_set()->is_tracked());
  _region_attr.set_is_pinned(r->hrm_index(), r->has_pinned_objects());
}

void G1CollectedHeap::register_old_collection_set_region_with_region_attr(G1HeapRegion* r) {
  assert(!is_in_cset(r), "should not already be registered as in collection set");
  assert(r->is_old(), "must be");
  assert(r->rem_set()->is_complete(), "must be");
  _region_attr.set_in_old(r->hrm_index(), true, r->has_pinned_objects());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(G1HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

inline bool G1CollectedHeap::is_in_young(const oop obj) const {
  if (obj == nullptr) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
  assert(obj != nullptr, "");
  return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
  assert(!hr->is_free(), "looking up obj " PTR_FORMAT " in Free region %u", p2i(obj), hr->hrm_index());
  if (hr->is_in_parsable_area(obj)) {
    // This object is in the parsable part of the heap, live unless scrubbed.
    return is_filler_object(obj);
  } else {
    // From Remark until a region has been concurrently scrubbed, parts of the
    // region are not guaranteed to be parsable. Use the bitmap for liveness.
    return !concurrent_mark()->mark_bitmap()->is_marked(obj);
  }
}

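// Object pinning support (e.g. for JNI critical regions): pin and unpin
// record the count in the current thread's region pin count cache rather
// than touching the region directly.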
inline void G1CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  assert(obj != nullptr, "obj must not be null");
  assert(!is_stw_gc_active(), "must not pin objects during a GC pause");
  assert(obj->is_typeArray(), "must be typeArray");

  uint obj_region_idx = heap_region_containing(obj)->hrm_index();
  G1ThreadLocalData::pin_count_cache(thread).inc_count(obj_region_idx);
}

inline void G1CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  assert(obj != nullptr, "obj must not be null");
  assert(!is_stw_gc_active(), "must not unpin objects during a GC pause");

  uint obj_region_idx = heap_region_containing(obj)->hrm_index();
  G1ThreadLocalData::pin_count_cache(thread).dec_count(obj_region_idx);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  assert(obj != nullptr, "precondition");

  return is_obj_dead(obj, heap_region_containing(obj));
}

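// During full GC liveness is determined solely by the mark bitmap; the
// region argument is unused here.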
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const G1HeapRegion* hr) const {
  return !is_marked(obj);
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  return _region_attr.is_humongous_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(obj);
  // Reset the entry in the region attribute table so that subsequent
  // references to the same humongous object do not go into the slow path
  // again. This is racy, as multiple threads may enter here at the same
  // time, but the race is benign: the transition is unidirectional, from
  // humongous-candidate to not, and the write, during evacuation, is
  // separated from the read, in post-evacuation.
  if (_region_attr.is_humongous_candidate(region)) {
    _region_attr.clear_humongous_candidate(region);
  }
}

inline bool G1CollectedHeap::is_collection_set_candidate(const G1HeapRegion* r) const {
  const G1CollectionSetCandidates* candidates = collection_set()->candidates();
  return candidates->contains(r);
}

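// The eden target is the policy's young list target length minus the regions
// already in use by survivors.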
inline uint G1CollectedHeap::eden_target_length() const {
  return _policy->young_list_target_length() - survivor_regions_count();
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP