/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

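// Invariant checks applied at the end of Full GC. In product builds (no ASSERT) these
// collapse to empty functions, so callers may invoke them unconditionally.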
#ifdef ASSERT
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {
  assert(generation->used_regions_size() <= generation->max_capacity(),
         "%s generation affiliated regions must not exceed capacity", generation->name());
}

void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
  assert(generation->used_including_humongous_waste() <= generation->used_regions_size(),
         "%s consumed can be no larger than span of affiliated regions", generation->name());
}
#else
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {}
void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {}
#endif


void ShenandoahGenerationalFullGC::prepare() {
  auto heap = ShenandoahGenerationalHeap::heap();
  // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());
  heap->set_active_generation();

  // No need for old_gen->increase_used() as this was done when plabs were allocated.
  heap->reset_generation_reserves();

  // Full GC supersedes any marking or coalescing in old generation.
  heap->old_generation()->cancel_gc();
}

void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
  // Full GC should reset time since last gc for young and old heuristics
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahYoungGeneration* young = gen_heap->young_generation();
  ShenandoahOldGeneration* old = gen_heap->old_generation();
  young->heuristics()->record_cycle_end();
  old->heuristics()->record_cycle_end();

  gen_heap->mmu_tracker()->record_full(GCId::current());
  gen_heap->log_heap_status("At end of Full GC");

  assert(old->is_idle(), "After full GC, old generation should be idle.");

  // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
  // made valid by the time Full GC completes.
  assert_regions_used_not_more_than_capacity(old);
  assert_regions_used_not_more_than_capacity(young);
  assert_usage_not_more_than_regions_used(old);
  assert_usage_not_more_than_regions_used(young);

  // Establish baseline for next old-has-grown trigger.
  old->set_live_bytes_after_last_mark(old->used_including_humongous_waste());
}

void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);

  ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
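  // Compaction moved the old generation's objects, so the existing card values no longer
  // describe where the old-to-young pointers are. Start from clean tables and let the
  // reconstruction task below re-mark the dirty cards for the compacted old regions.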
  scanner->mark_read_table_as_clean();
  scanner->swap_card_tables();

  ShenandoahRegionIterator regions;
  ShenandoahReconstructRememberedSetTask task(&regions);
  heap->workers()->run_task(&task);

  // Rebuilding the remembered set recomputes all the card offsets for objects.
  // The adjust pointers phase coalesces and fills all necessary regions. In case
  // we came to the full GC from an incomplete global cycle, we need to indicate
  // that the old regions are parsable.
  heap->old_generation()->set_parsable(true);
}

void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) {
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahOldGeneration* const old_gen = gen_heap->old_generation();

  size_t old_usage = old_gen->used_regions_size();
  size_t old_capacity = old_gen->max_capacity();

  assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
  assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");

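  // Size old generation capacity to exactly cover its affiliated regions: return any surplus
  // capacity to young, and force a transfer from young to cover any deficit.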
  if (old_capacity > old_usage) {
    size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->transfer_to_young(excess_old_regions);
  } else if (old_capacity < old_usage) {
    size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
  }

  log_info(gc, ergo)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT,
                     PROPERFMTARGS(gen_heap->young_generation()->used()),
                     PROPERFMTARGS(old_gen->used()));
}

ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() {
  return ShenandoahGenerationalHeap::heap()->balance_generations();
}

void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
  LogTarget(Debug, gc) lt;
  if (lt.is_enabled()) {
    size_t live_bytes_in_old = 0;
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      if (r->is_old()) {
        live_bytes_in_old += r->get_live_data_bytes();
      }
    }
    log_debug(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old));
  }
}

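// A region in the middle of being promoted in place has saved its original top. Full GC
// abandons such promotions, so restore the saved top in any region that recorded one.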
void ShenandoahGenerationalFullGC::restore_top_before_promote(ShenandoahHeap* heap) {
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->get_top_before_promote() != nullptr) {
      r->restore_top_before_promote();
    }
  }
}

void ShenandoahGenerationalFullGC::account_for_region(ShenandoahHeapRegion* r, size_t &region_count, size_t &region_usage, size_t &humongous_waste) {
  region_count++;
  region_usage += r->used();
  if (r->is_humongous_start()) {
    // For each humongous object, we take this path once regardless of how many regions it spans.
    HeapWord* obj_addr = r->bottom();
    oop obj = cast_to_oop(obj_addr);
    size_t word_size = obj->size();
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t overreach = word_size % region_size_words;
    if (overreach != 0) {
      humongous_waste += (region_size_words - overreach) * HeapWordSize;
    }
    // else, this humongous object aligns exactly on region size, so no waste.
  }
}

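// Pinned old regions cannot be compacted by Full GC. Coalesce-and-fill overwrites their dead
// ranges with filler objects so the region remains parsable for subsequent card table scans.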
void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeapRegion* r) {
  if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
    r->begin_preemptible_coalesce_and_fill();
    r->oop_coalesce_and_fill(false);
  }
}

void ShenandoahGenerationalFullGC::compute_balances() {
  auto heap = ShenandoahGenerationalHeap::heap();

  // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
  heap->old_generation()->set_promotion_potential(0);
  // Invoke this in case we are able to transfer memory from OLD to YOUNG.
  heap->compute_old_generation_balance(0, 0);
}

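// This closure plans the compaction: it walks the live objects of one from-region at a time
// and assigns each a forwarding address at the current old or young compaction point.
// Sufficiently aged young objects are promoted by directing them to an old to-region.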
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
                                                                                                                 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                                                                 ShenandoahHeapRegion* from_region, uint worker_id) :
  _preserved_marks(preserved_marks),
  _heap(ShenandoahGenerationalHeap::heap()),
  _tenuring_threshold(0),
  _empty_regions(empty_regions),
  _empty_regions_pos(0),
  _old_to_region(nullptr),
  _young_to_region(nullptr),
  _from_region(nullptr),
  _from_affiliation(ShenandoahAffiliation::FREE),
  _old_compact_point(nullptr),
  _young_compact_point(nullptr),
  _worker_id(worker_id) {
  assert(from_region != nullptr, "Worker needs from_region");
  // assert from_region has live?
  if (from_region->is_old()) {
    _old_to_region = from_region;
    _old_compact_point = from_region->bottom();
  } else if (from_region->is_young()) {
    _young_to_region = from_region;
    _young_compact_point = from_region->bottom();
  }

  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::set_from_region(ShenandoahHeapRegion* from_region) {
  log_debug(gc)("Worker %u compacting %s Region %zu which had used %zu and %s live",
                _worker_id, from_region->affiliation_name(),
                from_region->index(), from_region->used(), from_region->has_live() ? "has" : "does not have");

  _from_region = from_region;
  _from_affiliation = from_region->affiliation();
  if (_from_region->has_live()) {
    if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) {
      if (_old_to_region == nullptr) {
        _old_to_region = from_region;
        _old_compact_point = from_region->bottom();
      }
    } else {
      assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
      if (_young_to_region == nullptr) {
        _young_to_region = from_region;
        _young_compact_point = from_region->bottom();
      }
    }
  } // else, we won't iterate over this _from_region, so we don't need to set up a to-region to hold copies
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish() {
  finish_old_region();
  finish_young_region();
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region() {
  if (_old_to_region != nullptr) {
    log_debug(gc)("Planned compaction into Old Region %zu, used: %zu tabulated by worker %u",
                  _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
    _old_to_region->set_new_top(_old_compact_point);
    _old_to_region = nullptr;
  }
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() {
  if (_young_to_region != nullptr) {
    log_debug(gc)("Worker %u planned compaction into Young Region %zu, used: %zu",
                  _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
    _young_to_region->set_new_top(_young_compact_point);
    _young_to_region = nullptr;
  }
}

bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() {
  return (_from_region == _old_to_region) || (_from_region == _young_to_region);
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) {
  assert(_from_region != nullptr, "must set before work");
  assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
         "Object must reside in _from_region");
  assert(_heap->global_generation()->complete_marking_context()->is_marked(p), "must be marked");
  assert(!_heap->global_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

  size_t obj_size = p->size();
  uint from_region_age = _from_region->age();
  uint object_age = p->age();

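  // A young object is promoted when its own age plus its region's age reaches the tenuring
  // threshold, provided an old to-region can be found. If no empty region is available for
  // promotion, promote_object stays false and the object is compacted within young instead.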
  bool promote_object = false;
  if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
      (from_region_age + object_age >= _tenuring_threshold)) {
    if ((_old_to_region != nullptr) && (_old_compact_point + obj_size > _old_to_region->end())) {
      finish_old_region();
      _old_to_region = nullptr;
    }
    if (_old_to_region == nullptr) {
      if (_empty_regions_pos < _empty_regions.length()) {
        ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
        promote_object = true;
      }
      // Else this worker thread does not yet have any empty regions into which this aged object can be promoted so
      // we leave promote_object as false, deferring the promotion.
    } else {
      promote_object = true;
    }
  }

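  // Old objects, and young objects selected for promotion, are forwarded into the current
  // old to-region; all other young objects are forwarded within young in the else branch.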
  if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
    assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
    if (_old_compact_point + obj_size > _old_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing old region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                    p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

      // Object does not fit. Get a new _old_to_region.
      finish_old_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
      } else {
        // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _old_to_region = new_to_region;
      _old_compact_point = _old_to_region->bottom();
    }

    // Object fits into the current region. Record its new location if the object moves:
    assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
    }
    _old_compact_point += obj_size;
  } else {
    assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
           "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
    assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

    // After full GC compaction, all regions have age 0. Embed the region's age into the object's age in order to preserve
    // tenuring progress.
    if (_heap->is_aging_cycle()) {
      ShenandoahHeap::increase_object_age(p, from_region_age + 1);
    } else {
      ShenandoahHeap::increase_object_age(p, from_region_age);
    }

    if (_young_compact_point + obj_size > _young_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing young region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                    p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

      // Object does not fit. Get a new _young_to_region.
      finish_young_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(YOUNG_GENERATION);
      } else {
        // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _young_to_region = new_to_region;
      _young_compact_point = _young_to_region->bottom();
    }

    // Object fits into the current region. Record its new location if the object moves:
    assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);

    if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
    }
    _young_compact_point += obj_size;
  }
}