/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#ifdef ASSERT
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {
  assert(generation->used_regions_size() <= generation->max_capacity(),
         "%s generation affiliated regions must be less than capacity", generation->name());
}

void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
  assert(generation->used_including_humongous_waste() <= generation->used_regions_size(),
         "%s consumed can be no larger than span of affiliated regions", generation->name());
}
#else
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {}
void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {}
#endif


void ShenandoahGenerationalFullGC::prepare() {
  auto heap = ShenandoahGenerationalHeap::heap();
  // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());
  heap->set_active_generation();

  // No need for old_gen->increase_used() as this was done when plabs were allocated.
  heap->reset_generation_reserves();

  // Full GC supersedes any marking or coalescing in old generation.
  heap->old_generation()->cancel_gc();
}

void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
  // Full GC should reset time since last gc for young and old heuristics
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahYoungGeneration* young = gen_heap->young_generation();
  ShenandoahOldGeneration* old = gen_heap->old_generation();
  young->heuristics()->record_cycle_end();
  old->heuristics()->record_cycle_end();

  gen_heap->mmu_tracker()->record_full(GCId::current());
  gen_heap->log_heap_status("At end of Full GC");

  assert(old->is_idle(), "After full GC, old generation should be idle.");

  // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
  // made valid by the time Full GC completes.
  assert_regions_used_not_more_than_capacity(old);
  assert_regions_used_not_more_than_capacity(young);
  assert_usage_not_more_than_regions_used(old);
  assert_usage_not_more_than_regions_used(young);

  // Establish baseline for next old-has-grown trigger.
  old->set_live_bytes_after_last_mark(old->used_including_humongous_waste());
}

void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);

  ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
  scanner->mark_read_table_as_clean();
  scanner->swap_card_tables();

  ShenandoahRegionIterator regions;
  ShenandoahReconstructRememberedSetTask task(&regions);
  heap->workers()->run_task(&task);

  // Rebuilding the remembered set recomputes all the card offsets for objects.
  // The adjust pointers phase coalesces and fills all necessary regions. In case
  // we came to the full GC from an incomplete global cycle, we need to indicate
  // that the old regions are parsable.
  heap->old_generation()->set_parsable(true);
}

void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) {
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahOldGeneration* const old_gen = gen_heap->old_generation();

  size_t old_usage = old_gen->used_regions_size();
  size_t old_capacity = old_gen->max_capacity();

  assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
  assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");

  if (old_capacity > old_usage) {
    size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->transfer_to_young(excess_old_regions);
  } else if (old_capacity < old_usage) {
    size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
  }

  log_info(gc, ergo)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT,
                     PROPERFMTARGS(gen_heap->young_generation()->used()),
                     PROPERFMTARGS(old_gen->used()));
}

void ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() {
  auto result = ShenandoahGenerationalHeap::heap()->balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Full GC", &ls);
  }
}

void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
  LogTarget(Debug, gc) lt;
  if (lt.is_enabled()) {
    size_t live_bytes_in_old = 0;
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      if (r->is_old()) {
        live_bytes_in_old += r->get_live_data_bytes();
      }
    }
    log_debug(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old));
  }
}

void ShenandoahGenerationalFullGC::restore_top_before_promote(ShenandoahHeap* heap) {
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->get_top_before_promote() != nullptr) {
      r->restore_top_before_promote();
    }
  }
}

void ShenandoahGenerationalFullGC::account_for_region(ShenandoahHeapRegion* r, size_t &region_count, size_t &region_usage, size_t &humongous_waste) {
  region_count++;
  region_usage += r->used();
  if (r->is_humongous_start()) {
    // For each humongous object, we take this path once regardless of how many regions it spans.
    HeapWord* obj_addr = r->bottom();
    oop obj = cast_to_oop(obj_addr);
    size_t word_size = obj->size();
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t overreach = word_size % region_size_words;
    if (overreach != 0) {
      humongous_waste += (region_size_words - overreach) * HeapWordSize;
    }
    // else, this humongous object aligns exactly on region size, so no waste.
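    // A worked example of the waste arithmetic above (illustrative figures only; the actual
    // region size is chosen at startup): with 4 MB regions and 8-byte heap words,
    // region_size_words is 524,288. A 9 MB humongous object (1,179,648 words) spans three
    // regions; overreach is 1,179,648 % 524,288 = 131,072 words, so the trailing region
    // wastes (524,288 - 131,072) * 8 = 3,145,728 bytes (3 MB).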
  }
}

void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeapRegion* r) {
  if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
    r->begin_preemptible_coalesce_and_fill();
    r->oop_coalesce_and_fill(false);
  }
}

void ShenandoahGenerationalFullGC::compute_balances() {
  auto heap = ShenandoahGenerationalHeap::heap();

  // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
  heap->old_generation()->set_promotion_potential(0);
  // Invoke this in case we are able to transfer memory from OLD to YOUNG.
  heap->compute_old_generation_balance(0, 0);
}

ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
                                                                                                                 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                                                                 ShenandoahHeapRegion* from_region, uint worker_id) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahGenerationalHeap::heap()),
    _tenuring_threshold(0),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _old_to_region(nullptr),
    _young_to_region(nullptr),
    _from_region(nullptr),
    _from_affiliation(ShenandoahAffiliation::FREE),
    _old_compact_point(nullptr),
    _young_compact_point(nullptr),
    _worker_id(worker_id) {
  assert(from_region != nullptr, "Worker needs from_region");
  // assert from_region has live?
  if (from_region->is_old()) {
    _old_to_region = from_region;
    _old_compact_point = from_region->bottom();
  } else if (from_region->is_young()) {
    _young_to_region = from_region;
    _young_compact_point = from_region->bottom();
  }

  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::set_from_region(ShenandoahHeapRegion* from_region) {
  log_debug(gc)("Worker %u compacting %s Region %zu which had used %zu and %s live",
                _worker_id, from_region->affiliation_name(),
                from_region->index(), from_region->used(), from_region->has_live() ?
"has": "does not have"); 228 229 _from_region = from_region; 230 _from_affiliation = from_region->affiliation(); 231 if (_from_region->has_live()) { 232 if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) { 233 if (_old_to_region == nullptr) { 234 _old_to_region = from_region; 235 _old_compact_point = from_region->bottom(); 236 } 237 } else { 238 assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG"); 239 if (_young_to_region == nullptr) { 240 _young_to_region = from_region; 241 _young_compact_point = from_region->bottom(); 242 } 243 } 244 } // else, we won't iterate over this _from_region so we don't need to set up to region to hold copies 245 } 246 247 void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish() { 248 finish_old_region(); 249 finish_young_region(); 250 } 251 252 void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region() { 253 if (_old_to_region != nullptr) { 254 log_debug(gc)("Planned compaction into Old Region %zu, used: %zu tabulated by worker %u", 255 _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id); 256 _old_to_region->set_new_top(_old_compact_point); 257 _old_to_region = nullptr; 258 } 259 } 260 261 void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() { 262 if (_young_to_region != nullptr) { 263 log_debug(gc)("Worker %u planned compaction into Young Region %zu, used: %zu", 264 _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom()); 265 _young_to_region->set_new_top(_young_compact_point); 266 _young_to_region = nullptr; 267 } 268 } 269 270 bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() { 271 return (_from_region == _old_to_region) || (_from_region == _young_to_region); 272 } 273 274 void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) { 275 assert(_from_region != nullptr, "must set before work"); 276 assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()), 277 "Object must reside in _from_region"); 278 assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); 279 assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked"); 280 281 size_t old_size = p->size(); 282 size_t new_size = p->copy_size(old_size, p->mark()); 283 uint from_region_age = _from_region->age(); 284 uint object_age = p->age(); 285 286 bool promote_object = false; 287 if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) && 288 (from_region_age + object_age >= _tenuring_threshold)) { 289 if ((_old_to_region != nullptr) && (_old_compact_point + new_size > _old_to_region->end())) { 290 finish_old_region(); 291 _old_to_region = nullptr; 292 } 293 if (_old_to_region == nullptr) { 294 if (_empty_regions_pos < _empty_regions.length()) { 295 ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos); 296 _empty_regions_pos++; 297 new_to_region->set_affiliation(OLD_GENERATION); 298 _old_to_region = new_to_region; 299 _old_compact_point = _old_to_region->bottom(); 300 promote_object = true; 301 } 302 // Else this worker thread does not yet have any empty regions into which this aged object can be promoted so 303 // we leave promote_object as false, deferring the promotion. 
    } else {
      promote_object = true;
    }
  }

  if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
    assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
    size_t obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_old_compact_point + obj_size > _old_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing old region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                    p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

      // Object does not fit. Get a new _old_to_region.
      finish_old_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
      } else {
        // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _old_to_region = new_to_region;
      _old_compact_point = _old_to_region->bottom();
      obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into current region; record its new location if it moves:
    assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
    }
    _old_compact_point += obj_size;
  } else {
    assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
           "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
    assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

    // After full gc compaction, all regions have age 0. Embed the region's age into the object's age in order to preserve
    // tenuring progress.
    if (_heap->is_aging_cycle()) {
      ShenandoahHeap::increase_object_age(p, from_region_age + 1);
    } else {
      ShenandoahHeap::increase_object_age(p, from_region_age);
    }

    size_t obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_young_compact_point + obj_size > _young_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing young region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                    p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

      // Object does not fit. Get a new _young_to_region.
      finish_young_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(YOUNG_GENERATION);
      } else {
        // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _young_to_region = new_to_region;
      _young_compact_point = _young_to_region->bottom();
      obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into current region; record its new location if it moves:
    assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);

    if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
    }
    _young_compact_point += obj_size;
  }
}