/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

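// Entry point for a stop-the-world Full GC. The collection runs as a VM
// operation at a safepoint, so it cannot bail out: it always reports success.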
bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Full GC always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::handle_completion(heap);
  }

  metrics.snap_after();

  if (metrics.is_good_progress(heap->global_generation())) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, record that we completed a "successful" full GC.
  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

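// The Full GC driver: recovers from whatever state the heap was left in
// (concurrent mark, evacuation, update-refs), then runs the five STW phases:
// mark, compute target addresses, adjust pointers, compact, and epilog.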
void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::prepare();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // e. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
    }

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // Note: PLABs are also retired with GCLABs in generational mode.
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

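  // Worker slices partition the heap regions for parallel sliding compaction:
  // phase 2 fills them via distribute_slices(), and phase 4 walks them again
  // to perform the actual object moves.
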
  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    FullGCForwarding::begin();

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();

    FullGCForwarding::end();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  heap->global_generation()->reset_mark_bitmap<true, true>();
  assert(heap->marking_context()->is_bitmap_clear(), "sanity");
  assert(!heap->global_generation()->is_mark_complete(), "sanity");

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);

  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::log_live_in_old(heap);
  }
}

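// Computes the target (forwarding) address for each live object in a worker's
// slice, sliding live data toward the bottom of the slice. When an object does
// not fit the current target region, compaction spills into the next remembered
// empty region, or continues within the object's own region as a last resort.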
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->gc_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t old_size = p->size();
    size_t new_size = p->copy_size(old_size, p->mark());
    size_t obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
      obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into the current region; record the new location if the object moves:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
};

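// Parallel task: each worker iterates the regions in its own slice and runs
// the prepare-for-compaction closure over their live objects, remembering
// emptied regions for reuse as compaction targets.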
class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // moves are special-cased and handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;
private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
                                                               empty_regions, from_region, worker_id);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  }
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

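// Humongous objects are planned separately: they slide toward the end of the
// heap in whole-region units, into a contiguous window of free regions.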
void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end %zu", to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        FullGCForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->try_recycle_under_lock();
    }
    if (r->is_cset()) {
      // Leave affiliation unchanged
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region %zu", r->index());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
    r->set_new_top(r->top());
  }
};

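// Reclaims immediate garbage ahead of compaction: humongous objects that were
// not marked are trashed as whole region runs, and regular regions without
// live data are made trash immediately.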
class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->global_generation()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(), "Region %zu is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(), "Region %zu should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(), "Region %zu should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live
  // data, we need to only take what the worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left. Which means we want to have all workers have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data is there. This would approximate the size of dense prefix
  // we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

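  // For example, with 3 workers and 6 regions' worth of total live data,
  // live_per_worker is 2 regions' worth of words, so each worker gets a
  // 2-region dense-prefix quota; the remaining candidate regions form the tail.
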
  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: %zu", idx);
      assert(!map.at(idx), "No region distributed twice: %zu", idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: %zu", rid);
  }
#endif
}

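// Phase 2 plans the compaction: trash immediately collectible regions,
// normalize region states, slice the heap among workers, and compute
// forwarding addresses for regular and humongous objects.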
void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // About to figure out which regions can be compacted, make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure trash_immediate_garbage;
    ShenandoahExcludeRegionClosure<FREE> cl(&trash_immediate_garbage);
    heap->heap_region_iterate(&cl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (FullGCForwarding::is_forwarded(obj)) {
        oop forw = FullGCForwarding::forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->gc_generation()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

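// Applies the pointer-adjustment closure to every reference field of each
// live object, so all heap references point at forwardees before the copy.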
class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (_heap->mode()->is_generational()) {
        ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (FullGCForwarding::is_forwarded(p)) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
      assert(compact_from != compact_to, "Forwarded object should move");
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
      new_obj->initialize_hash_if_necessary(p);
    }
  }
};

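// Phase 4 copy task: each worker slides the live objects in its slice to
// their precomputed forwarding addresses and then sets region tops to the
// new compaction points.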
class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  bool _is_generational;
  size_t _young_regions, _young_usage, _young_humongous_waste;
  size_t _old_regions, _old_usage, _old_humongous_waste;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
                                   _is_generational(_heap->mode()->is_generational()),
                                   _young_regions(0),
                                   _young_usage(0),
                                   _young_humongous_waste(0),
                                   _old_regions(0),
                                   _old_usage(0),
                                   _old_humongous_waste(0)
  {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->gc_generation()->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      if (!_is_generational) {
        r->make_affiliated_maybe();
      }
      // else, generational mode compaction has already established affiliation.
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->try_recycle_under_lock();
    } else {
      if (r->is_old()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
      } else if (r->is_young()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
      }
    }
    r->set_live_data(live);
    r->reset_alloc_metadata();
  }

  void update_generation_usage() {
    if (_is_generational) {
      _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
      _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
    } else {
      assert(_old_regions == 0, "Old regions only expected in generational mode");
      assert(_old_usage == 0, "Old usage only expected in generational mode");
      assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
    }

    // In generational mode, global usage should be the sum of young and old. This is also true
    // for non-generational modes except that there are no old regions.
    _heap->global_generation()->establish_usage(_old_regions + _young_regions,
                                                _old_usage + _young_usage,
                                                _old_humongous_waste + _young_humongous_waste);
  }
};

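// Moves humongous objects to their target regions, serially, and transfers
// the humongous start/continuation states (and affiliation) to the
// destination regions while the source regions become regular and empty.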
void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!FullGCForwarding::is_forwarded(old_obj)) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj));
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region %zu should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region %zu to region %zu", old_start, new_start);
      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // Leave humongous region affiliation unchanged.
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->gc_generation()->complete_marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    post_compact.update_generation_usage();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::balance_generations_after_gc(heap);
    }

    heap->collection_set()->clear();
    size_t young_cset_regions, old_cset_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

    // We also do not expand old generation size following Full GC because we have scrambled age populations and
    // no longer have objects separated by age into distinct regions.
    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::compute_balances();
    }

    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);

    // Set mark incomplete because the marking bitmaps have been reset, except for pinned regions.
    heap->global_generation()->set_mark_incomplete();

    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();

  // We defer generation resizing actions until after cset regions have been recycled.
  // We do this even following an abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
    ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
  }
}