/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  ShenandoahGC(ShenandoahHeap::heap()->global_generation()),
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds.
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahMetricsSnapshot metrics(heap->free_set());

  // Perform full GC
  do_it(cause);

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::handle_completion(heap);
  }

  if (metrics.is_good_progress()) {
    heap->notify_gc_progress();
  } else {
    // Tell the allocation path that we have failed to make progress,
    // so that it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, record that we completed a "successful" full GC.
  _generation->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::prepare();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc(_generation);
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = _generation->ref_processor();
    rp->abandon_partial_discovery();

    // e. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
    }

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // Note: PLABs are also retired with GCLABs in generational mode.
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all of the phases have run.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc(_generation);
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  _generation->reset_mark_bitmap<true, true>();
  assert(heap->marking_context()->is_bitmap_clear(), "sanity");
  assert(!_generation->is_mark_complete(), "sanity");

  heap->set_unload_classes(_generation->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(_generation, true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(_generation, true /* full_gc */);

  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::log_live_in_old(heap);
  }
}

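// Computes the post-compaction address for every live object in a worker slice:
// live objects slide towards the bottom of the current to-region, spilling over
// into remembered empty regions (or back into the from-region) when the to-region
// fills up. New locations are recorded via FullGCForwarding, and original mark
// words are preserved where necessary.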
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) override {
    shenandoah_assert_mark_complete(cast_from_oop<HeapWord*>(p));
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->global_generation()->is_mark_complete(), "marking must be finished");
    assert(_heap->marking_context()->is_marked(p), "must be marked");
    assert(!_heap->marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // The object fits into the current region. Record the new location if the object actually moves:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
};

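// Worker task that runs the compaction address calculation over each worker's
// pre-distributed slice of regions, using the closure above (or its generational
// counterpart) to slide live data towards the start of the slice.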
class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous regions are
    // excluded here because their moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;
private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
                                                               empty_regions, from_region, worker_id);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  }
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after the addresses
  // for regular objects are calculated, when we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts from that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end %zu", to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        FullGCForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Restart the scan from the current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

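// Brings every region into a committed, regular, clean state before compaction,
// since live data may be slid through any region. Also records the current
// occupancy via new_top, so later phases can tell which regions are effectively empty.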
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
public:
  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_trash()) {
      r->try_recycle_under_lock();
    }
    if (r->is_cset()) {
      // Leave affiliation unchanged
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region %zu", r->index());

    // Record current region occupancy: this communicates to the rest of the Full GC code
    // that empty regions are free.
    r->set_new_top(r->top());
  }
};

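// Trashes regions that carry no live data at all (immediate garbage), including
// humongous regions whose start object is unmarked, so that they do not take part
// in the compaction address calculation.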
class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->global_generation()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(), "Region %zu is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(), "Region %zu should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous start should have been trashed already
      assert(r->humongous_start_region()->has_live(), "Region %zu should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no worker still needs
  // live data, we only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the busiest regions
  // end up on the left, which means we want every worker's after-tail regions to be
  // as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data there is. This approximates the size of the dense prefix
  // we aim to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: %zu", idx);
      assert(!map.at(idx), "No region distributed twice: %zu", idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: %zu", rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure the pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure trash_immediate_garbage;
    ShenandoahExcludeRegionClosure<FREE> cl(&trash_immediate_garbage);
    heap->heap_region_iterate(&cl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

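// Rewrites a single reference field: if the referenced object has been assigned a
// new location (is forwarded), store the forwardee address back into the field.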
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (FullGCForwarding::is_forwarded(obj)) {
        oop forw = FullGCForwarding::forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _ctx(ShenandoahHeap::heap()->global_generation()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

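// Applies the pointer-adjusting closure to all reference fields of one marked object.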
class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahAdjustPointersClosure _cl;

public:
  void do_object(oop p) override {
    assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be complete");
    assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (_heap->mode()->is_generational()) {
        ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
      }
      r = _regions.next();
    }
  }
};

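// Adjusts references held in GC roots, and updates the preserved-marks entries so
// they refer to the objects' new locations.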
class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

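// Copies a forwarded object to its pre-computed destination and reinitializes its
// mark word; objects that were not forwarded stay in place and are left untouched.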
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  uint const _worker_id;

public:
  explicit ShenandoahCompactObjectsClosure(uint worker_id) :
    _worker_id(worker_id) {}

  void do_object(oop p) override {
    assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be finished");
    assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (FullGCForwarding::is_forwarded(p)) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
      assert(compact_from != compact_to, "Forwarded object should move");
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

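// Post-compaction per-region pass: resets TAMS for unpinned regions, re-establishes
// region states (regular, trash, recycled), and accumulates per-generation usage
// statistics for generational mode.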
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  bool _is_generational;
  size_t _young_regions, _young_usage, _young_humongous_waste;
  size_t _old_regions, _old_usage, _old_humongous_waste;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
                                   _is_generational(_heap->mode()->is_generational()),
                                   _young_regions(0),
                                   _young_usage(0),
                                   _young_humongous_waste(0),
                                   _old_regions(0),
                                   _old_usage(0),
                                   _old_humongous_waste(0)
  {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      if (!_is_generational) {
        r->make_affiliated_maybe();
      }
      // else, generational mode compaction has already established affiliation.
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->try_recycle_under_lock();
    } else {
      if (r->is_old()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
      } else if (r->is_young()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
      }
    }
    r->set_live_data(live);
    r->reset_alloc_metadata();
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!FullGCForwarding::is_forwarded(old_obj)) {
        // No need to move the object; it stays in the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj));
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region %zu should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region %zu to region %zu", old_start, new_start);
      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // Leave humongous region affiliation unchanged.
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // The trailing region may be non-full; record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and do not get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    assert(heap->global_generation()->is_mark_complete(), "Marking must be complete");
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

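// Phase 5: reset marking bitmaps (except for pinned regions) and TAMS, bring regions
// into their final post-GC states, rebuild the free set, and restore preserved mark words.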
void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->collection_set()->clear();
    size_t young_cset_regions, old_cset_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

    // We also do not expand old generation size following Full GC because we have scrambled age populations and
    // no longer have objects separated by age into distinct regions.
    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::compute_balances();
    }

    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);

    // Set marking incomplete because the marking bitmaps have been reset, except for pinned regions.
    _generation->set_mark_incomplete();

    heap->clear_cancelled_gc();
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
  }
}