1 /*
   2  * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "compiler/oopMap.hpp"
  29 #include "gc/shared/continuationGCSupport.hpp"
  30 #include "gc/shared/fullGCForwarding.inline.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/preservedMarks.inline.hpp"
  33 #include "gc/shared/tlab_globals.hpp"
  34 #include "gc/shared/workerThread.hpp"
  35 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  36 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  37 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  38 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  39 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  40 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  41 #include "gc/shenandoah/shenandoahFullGC.hpp"
  42 #include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
  43 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  44 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  45 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  46 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  47 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  48 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  50 #include "gc/shenandoah/shenandoahMetrics.hpp"
  51 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  52 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  53 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  54 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  55 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  56 #include "gc/shenandoah/shenandoahUtils.hpp"
  57 #include "gc/shenandoah/shenandoahVerifier.hpp"
  58 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  59 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  60 #include "memory/metaspaceUtils.hpp"
  61 #include "memory/universe.hpp"
  62 #include "oops/compressedOops.inline.hpp"
  63 #include "oops/oop.inline.hpp"
  64 #include "runtime/orderAccess.hpp"
  65 #include "runtime/vmThread.hpp"
  66 #include "utilities/copy.hpp"
  67 #include "utilities/events.hpp"
  68 #include "utilities/growableArray.hpp"
  69 
  70 ShenandoahFullGC::ShenandoahFullGC() :
  71   ShenandoahGC(ShenandoahHeap::heap()->global_generation()),
  72   _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  73   _preserved_marks(new PreservedMarksSet(true)) {}
  74 
  75 ShenandoahFullGC::~ShenandoahFullGC() {
  76   delete _preserved_marks;
  77 }
  78 
  79 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  80   vmop_entry_full(cause);
  81   // Full GC always succeeds
  82   return true;
  83 }
  84 
  85 void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  86   ShenandoahHeap* const heap = ShenandoahHeap::heap();
  87   TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  88   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
  89 
  90   heap->try_inject_alloc_failure();
  91   VM_ShenandoahFullGC op(cause, this);
  92   VMThread::execute(&op);
  93 }
  94 
  95 void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  96   static const char* msg = "Pause Full";
  97   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  98   EventMark em("%s", msg);
  99 
 100   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 101                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
 102                               "full gc");
 103 
 104   op_full(cause);
 105 }
 106 
 107 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
 108   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 109 
 110   ShenandoahMetricsSnapshot metrics(heap->free_set());
 111 
 112   // Perform full GC
 113   do_it(cause);
 114 
 115   if (heap->mode()->is_generational()) {
 116     ShenandoahGenerationalFullGC::handle_completion(heap);
 117   }
 118 
 119   if (metrics.is_good_progress()) {
 120     heap->notify_gc_progress();
 121   } else {
 122     // We failed to make progress. Tell the allocation path, so that allocations
 123     // can finally fail.
 124     heap->notify_gc_no_progress();
 125   }
 126 
 127   // Regardless of whether progress was made, we record that we completed a "successful" full GC.
 128   _generation->heuristics()->record_success_full();
 129   heap->shenandoah_policy()->record_success_full();
 130 
 131   {
 132     ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_propagate_gc_state);
 133     heap->propagate_gc_state_to_all_threads();
 134   }
 135 }
 136 
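     // Full GC driver. After bringing the heap back to a known state, do_it() runs the
     // classic sliding compaction under a single safepoint:
     //   phase 1: mark live objects
     //   phase 2: calculate target (forwarding) addresses for live objects
     //   phase 3: adjust all references to point at the new locations
     //   phase 4: move the objects
     //   phase 5: epilog (reset bitmaps, rebuild the free set, restore preserved marks)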
 137 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 138   ShenandoahHeap* heap = ShenandoahHeap::heap();
 139 
 140   if (heap->mode()->is_generational()) {
 141     ShenandoahGenerationalFullGC::prepare();
 142   }
 143 
 144   if (ShenandoahVerify) {
 145     heap->verifier()->verify_before_fullgc(_generation);
 146   }
 147 
 148   if (VerifyBeforeGC) {
 149     Universe::verify();
 150   }
 151 
 152   // Degenerated GC may carry concurrent root flags when upgrading to
 153   // full GC. We need to reset them before mutators resume.
 154   heap->set_concurrent_strong_root_in_progress(false);
 155   heap->set_concurrent_weak_root_in_progress(false);
 156 
 157   heap->set_full_gc_in_progress(true);
 158 
 159   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 160   assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
 161 
 162   {
 163     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
 164     heap->pre_full_gc_dump(_gc_timer);
 165   }
 166 
 167   {
 168     ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
 169     // Full GC is supposed to recover from any GC state:
 170 
 171     // a0. Remember if we have forwarded objects
 172     bool has_forwarded_objects = heap->has_forwarded_objects();
 173 
 174     // a1. Cancel evacuation, if in progress
 175     if (heap->is_evacuation_in_progress()) {
 176       heap->set_evacuation_in_progress(false);
 177     }
 178     assert(!heap->is_evacuation_in_progress(), "sanity");
 179 
 180     // a2. Cancel update-refs, if in progress
 181     if (heap->is_update_refs_in_progress()) {
 182       heap->set_update_refs_in_progress(false);
 183     }
 184     assert(!heap->is_update_refs_in_progress(), "sanity");
 185 
 186     // b. Cancel all concurrent marks, if in progress
 187     if (heap->is_concurrent_mark_in_progress()) {
 188       heap->cancel_concurrent_mark();
 189     }
 190     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 191 
 192     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
 193     if (has_forwarded_objects) {
 194       update_roots(true /*full_gc*/);
 195     }
 196 
 197     // d. Abandon reference discovery and clear all discovered references.
 198     ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 199     rp->abandon_partial_discovery();
 200 
 201     // e. Sync pinned region status from the CP marks
 202     heap->sync_pinned_region_status();
 203 
 204     if (heap->mode()->is_generational()) {
 205       ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
 206     }
 207 
 208     // The rest of prologue:
 209     _preserved_marks->init(heap->workers()->active_workers());
 210 
 211     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 212   }
 213 
 214   if (UseTLAB) {
 215     // Note: PLABs are also retired with GCLABs in generational mode.
 216     heap->gclabs_retire(ResizeTLAB);
 217     heap->tlabs_retire(ResizeTLAB);
 218   }
 219 
 220   OrderAccess::fence();
 221 
 222   phase1_mark_heap();
 223 
 224   // Once marking is done (it may have fixed up forwarded objects), we can drop the flag.
 225   // Coming out of Full GC, we would not have any forwarded objects.
 226   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 227   heap->set_has_forwarded_objects(false);
 228 
 229   heap->set_full_gc_move_in_progress(true);
 230 
 231   // Setup workers for the rest
 232   OrderAccess::fence();
 233 
 234   // Initialize worker slices
 235   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
 236   for (uint i = 0; i < heap->max_workers(); i++) {
 237     worker_slices[i] = new ShenandoahHeapRegionSet();
 238   }
 239 
 240   {
 241     // The rest of the code performs region moves, where region status is undefined
 242     // until all phases have run.
 243     ShenandoahHeapLocker lock(heap->lock());
 244 
 245     FullGCForwarding::begin();
 246 
 247     phase2_calculate_target_addresses(worker_slices);
 248 
 249     OrderAccess::fence();
 250 
 251     phase3_update_references();
 252 
 253     phase4_compact_objects(worker_slices);
 254 
 255     phase5_epilog();
 256 
 257     FullGCForwarding::end();
 258   }
 259 
 260   // Resize metaspace
 261   MetaspaceGC::compute_new_size();
 262 
 263   // Free worker slices
 264   for (uint i = 0; i < heap->max_workers(); i++) {
 265     delete worker_slices[i];
 266   }
 267   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 268 
 269   heap->set_full_gc_move_in_progress(false);
 270   heap->set_full_gc_in_progress(false);
 271 
 272   if (ShenandoahVerify) {
 273     heap->verifier()->verify_after_fullgc(_generation);
 274   }
 275 
 276   if (VerifyAfterGC) {
 277     Universe::verify();
 278   }
 279 
 280   {
 281     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
 282     heap->post_full_gc_dump(_gc_timer);
 283   }
 284 }
 285 
 286 void ShenandoahFullGC::phase1_mark_heap() {
 287   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 288   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 289 
 290   ShenandoahHeap* heap = ShenandoahHeap::heap();
 291 
 292   _generation->reset_mark_bitmap<true, true>();
 293   assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 294   assert(!_generation->is_mark_complete(), "sanity");
 295 
 296   heap->set_unload_classes(_generation->heuristics()->can_unload_classes());
 297 
 298   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 299   // enable ("weak") refs discovery
 300   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 301 
 302   ShenandoahSTWMark mark(_generation, true /*full_gc*/);
 303   mark.mark();
 304   heap->parallel_cleaning(_generation, true /* full_gc */);
 305 
 306   if (ShenandoahHeap::heap()->mode()->is_generational()) {
 307     ShenandoahGenerationalFullGC::log_live_in_old(heap);
 308   }
 309 }
 310 
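     // Computes forwarding addresses for the live objects of one worker slice. Objects are
     // slid towards the bottom of the current to-region; when the to-region fills up, the
     // closure continues in the next remembered empty region, or within the from-region
     // itself if no empty region is available.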
 311 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 312 private:
 313   PreservedMarks*          const _preserved_marks;
 314   ShenandoahHeap*          const _heap;
 315   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 316   int _empty_regions_pos;
 317   ShenandoahHeapRegion*          _to_region;
 318   ShenandoahHeapRegion*          _from_region;
 319   HeapWord* _compact_point;
 320 
 321 public:
 322   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 323                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 324                                               ShenandoahHeapRegion* to_region) :
 325     _preserved_marks(preserved_marks),
 326     _heap(ShenandoahHeap::heap()),
 327     _empty_regions(empty_regions),
 328     _empty_regions_pos(0),
 329     _to_region(to_region),
 330     _from_region(nullptr),
 331     _compact_point(to_region->bottom()) {}
 332 
 333   void set_from_region(ShenandoahHeapRegion* from_region) {
 334     _from_region = from_region;
 335   }
 336 
 337   void finish() {
 338     assert(_to_region != nullptr, "should not happen");
 339     _to_region->set_new_top(_compact_point);
 340   }
 341 
 342   bool is_compact_same_region() {
 343     return _from_region == _to_region;
 344   }
 345 
 346   int empty_regions_pos() {
 347     return _empty_regions_pos;
 348   }
 349 
 350   void do_object(oop p) override {
 351     shenandoah_assert_mark_complete(cast_from_oop<HeapWord*>(p));
 352     assert(_from_region != nullptr, "must set before work");
 353     assert(_heap->global_generation()->is_mark_complete(), "marking must be finished");
 354     assert(_heap->marking_context()->is_marked(p), "must be marked");
 355     assert(!_heap->marking_context()->allocated_after_mark_start(p), "must be truly marked");
 356 
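         // The moved copy may need more space than the original (p->copy_size()), for example to
         // accommodate a preserved identity hash under compact object headers (see
         // initialize_hash_if_necessary() in the compaction closure). If the object does not
         // move, its current size applies.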
 357     size_t old_size = p->size();
 358     size_t new_size = p->copy_size(old_size, p->mark());
 359     size_t obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
 360     if (_compact_point + obj_size > _to_region->end()) {
 361       finish();
 362 
 363       // Object doesn't fit. Pick next empty region and start compacting there.
 364       ShenandoahHeapRegion* new_to_region;
 365       if (_empty_regions_pos < _empty_regions.length()) {
 366         new_to_region = _empty_regions.at(_empty_regions_pos);
 367         _empty_regions_pos++;
 368       } else {
 369         // Out of empty regions? Compact within the same region.
 370         new_to_region = _from_region;
 371       }
 372 
 373       assert(new_to_region != _to_region, "must not reuse same to-region");
 374       assert(new_to_region != nullptr, "must not be null");
 375       _to_region = new_to_region;
 376       _compact_point = _to_region->bottom();
 377       obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
 378     }
 379 
 380     // Object fits into the current region. Record the new location if the object actually moves:
 381     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 382     shenandoah_assert_not_forwarded(nullptr, p);
 383     if (_compact_point != cast_from_oop<HeapWord*>(p)) {
 384       _preserved_marks->push_if_necessary(p, p->mark());
 385       FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
 386     }
 387     _compact_point += obj_size;
 388   }
 389 };
 390 
 391 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 392 private:
 393   PreservedMarksSet*        const _preserved_marks;
 394   ShenandoahHeap*           const _heap;
 395   ShenandoahHeapRegionSet** const _worker_slices;
 396 
 397 public:
 398   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
 399     WorkerTask("Shenandoah Prepare For Compaction"),
 400     _preserved_marks(preserved_marks),
 401     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 402   }
 403 
 404   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 405     // Empty region: get it into the slice to defragment the slice itself.
 406     // We could have skipped this without violating correctness, but we really
 407     // want to compact all live regions to the start of the heap, which sometimes
 408     // means moving them into the fully empty regions.
 409     if (r->is_empty()) return true;
 410 
 411     // The region can be moved and is not humongous. Humongous regions are
 412     // excluded here, because their moves are handled separately.
 413     return r->is_stw_move_allowed() && !r->is_humongous();
 414   }
 415 
 416   void work(uint worker_id) override;
 417 private:
 418   template<typename ClosureType>
 419   void prepare_for_compaction(ClosureType& cl,
 420                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 421                               ShenandoahHeapRegionSetIterator& it,
 422                               ShenandoahHeapRegion* from_region);
 423 };
 424 
 425 void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
 426   ShenandoahParallelWorkerSession worker_session(worker_id);
 427   ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 428   ShenandoahHeapRegionSetIterator it(slice);
 429   ShenandoahHeapRegion* from_region = it.next();
 430   // No work?
 431   if (from_region == nullptr) {
 432     return;
 433   }
 434 
 435   // Sliding compaction. Walk all regions in the slice, and compact them.
 436   // Remember empty regions and reuse them as needed.
 437   ResourceMark rm;
 438 
 439   GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
 440 
 441   if (_heap->mode()->is_generational()) {
 442     ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
 443                                                                empty_regions, from_region, worker_id);
 444     prepare_for_compaction(cl, empty_regions, it, from_region);
 445   } else {
 446     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
 447     prepare_for_compaction(cl, empty_regions, it, from_region);
 448   }
 449 }
 450 
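     // Drives one worker's slice: every candidate region is handed to the closure, which
     // assigns forwarding addresses to its live objects. From-regions whose contents were
     // fully compacted elsewhere are remembered as empty; any remembered region the closure
     // never reused gets its new_top reset to bottom at the end.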
 451 template<typename ClosureType>
 452 void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
 453                                                                 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 454                                                                 ShenandoahHeapRegionSetIterator& it,
 455                                                                 ShenandoahHeapRegion* from_region) {
 456   while (from_region != nullptr) {
 457     assert(is_candidate_region(from_region), "Sanity");
 458     cl.set_from_region(from_region);
 459     if (from_region->has_live()) {
 460       _heap->marked_object_iterate(from_region, &cl);
 461     }
 462 
 463     // Compacted the region to somewhere else? From-region is empty then.
 464     if (!cl.is_compact_same_region()) {
 465       empty_regions.append(from_region);
 466     }
 467     from_region = it.next();
 468   }
 469   cl.finish();
 470 
 471   // Mark all remaining regions as empty
 472   for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 473     ShenandoahHeapRegion* r = empty_regions.at(pos);
 474     r->set_new_top(r->bottom());
 475   }
 476 }
 477 
 478 void ShenandoahFullGC::calculate_target_humongous_objects() {
 479   ShenandoahHeap* heap = ShenandoahHeap::heap();
 480 
 481   // Compute the new addresses for humongous objects. We need to do this after addresses
 482   // for regular objects are calculated, and we know what regions in the heap suffix are
 483   // available for humongous moves.
 484   //
 485   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 486   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 487   // humongous start there.
 488   //
 489   // The complication is potential non-movable regions during the scan. If such a region is
 490   // detected, then sliding restarts towards that non-movable region.
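       //
       // For example, with to_end at region 100 and a humongous object needing 4 regions,
       // the candidate start is region 96; if 96 >= to_begin and the object is not already
       // there, it is forwarded to region 96 and to_end becomes 96.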
 491 
 492   size_t to_begin = heap->num_regions();
 493   size_t to_end = heap->num_regions();
 494 
 495   log_debug(gc)("Full GC calculating target humongous objects from end %zu", to_end);
 496   for (size_t c = heap->num_regions(); c > 0; c--) {
 497     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 498     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 499       // To-region candidate: record this, and continue scan
 500       to_begin = r->index();
 501       continue;
 502     }
 503 
 504     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 505       // From-region candidate: movable humongous region
 506       oop old_obj = cast_to_oop(r->bottom());
 507       size_t words_size = old_obj->size();
 508       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 509 
 510       size_t start = to_end - num_regions;
 511 
 512       if (start >= to_begin && start != r->index()) {
 513         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
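             // calculate_target_humongous_objects() runs single-threaded, so worker 0's
             // preserved-marks buffer is used here.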
 514         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 515         FullGCForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
 516         to_end = start;
 517         continue;
 518       }
 519     }
 520 
 521     // Failed to fit. Scan starting from current region.
 522     to_begin = r->index();
 523     to_end = r->index();
 524   }
 525 }
 526 
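     // Brings every region into a state the sliding compaction can handle: recycles trash,
     // turns collection-set regions back into regular regions, commits empty uncommitted
     // regions, and publishes the current top as new_top.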
 527 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 528 public:
 529   void heap_region_do(ShenandoahHeapRegion* r) override {
 530     if (r->is_trash()) {
 531       r->try_recycle_under_lock();
 532     }
 533     if (r->is_cset()) {
 534       // Leave affiliation unchanged
 535       r->make_regular_bypass();
 536     }
 537     if (r->is_empty_uncommitted()) {
 538       r->make_committed_bypass();
 539     }
 540     assert (r->is_committed(), "only committed regions in heap now, see region %zu", r->index());
 541 
 542     // Record current region occupancy: this communicates that empty regions are free
 543     // to the rest of the Full GC code.
 544     r->set_new_top(r->top());
 545   }
 546 };
 547 
 548 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
 549 private:
 550   ShenandoahHeap* const _heap;
 551   ShenandoahMarkingContext* const _ctx;
 552 
 553 public:
 554   ShenandoahTrashImmediateGarbageClosure() :
 555     _heap(ShenandoahHeap::heap()),
 556     _ctx(ShenandoahHeap::heap()->global_generation()->complete_marking_context()) {}
 557 
 558   void heap_region_do(ShenandoahHeapRegion* r) override {
 559     if (r->is_humongous_start()) {
 560       oop humongous_obj = cast_to_oop(r->bottom());
 561       if (!_ctx->is_marked(humongous_obj)) {
 562         assert(!r->has_live(), "Region %zu is not marked, should not have live", r->index());
 563         _heap->trash_humongous_region_at(r);
 564       } else {
 565         assert(r->has_live(), "Region %zu should have live", r->index());
 566       }
 567     } else if (r->is_humongous_continuation()) {
 568       // If we hit a continuation, the non-live humongous start should have been trashed already
 569       assert(r->humongous_start_region()->has_live(), "Region %zu should have live", r->index());
 570     } else if (r->is_regular()) {
 571       if (!r->has_live()) {
 572         r->make_trash_immediate();
 573       }
 574     }
 575   }
 576 };
 577 
 578 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
 579   ShenandoahHeap* heap = ShenandoahHeap::heap();
 580 
 581   uint n_workers = heap->workers()->active_workers();
 582   size_t n_regions = heap->num_regions();
 583 
 584   // What we want to accomplish: have the dense prefix of data, while still balancing
 585   // out the parallel work.
 586   //
 587   // Assuming the amount of work is driven by the live data that needs moving, we can slice
 588   // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
 589   // thread takes all regions in its prefix subset, and then it takes some regions from
 590   // the tail.
 591   //
 592   // Tail region selection becomes interesting.
 593   //
 594   // First, we want to distribute the regions fairly between the workers, and those regions
 595   // might have different amounts of live data. So, until we are sure no worker needs more live data,
 596   // we only take what each worker needs.
 597   //
 598   // Second, since we slide everything to the left in each slice, the busiest regions
 599   // would be the ones on the left. This means we want all workers to have their after-tail
 600   // regions as close to the left as possible.
 601   //
 602   // The easiest way to do this is to distribute after-tail regions in round-robin between
 603   // workers that still need live data.
 604   //
 605   // Consider parallel workers A, B, C, then the target slice layout would be:
 606   //
 607   //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
 608   //
 609   //  (.....dense-prefix.....) (.....................tail...................)
 610   //  [all regions fully live] [left-most regions are fuller than right-most]
 611   //
 612 
 613   // Compute how much live data there is. This approximates the size of the dense prefix
 614   // we aim to create.
 615   size_t total_live = 0;
 616   for (size_t idx = 0; idx < n_regions; idx++) {
 617     ShenandoahHeapRegion *r = heap->get_region(idx);
 618     if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
 619       total_live += r->get_live_data_words();
 620     }
 621   }
 622 
 623   // Estimate the size for the dense prefix. Note that we specifically count only the
 624   // "full" regions, so there would be some non-full regions in the slice tail.
 625   size_t live_per_worker = total_live / n_workers;
 626   size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
 627   size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
 628   prefix_regions_total = MIN2(prefix_regions_total, n_regions);
 629   assert(prefix_regions_total <= n_regions, "Sanity");
 630 
 631   // There might be non-candidate regions in the prefix. To compute where the tail actually
 632   // ends up, we need to account for those as well.
 633   size_t prefix_end = prefix_regions_total;
 634   for (size_t idx = 0; idx < prefix_regions_total; idx++) {
 635     ShenandoahHeapRegion *r = heap->get_region(idx);
 636     if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
 637       prefix_end++;
 638     }
 639   }
 640   prefix_end = MIN2(prefix_end, n_regions);
 641   assert(prefix_end <= n_regions, "Sanity");
 642 
 643   // Distribute prefix regions per worker: each thread definitely gets its own same-sized
 644   // subset of dense prefix.
 645   size_t prefix_idx = 0;
 646 
 647   size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);
 648 
 649   for (size_t wid = 0; wid < n_workers; wid++) {
 650     ShenandoahHeapRegionSet* slice = worker_slices[wid];
 651 
 652     live[wid] = 0;
 653     size_t regs = 0;
 654 
 655     // Add all prefix regions for this worker
 656     while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
 657       ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
 658       if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
 659         slice->add_region(r);
 660         live[wid] += r->get_live_data_words();
 661         regs++;
 662       }
 663       prefix_idx++;
 664     }
 665   }
 666 
 667   // Distribute the tail among workers in round-robin fashion.
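       // Start at the last worker so that the first "next worker" probe below wraps around to worker 0.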
 668   size_t wid = n_workers - 1;
 669 
 670   for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
 671     ShenandoahHeapRegion *r = heap->get_region(tail_idx);
 672     if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
 673       assert(wid < n_workers, "Sanity");
 674 
 675       size_t live_region = r->get_live_data_words();
 676 
 677       // Select next worker that still needs live data.
 678       size_t old_wid = wid;
 679       do {
 680         wid++;
 681         if (wid == n_workers) wid = 0;
 682       } while (live[wid] + live_region >= live_per_worker && old_wid != wid);
 683 
 684       if (old_wid == wid) {
 685         // Circled back to the same worker? This means liveness data was
 686         // miscalculated. Bump the live_per_worker limit so that
 687         // everyone gets a piece of the leftover work.
 688         live_per_worker += ShenandoahHeapRegion::region_size_words();
 689       }
 690 
 691       worker_slices[wid]->add_region(r);
 692       live[wid] += live_region;
 693     }
 694   }
 695 
 696   FREE_C_HEAP_ARRAY(size_t, live);
 697 
 698 #ifdef ASSERT
 699   ResourceBitMap map(n_regions);
 700   for (size_t wid = 0; wid < n_workers; wid++) {
 701     ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
 702     ShenandoahHeapRegion* r = it.next();
 703     while (r != nullptr) {
 704       size_t idx = r->index();
 705       assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: %zu", idx);
 706       assert(!map.at(idx), "No region distributed twice: %zu", idx);
 707       map.at_put(idx, true);
 708       r = it.next();
 709     }
 710   }
 711 
 712   for (size_t rid = 0; rid < n_regions; rid++) {
 713     bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
 714     bool is_distributed = map.at(rid);
 715     assert(is_distributed || !is_candidate, "All candidates are distributed: %zu", rid);
 716   }
 717 #endif
 718 }
 719 
 720 void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
 721   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
 722   ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
 723 
 724   ShenandoahHeap* heap = ShenandoahHeap::heap();
 725 
 726   // We are about to figure out which regions can be compacted; make sure the pinning status
 727   // has been updated in the GC prologue.
 728   heap->assert_pinned_region_status();
 729 
 730   {
 731     // Trash the immediately collectible regions before computing addresses
 732     ShenandoahTrashImmediateGarbageClosure trash_immediate_garbage;
 733     ShenandoahExcludeRegionClosure<FREE> cl(&trash_immediate_garbage);
 734     heap->heap_region_iterate(&cl);
 735 
 736     // Make sure regions are in good state: committed, active, clean.
 737     // This is needed because we are potentially sliding the data through them.
 738     ShenandoahEnsureHeapActiveClosure ecl;
 739     heap->heap_region_iterate(&ecl);
 740   }
 741 
 742   // Compute the new addresses for regular objects
 743   {
 744     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 745 
 746     distribute_slices(worker_slices);
 747 
 748     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
 749     heap->workers()->run_task(&task);
 750   }
 751 
 752   // Compute the new addresses for humongous objects
 753   {
 754     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 755     calculate_target_humongous_objects();
 756   }
 757 }
 758 
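     // Phase 3 closure: if the object referenced from a slot has been forwarded in phase 2,
     // store the forwardee address back into the slot. Referenced objects must be marked,
     // since only live objects were assigned new addresses.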
 759 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 760 private:
 761   ShenandoahMarkingContext* const _ctx;
 762 
 763   template <class T>
 764   inline void do_oop_work(T* p) {
 765     T o = RawAccess<>::oop_load(p);
 766     if (!CompressedOops::is_null(o)) {
 767       oop obj = CompressedOops::decode_not_null(o);
 768       assert(_ctx->is_marked(obj), "must be marked");
 769       if (FullGCForwarding::is_forwarded(obj)) {
 770         oop forw = FullGCForwarding::forwardee(obj);
 771         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 772       }
 773     }
 774   }
 775 
 776 public:
 777   ShenandoahAdjustPointersClosure() :
 778     _ctx(ShenandoahHeap::heap()->global_generation()->complete_marking_context()) {}
 779 
 780   void do_oop(oop* p)       { do_oop_work(p); }
 781   void do_oop(narrowOop* p) { do_oop_work(p); }
 782   void do_method(Method* m) {}
 783   void do_nmethod(nmethod* nm) {}
 784 };
 785 
 786 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
 787 private:
 788   ShenandoahAdjustPointersClosure _cl;
 789 
 790 public:
 791   void do_object(oop p) override {
 792     assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be complete");
 793     assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
 794     p->oop_iterate(&_cl);
 795   }
 796 };
 797 
 798 class ShenandoahAdjustPointersTask : public WorkerTask {
 799 private:
 800   ShenandoahHeap*          const _heap;
 801   ShenandoahRegionIterator       _regions;
 802 
 803 public:
 804   ShenandoahAdjustPointersTask() :
 805     WorkerTask("Shenandoah Adjust Pointers"),
 806     _heap(ShenandoahHeap::heap()) {
 807   }
 808 
 809   void work(uint worker_id) override {
 810     ShenandoahParallelWorkerSession worker_session(worker_id);
 811     ShenandoahAdjustPointersObjectClosure obj_cl;
 812     ShenandoahHeapRegion* r = _regions.next();
 813     while (r != nullptr) {
 814       if (!r->is_humongous_continuation() && r->has_live()) {
 815         _heap->marked_object_iterate(r, &obj_cl);
 816       }
 817       if (_heap->mode()->is_generational()) {
 818         ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
 819       }
 820       r = _regions.next();
 821     }
 822   }
 823 };
 824 
 825 class ShenandoahAdjustRootPointersTask : public WorkerTask {
 826 private:
 827   ShenandoahRootAdjuster* _rp;
 828   PreservedMarksSet* _preserved_marks;
 829 public:
 830   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
 831     WorkerTask("Shenandoah Adjust Root Pointers"),
 832     _rp(rp),
 833     _preserved_marks(preserved_marks) {}
 834 
 835   void work(uint worker_id) override {
 836     ShenandoahParallelWorkerSession worker_session(worker_id);
 837     ShenandoahAdjustPointersClosure cl;
 838     _rp->roots_do(worker_id, &cl);
 839     _preserved_marks->get(worker_id)->adjust_during_full_gc();
 840   }
 841 };
 842 
 843 void ShenandoahFullGC::phase3_update_references() {
 844   GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
 845   ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
 846 
 847   ShenandoahHeap* heap = ShenandoahHeap::heap();
 848 
 849   WorkerThreads* workers = heap->workers();
 850   uint nworkers = workers->active_workers();
 851   {
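         // Derived pointers (interior pointers in compiled frames) are recorded while the base
         // pointers are adjusted, and re-derived from the new base addresses afterwards.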
 852 #if COMPILER2_OR_JVMCI
 853     DerivedPointerTable::clear();
 854 #endif
 855     ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
 856     ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
 857     workers->run_task(&task);
 858 #if COMPILER2_OR_JVMCI
 859     DerivedPointerTable::update_pointers();
 860 #endif
 861   }
 862 
 863   ShenandoahAdjustPointersTask adjust_pointers_task;
 864   workers->run_task(&adjust_pointers_task);
 865 }
 866 
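     // Phase 4 closure: copies a live object to its forwarding address. The mark word of the
     // copy is re-initialized; preserved marks are restored later in phase5_epilog().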
 867 class ShenandoahCompactObjectsClosure : public ObjectClosure {
 868 private:
 869   uint const _worker_id;
 870 
 871 public:
 872   explicit ShenandoahCompactObjectsClosure(uint worker_id) :
 873     _worker_id(worker_id) {}
 874 
 875   void do_object(oop p) override {
 876     assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be finished");
 877     assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
 878     size_t size = p->size();
 879     if (FullGCForwarding::is_forwarded(p)) {
 880       HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
 881       HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
 882       assert(compact_from != compact_to, "Forwarded object should move");
 883       Copy::aligned_conjoint_words(compact_from, compact_to, size);
 884       oop new_obj = cast_to_oop(compact_to);
 885 
 886       ContinuationGCSupport::relativize_stack_chunk(new_obj);
 887       new_obj->init_mark();
 888       new_obj->initialize_hash_if_necessary(p);
 889     }
 890   }
 891 };
 892 
 893 class ShenandoahCompactObjectsTask : public WorkerTask {
 894 private:
 895   ShenandoahHeap* const _heap;
 896   ShenandoahHeapRegionSet** const _worker_slices;
 897 
 898 public:
 899   ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
 900     WorkerTask("Shenandoah Compact Objects"),
 901     _heap(ShenandoahHeap::heap()),
 902     _worker_slices(worker_slices) {
 903   }
 904 
 905   void work(uint worker_id) override {
 906     ShenandoahParallelWorkerSession worker_session(worker_id);
 907     ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
 908 
 909     ShenandoahCompactObjectsClosure cl(worker_id);
 910     ShenandoahHeapRegion* r = slice.next();
 911     while (r != nullptr) {
 912       assert(!r->is_humongous(), "must not get humongous regions here");
 913       if (r->has_live()) {
 914         _heap->marked_object_iterate(r, &cl);
 915       }
 916       r->set_top(r->new_top());
 917       r = slice.next();
 918     }
 919   }
 920 };
 921 
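     // Post-compaction per-region bookkeeping: resets TAMS (except for pinned regions),
     // re-derives the region state from its new occupancy, and accumulates per-generation
     // usage for generational mode.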
 922 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
 923 private:
 924   ShenandoahHeap* const _heap;
 925   bool _is_generational;
 926   size_t _young_regions, _young_usage, _young_humongous_waste;
 927   size_t _old_regions, _old_usage, _old_humongous_waste;
 928 
 929 public:
 930   ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
 931                                    _is_generational(_heap->mode()->is_generational()),
 932                                    _young_regions(0),
 933                                    _young_usage(0),
 934                                    _young_humongous_waste(0),
 935                                    _old_regions(0),
 936                                    _old_usage(0),
 937                                    _old_humongous_waste(0)
 938   {
 939     _heap->free_set()->clear();
 940   }
 941 
 942   void heap_region_do(ShenandoahHeapRegion* r) override {
 943     assert (!r->is_cset(), "cset regions should have been demoted already");
 944 
 945     // Need to reset the complete-top-at-mark-start pointer here because
 946     // the complete marking bitmap is no longer valid. This ensures
 947     // size-based iteration in marked_object_iterate().
 948     // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
 949     // pinned regions.
 950     if (!r->is_pinned()) {
 951       _heap->marking_context()->reset_top_at_mark_start(r);
 952     }
 953 
 954     size_t live = r->used();
 955 
 956     // Turn empty regions that have had allocations into regular regions
 957     if (r->is_empty() && live > 0) {
 958       if (!_is_generational) {
 959         r->make_affiliated_maybe();
 960       }
 961       // else, generational mode compaction has already established affiliation.
 962       r->make_regular_bypass();
 963       if (ZapUnusedHeapArea) {
 964         SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
 965       }
 966     }
 967 
 968     // Reclaim regular regions that became empty
 969     if (r->is_regular() && live == 0) {
 970       r->make_trash();
 971     }
 972 
 973     // Recycle all trash regions
 974     if (r->is_trash()) {
 975       live = 0;
 976       r->try_recycle_under_lock();
 977     } else {
 978       if (r->is_old()) {
 979         ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
 980       } else if (r->is_young()) {
 981         ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
 982       }
 983     }
 984     r->set_live_data(live);
 985     r->reset_alloc_metadata();
 986   }
 987 };
 988 
 989 void ShenandoahFullGC::compact_humongous_objects() {
 990   // Compact humongous regions, based on their fwdptr objects.
 991   //
 992   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
 993   // humongous regions are already compacted, and do not require further moves, which alleviates
 994   // sliding costs. We may consider doing this in parallel in the future.
 995 
 996   ShenandoahHeap* heap = ShenandoahHeap::heap();
 997 
 998   for (size_t c = heap->num_regions(); c > 0; c--) {
 999     ShenandoahHeapRegion* r = heap->get_region(c - 1);
1000     if (r->is_humongous_start()) {
1001       oop old_obj = cast_to_oop(r->bottom());
1002       if (!FullGCForwarding::is_forwarded(old_obj)) {
1003         // No need to move the object; it stays in the same slot
1004         continue;
1005       }
1006       size_t words_size = old_obj->size();
1007       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1008 
1009       size_t old_start = r->index();
1010       size_t old_end   = old_start + num_regions - 1;
1011       size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj));
1012       size_t new_end   = new_start + num_regions - 1;
1013       assert(old_start != new_start, "must be real move");
1014       assert(r->is_stw_move_allowed(), "Region %zu should be movable", r->index());
1015 
1016       log_debug(gc)("Full GC compaction moves humongous object from region %zu to region %zu", old_start, new_start);
1017       Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
1018       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
1019 
1020       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1021       new_obj->init_mark();
1022 
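           // Retire the old footprint and establish the new one: vacated regions become empty
           // regular regions, while target regions become the humongous start and continuations,
           // keeping the original affiliation.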
1023       {
1024         ShenandoahAffiliation original_affiliation = r->affiliation();
1025         for (size_t c = old_start; c <= old_end; c++) {
1026           ShenandoahHeapRegion* r = heap->get_region(c);
1027           // Leave humongous region affiliation unchanged.
1028           r->make_regular_bypass();
1029           r->set_top(r->bottom());
1030         }
1031 
1032         for (size_t c = new_start; c <= new_end; c++) {
1033           ShenandoahHeapRegion* r = heap->get_region(c);
1034           if (c == new_start) {
1035             r->make_humongous_start_bypass(original_affiliation);
1036           } else {
1037             r->make_humongous_cont_bypass(original_affiliation);
1038           }
1039 
1040           // The trailing region may be non-full; record the remainder there
1041           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1042           if ((c == new_end) && (remainder != 0)) {
1043             r->set_top(r->bottom() + remainder);
1044           } else {
1045             r->set_top(r->end());
1046           }
1047 
1048           r->reset_alloc_metadata();
1049         }
1050       }
1051     }
1052   }
1053 }
1054 
1055 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1056 // we need to remain able to walk pinned regions.
1057 // Since pinned regions do not move and don't get compacted, we will get holes with
1058 // unreachable objects in them (which may have pointers to unloaded Klasses and thus
1059 // cannot be iterated over using oop->size()). The only way to safely iterate over those is using
1060 // a valid marking bitmap and valid TAMS pointer. This class only resets marking
1061 // bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
1062 class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
1063 private:
1064   ShenandoahRegionIterator _regions;
1065 
1066 public:
1067   ShenandoahMCResetCompleteBitmapTask() :
1068     WorkerTask("Shenandoah Reset Bitmap") {
1069   }
1070 
1071   void work(uint worker_id) override {
1072     ShenandoahParallelWorkerSession worker_session(worker_id);
1073     ShenandoahHeapRegion* region = _regions.next();
1074     ShenandoahHeap* heap = ShenandoahHeap::heap();
1075     ShenandoahMarkingContext* const ctx = heap->marking_context();
1076     assert(heap->global_generation()->is_mark_complete(), "Marking must be complete");
1077     while (region != nullptr) {
1078       if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
1079         ctx->clear_bitmap(region);
1080       }
1081       region = _regions.next();
1082     }
1083   }
1084 };
1085 
1086 void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
1087   GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
1088   ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
1089 
1090   ShenandoahHeap* heap = ShenandoahHeap::heap();
1091 
1092   // Compact regular objects first
1093   {
1094     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
1095     ShenandoahCompactObjectsTask compact_task(worker_slices);
1096     heap->workers()->run_task(&compact_task);
1097   }
1098 
1099   // Compact humongous objects after regular object moves
1100   {
1101     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1102     compact_humongous_objects();
1103   }
1104 }
1105 
1106 void ShenandoahFullGC::phase5_epilog() {
1107   GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
1108   ShenandoahHeap* heap = ShenandoahHeap::heap();
1109 
1110   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1111   // and must ensure the bitmap is in sync.
1112   {
1113     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1114     ShenandoahMCResetCompleteBitmapTask task;
1115     heap->workers()->run_task(&task);
1116   }
1117 
1118   // Bring regions in proper states after the collection, and set heap properties.
1119   {
1120     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1121     ShenandoahPostCompactClosure post_compact;
1122     heap->heap_region_iterate(&post_compact);
1123     heap->collection_set()->clear();
1124     size_t young_cset_regions, old_cset_regions;
1125     size_t first_old, last_old, num_old;
1126     heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
1127 
1128     // We also do not expand old generation size following Full GC because we have scrambled age populations and
1129     // no longer have objects separated by age into distinct regions.
1130     if (heap->mode()->is_generational()) {
1131       ShenandoahGenerationalFullGC::compute_balances();
1132     }
1133 
1134     heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
1135 
1136     // Set mark incomplete because the marking bitmaps have been reset, except for pinned regions.
1137     _generation->set_mark_incomplete();
1138 
1139     heap->clear_cancelled_gc();
1140   }
1141 
1142   _preserved_marks->restore(heap->workers());
1143   _preserved_marks->reclaim();
1144 
1145   if (heap->mode()->is_generational()) {
1146     ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
1147   }
1148 }