/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::handle_completion(heap);
  }

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, we record that we completed a "successful" full GC.
  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::prepare();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->global_generation()->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->global_generation()->is_mark_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
    }

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // Note: PLABs are also retired with GCLABs in generational mode.
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

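  // Full fence, presumably to publish the GC state changes and TLAB retirement
  // above before the worker threads start marking.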
  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

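// Prepares each region for STW marking: captures the top-at-mark-start pointer
// and clears stale liveness data, so that marking starts from a clean slate.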
class ShenandoahPrepareForMarkClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }

  bool is_thread_safe() override { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure prepare_for_mark;
  ShenandoahExcludeRegionClosure<FREE> cl(&prepare_for_mark);
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // Enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::log_live_in_old(heap);
  }
}

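// Computes forwarding addresses for regular objects within one worker slice.
// Live objects slide towards the bottom of the slice: each object is assigned
// the current compact point in the current to-region; when it does not fit,
// compaction continues in the next empty region, or within the from-region itself.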
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick the next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region. Record the new location, but only if the object actually moves:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Otherwise, the region must allow STW moves and must not be humongous;
    // humongous moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;
private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
                                                               empty_regions, from_region, worker_id);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  }
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into the current window, and the move is non-trivial. Record the move, and continue the scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        FullGCForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Restart the window from the current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

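// Puts every region into a state where Full GC can slide data through it:
// trash regions are recycled, collection set regions are turned back into
// regular regions, and uncommitted regions are committed.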
class ShenandoahEnsureHeapActiveClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      // Leave affiliation unchanged
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert(r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};

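// Reclaims regions that are known to be garbage immediately after marking:
// unmarked humongous objects are trashed wholesale, and regular regions
// without live data are trashed right away.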
class ShenandoahTrashImmediateGarbageClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(), "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live
  // data, we need to only take what the worker needs.
  //
  // Second, since we slide everything to the left in each slice, the busiest regions
  // would be the ones on the left. This means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //
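  // As an illustrative example (numbers made up): with 3 workers and live data
  // equivalent to 120 full regions, each worker gets a 40-region dense-prefix
  // slice, and the remaining candidate regions are dealt out round-robin to
  // workers that still need live data.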

  // Compute how much live data is there. This would approximate the size of the dense
  // prefix we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion* r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion* r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select the next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}


void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure trash_immediate_garbage;
    ShenandoahExcludeRegionClosure<FREE> cl(&trash_immediate_garbage);
    heap->heap_region_iterate(&cl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

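// Adjusts a single reference: if the referent was forwarded during the
// calculate-addresses phase, stores its new location back into the slot.
// Used for both heap object fields and GC roots in phase 3.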
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (FullGCForwarding::is_forwarded(obj)) {
        oop forw = FullGCForwarding::forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (_heap->mode()->is_generational()) {
        ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
      }
      r = _regions.next();
    }
  }
};

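// Adjusts references held in GC roots, and updates the object pointers
// recorded in the preserved marks so they refer to the new locations.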
class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

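// Moves a single object to its forwarding address, fixes up continuation
// stack chunks, and re-initializes the mark word; non-default marks are
// restored later from the preserved marks.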
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (FullGCForwarding::is_forwarded(p)) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
      assert(compact_from != compact_to, "Forwarded object should move");
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

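// Post-compaction region walk: resets TAMS for unpinned regions, returns
// regions to their proper states, recycles trash, and accumulates the
// per-generation usage that update_generation_usage() installs afterwards.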
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  bool _is_generational;
  size_t _young_regions, _young_usage, _young_humongous_waste;
  size_t _old_regions, _old_usage, _old_humongous_waste;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
                                   _is_generational(_heap->mode()->is_generational()),
                                   _young_regions(0),
                                   _young_usage(0),
                                   _young_humongous_waste(0),
                                   _old_regions(0),
                                   _old_usage(0),
                                   _old_humongous_waste(0)
  {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Turn empty regions that received allocations into regular regions
    if (r->is_empty() && live > 0) {
      if (!_is_generational) {
        r->make_affiliated_maybe();
      }
      // else, generational mode compaction has already established affiliation.
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    } else {
      if (r->is_old()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
      } else if (r->is_young()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
      }
    }
    r->set_live_data(live);
    r->reset_alloc_metadata();
  }

  void update_generation_usage() {
    if (_is_generational) {
      _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
      _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
    } else {
      assert(_old_regions == 0, "Old regions only expected in generational mode");
      assert(_old_usage == 0, "Old usage only expected in generational mode");
      assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
    }

    // In generational mode, global usage should be the sum of young and old. This is also true
    // for non-generational modes, except that there are no old regions.
    _heap->global_generation()->establish_usage(_old_regions + _young_regions,
                                                _old_usage + _young_usage,
                                                _old_humongous_waste + _young_humongous_waste);
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!FullGCForwarding::is_forwarded(old_obj)) {
        // No need to move the object; it stays in the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj));
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT, old_start, new_start);
      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      // Relativize the copied-to object: the copy at the old location is about to be clobbered.
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(heap->get_region(new_start)->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // Leave humongous region affiliation unchanged.
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    post_compact.update_generation_usage();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::balance_generations_after_gc(heap);
    }

    heap->collection_set()->clear();
    size_t young_cset_regions, old_cset_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

    // We do not expand the old generation size following Full GC, because we have scrambled
    // the age populations and no longer have objects separated by age into distinct regions.
    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::compute_balances();
    }

    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);

    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();

  // We defer generation resizing actions until after cset regions have been recycled.
  // We do this even following an abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
    ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
  }
}