/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

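// Shenandoah Full GC is the stop-the-world sliding-compaction fallback. It is built to recover
// from any in-flight GC state and runs entirely at a safepoint on the VM thread, in five phases:
// mark live objects (phase 1), compute new object addresses (phase 2), adjust pointers to the
// new addresses (phase 3), move the objects (phase 4), and rebuild region states and the free
// set (phase 5).
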
ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always successful
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::handle_completion(heap);
  }

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, record that we completed a "successful" full GC.
  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::prepare();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      // TODO: Send cancel_concurrent_mark upstream? Does it really not have it already?
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->global_generation()->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->global_generation()->is_mark_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
    }

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // TODO: Do we need to explicitly retire PLABs?
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, during which region status is undefined
    // until all phases have run to completion.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  // Humongous regions are promoted on demand and are accounted for by normal Full GC mechanisms.
  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    // TODO: Add API to heap to skip free regions
    if (r->is_affiliated()) {
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);

  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::log_live_in_old(heap);
  }
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

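  // Slide one marked object towards the compaction point, spilling into the next remembered
  // empty region (or back into the from-region) when the current to-region fills up. The new
  // address is recorded as a forwarding pointer in the mark word; marks that would be
  // overwritten are saved in the preserved marks set first.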
  void do_object(oop p) {
    assert(_from_region != nullptr, "must be set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region: record the new location if the object moves.
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved and is not humongous. Humongous moves are
    // special-cased and handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;
private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
                                                               empty_regions, from_region, worker_id);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  }
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
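  // Walk the worker's slice in region order. For each region with live data, let the closure
  // slide its marked objects towards the current compaction point. Regions whose contents were
  // fully compacted elsewhere are remembered as empty, so they can serve as new to-regions.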
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();
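
  // For illustration: scanning down from the top, empty and humongous-continuation regions
  // extend the window downwards (to_begin moves down); a movable humongous start that fits
  // is forwarded to the top of the window (to_end moves down); any other region closes the
  // window and the scan restarts it at that region's index.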

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move, then continue the scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Restart the window from the current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      // Leave affiliation unchanged
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this tells the rest of the Full GC code
    // which regions are free.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (!r->is_affiliated()) {
      // Ignore free regions
      // TODO: change iterators so they do not process FREE regions.
      return;
    }

    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no worker still needs
  // live data, we only take what the worker needs.
  //
  // Second, since we slide everything to the left in each slice, the busiest regions
  // would be the ones on the left. This means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C; the target slice layout would then be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //
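  // For example, with three workers and total live data worth nine full regions, each worker
  // gets a three-region dense-prefix slice; the remaining candidate regions past the prefix
  // are then dealt out round-robin, skipping workers whose slice already carries its share
  // of live data.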

  // Compute how much live data there is. This approximates the size of the dense prefix
  // we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

// TODO:
//  Consider compacting old-gen objects toward the high end of memory and young-gen objects
//  toward the low end of memory. As currently implemented, all regions are compacted toward
//  the low end of memory. This creates more fragmentation of the heap, because old-gen regions
//  get scattered among low-address regions such that it becomes more difficult to find
//  contiguous regions for humongous objects.
void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure the pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    // TODO: This ResourceMark is missing upstream.
    ResourceMark rm;
    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

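  // Adjust a single heap slot: if it still points at a forwarded object, rewrite the slot
  // with the forwardee address that phase 2 recorded in the object's mark word.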
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (_heap->mode()->is_generational()) {
        ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
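    // Compiled code (C2/JVMCI) may hold derived pointers into objects. Clear the table before
    // the roots are adjusted, and recompute the derived pointers from their adjusted bases after.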
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

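  // Copy one marked object to the address computed in phase 2 and reinitialize its mark word;
  // marks that were preserved before forwarding are restored later, in phase5_epilog().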
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      assert(compact_from != compact_to, "Forwarded object should move");
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  bool _is_generational;
  size_t _young_regions, _young_usage, _young_humongous_waste;
  size_t _old_regions, _old_usage, _old_humongous_waste;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
                                   _is_generational(_heap->mode()->is_generational()),
                                   _young_regions(0),
                                   _young_usage(0),
                                   _young_humongous_waste(0),
                                   _old_regions(0),
                                   _old_usage(0),
                                   _old_humongous_waste(0)
  {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Turn empty regions that now hold compacted data into regular regions
    if (r->is_empty() && live > 0) {
      if (!_is_generational) {
        r->make_young_maybe();
      }
      // else, generational mode compaction has already established affiliation.
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    } else {
      if (r->is_old()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
      } else if (r->is_young()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
      }
    }
    r->set_live_data(live);
    r->reset_alloc_metadata();
  }

  void update_generation_usage() {
    if (_is_generational) {
      _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
      _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
    } else {
      assert(_old_regions == 0, "Old regions only expected in generational mode");
      assert(_old_usage == 0, "Old usage only expected in generational mode");
      assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
    }

    // In generational mode, global usage should be the sum of young and old. This is also true
    // for non-generational modes, except that there are no old regions.
    _heap->global_generation()->establish_usage(_old_regions + _young_regions,
                                                _old_usage + _young_usage,
                                                _old_humongous_waste + _young_humongous_waste);
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT, old_start, new_start);
      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // Leave humongous region affiliation unchanged.
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is
// using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    post_compact.update_generation_usage();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::balance_generations_after_gc(heap);
    }

    heap->collection_set()->clear();
    size_t young_cset_regions, old_cset_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

    // We also do not expand old generation size following Full GC, because we have scrambled
    // the age populations and no longer have objects separated by age into distinct regions.

    // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions?
    //       A partial solution would be to remember how many objects are of tenure age following Full GC, but
    //       this is probably suboptimal, because most of these objects will not reside in a region that will be
    //       selected for the next evacuation phase.

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::compute_balances();
    }

    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);

    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();

  // We defer generation resizing actions until after cset regions have been recycled. We do
  // this even following an abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
    ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
  }
}