/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "gc/shared/workgroup.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }
  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. These need to be reset before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());
    heap->forwarding()->clear();

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

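  // Make sure the GC state changes and TLAB retirement above are visible to all threads.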
  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done (it may have fixed up forwarded objects along the way), we can
  // drop the flag: coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }
  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases run to completion.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

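// Prepares regions for marking: captures the top-at-mark-start pointer at the current top,
// and resets the per-region live data counters.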
class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

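// Computes new (compacted) addresses for marked objects in one worker slice. Objects are
// slid towards the bottom of the current to-region; when it fills up, compaction continues
// in the next known-empty region, or within the from-region itself. Forwardings are recorded
// in the SlidingForwarding table, and mark words that need preserving are saved in
// PreservedMarks.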
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*    const _preserved_marks;
  SlidingForwarding* const _forwarding;
  ShenandoahHeap*    const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _forwarding(ShenandoahHeap::heap()->forwarding()),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    _forwarding->forward_to(p, cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Otherwise, the region is a candidate if it can be moved and is not humongous.
    // Humongous moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == NULL) {
       return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SlidingForwarding* forwarding = heap->forwarding();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, so that we know which regions in the heap suffix
  // are available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

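      // Try to place the object at the very top of the current window, occupying [start; to_end).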
      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        forwarding->forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Restart the window from the current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

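// Brings each region into a state where data can slide through it: recycles trash regions,
// demotes collection set regions back to regular, and commits empty uncommitted regions.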
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
    r->set_new_top(r->top());
  }
};

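// Reclaims immediate garbage: humongous regions whose object was not marked, and regular
// regions without any live data.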
class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have a dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live
  // data, we need to only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the busiest regions
  // would be the ones on the left. Which means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data there is. This approximates the size of the dense
  // prefix we aim to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size of the dense prefix. Note that we specifically count only the
  // "full" regions, so there will be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own
  // same-sized subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

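// Adjusts a single (possibly compressed) reference: if the referent was forwarded during
// phase 2, the slot is updated to point at the forwardee.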
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap*           const _heap;
  const SlidingForwarding*  const _forwarding;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = _forwarding->forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _forwarding(_heap->forwarding()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    const SlidingForwarding* const forwarding = ShenandoahHeap::heap()->forwarding();
    _preserved_marks->get(worker_id)->adjust_during_full_gc(forwarding);
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap*          const _heap;
  const SlidingForwarding* const _forwarding;
  uint                     const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _forwarding(_heap->forwarding()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
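    // Only objects that were assigned a new location need copying; give the copy a clean mark word.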
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(_forwarding->forwardee(p));
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

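// Rebuilds per-region state after the move: resets TAMS for unpinned regions, transitions
// regions according to their new occupancy, and accumulates total live size for heap usage
// accounting.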
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Turn formerly empty regions that received compacted data into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  const SlidingForwarding* const forwarding = heap->forwarding();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(forwarding->forwardee(old_obj));
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those
// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}