/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Full GC always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Marking may have fixed up forwarded objects, so once it is done we can drop
  // the has-forwarded-objects flag: coming out of Full GC, we would not have any
  // forwarded objects. This also prevents resolves with fwdptr from kicking in
  // while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is
    // undefined until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // Enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

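// Sliding compaction primer: the closure below walks live objects in address
// order and forwards each one to the current compact point. Illustrative
// example (assumed numbers): if a to-region starts empty and the from-region
// holds live objects A (3 words) and B (5 words), A is forwarded to
// bottom() + 0 and B to bottom() + 3, and the compact point ends at
// bottom() + 8. When the next object would not fit into the current to-region,
// compaction continues in the next remembered empty region (or in the
// from-region itself, once empty regions run out).
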
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick the next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region, record the new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
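    // push_if_necessary() preserves the mark word only when it carries
    // something beyond the default (e.g. lock bits or an identity hashcode);
    // forward_to() then overwrites the mark word with the forwarding pointer
    // that phase4 reads back via forwardee().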
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can move, and it is not a humongous region. Humongous
    // moves are not handled here: they are special-cased and processed
    // separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? The from-region is now empty.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after the
  // addresses for regular objects are calculated, when we know which regions in the
  // heap suffix are available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region
  // is detected, sliding restarts towards that non-movable region.
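  //
  // Illustrative example (assumed layout): suppose regions 4..9 of a 10-region
  // heap were emptied by the regular sliding (new_top == bottom), so the
  // backward scan grows the window to [4; 10). A movable humongous object
  // spanning regions 2..3 computes start = to_end - 2 = 8, which fits in the
  // window, so the object is forwarded to regions 8..9 and to_end drops to 8.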

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into the current window, and the move is non-trivial. Record the move, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Restart the scan from the current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates which regions are free
    // to the rest of the Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous start should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live
  // data, we need to only take what the worker needs.
  //
  // Second, since we slide everything to the left in each slice, the busiest regions
  // would be the ones on the left. This means we want every worker to have its after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C; then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //
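  // Worked example (assumed numbers): with 3 workers, 300 candidate regions,
  // and total_live equal to 210 regions' worth of words, each worker targets
  // live_per_worker = 70 regions' worth of live data, and therefore gets
  // prefix_regions_per_worker = 70 dense-prefix regions. The remaining
  // candidate regions are dealt out round-robin to whichever workers are
  // still short of their live_per_worker budget.
  //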

  // Compute how much live data there is. This approximates the size of the
  // dense prefix we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail
  // actually ends up, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own
  // same-sized subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select the next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure the
  // pinning status has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
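    // Note (assumption about the surrounding mechanism): derived pointers in
    // compiled frames are recorded in the DerivedPointerTable while the roots
    // are scanned below, and are recomputed from their adjusted base pointers
    // by update_pointers() afterwards.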
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
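      // Slide the object to its phase2-computed target, then reinstall a clean
      // mark word: non-default marks were preserved during phase2 and are
      // restored from the preserved marks set in the Full GC epilogue.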
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See the blurb at ShenandoahMCResetCompleteBitmapTask on why we need
    // to skip pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky.
  // In most cases, humongous regions are already compacted and do not require
  // further moves, which alleviates the sliding costs. We may consider doing
  // this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // The trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
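          // For illustration (assumed region size): with 32768-word regions,
          // a 70000-word humongous object spans three regions and leaves
          // remainder = 70000 & 32767 = 4464 words, so the top of the last
          // region lands 4464 words above its bottom.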
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and are not compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over
// those is using a valid marking bitmap and a valid TAMS pointer. This class only
// resets marking bitmaps for unpinned regions, and later we only reset TAMS for
// unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}