/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

// After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
// registering all objects between bottom() and top(), and setting remembered set cards to
// DIRTY if they hold interesting pointers.
class ShenandoahReconstructRememberedSetTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahReconstructRememberedSetTask() :
    AbstractGangTask("Shenandoah Reconstruct Remembered Set") { }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* r = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    RememberedScanner* scanner = heap->card_scan();
    ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers;

    while (r != NULL) {
      if (r->is_old() && r->is_active()) {
        HeapWord* obj_addr = r->bottom();
        if (r->is_humongous_start()) {
          // Determine the humongous object's size so we know how many regions it spans
          oop obj = cast_to_oop(obj_addr);
          size_t size = obj->size();

          // First, clear the remembered set for all spanned humongous regions
          size_t num_regions = (size + ShenandoahHeapRegion::region_size_words() - 1) / ShenandoahHeapRegion::region_size_words();
          size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words();
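          // A worked example with hypothetical numbers (the real region size comes
          // from region_size_words()): assuming 256K-word regions, a humongous
          // object of size == 600K words spans num_regions == (600K + 256K - 1) /
          // 256K == 3 regions, so region_span == 3 * 256K == 768K words of
          // remembered set are reset, even though the object only covers 600K of
          // those words.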
          scanner->reset_remset(r->bottom(), region_span);
          size_t region_index = r->index();
          ShenandoahHeapRegion* humongous_region = heap->get_region(region_index);
          while (num_regions-- != 0) {
            scanner->reset_object_range(humongous_region->bottom(), humongous_region->end());
            region_index++;
            humongous_region = heap->get_region(region_index);
          }

          // Then register the humongous object and DIRTY relevant remembered set cards
          scanner->register_object_wo_lock(obj_addr);
          obj->oop_iterate(&dirty_cards_for_interesting_pointers);
        } else if (!r->is_humongous()) {
          // First, clear the remembered set
          scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words());
          scanner->reset_object_range(r->bottom(), r->end());

          // Then iterate over all objects, registering object and DIRTYing relevant remembered set cards
          HeapWord* t = r->top();
          while (obj_addr < t) {
            oop obj = cast_to_oop(obj_addr);
            scanner->register_object_wo_lock(obj_addr);
            obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers);
          }
        } // else, ignore humongous continuation region
      }
      // else, this region is FREE or YOUNG or inactive and we can ignore it.
      r = _regions.next();
    }
  }
};

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->global_generation()->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->global_generation()->is_mark_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // TODO: Do we need to explicitly retire PLABs?
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have run to completion.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();

    if (heap->mode()->is_generational()) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
      ShenandoahReconstructRememberedSetTask task;
      heap->workers()->run_task(&task);
    }
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    if (heap->mode()->is_generational()) {
      heap->verifier()->verify_after_generational_fullgc();
    } else {
      heap->verifier()->verify_after_fullgc();
    }
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    if (r->affiliation() != FREE) {
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet*        const _preserved_marks;
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  size_t                    const _num_workers;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices,
                                     size_t num_workers);

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous regions are
    // special-cased, because their moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id);
};

class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahPrepareForCompactionTask* _compactor;
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _old_to_region;
  ShenandoahHeapRegion*          _young_to_region;
  ShenandoahHeapRegion*          _from_region;
  ShenandoahRegionAffiliation    _from_affiliation;
  HeapWord*                      _old_compact_point;
  HeapWord*                      _young_compact_point;
  uint                           _worker_id;

public:
  ShenandoahPrepareForGenerationalCompactionObjectClosure(ShenandoahPrepareForCompactionTask* compactor,
                                                          PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* old_to_region,
                                                          ShenandoahHeapRegion* young_to_region, uint worker_id) :
      _compactor(compactor),
      _preserved_marks(preserved_marks),
      _heap(ShenandoahHeap::heap()),
      _empty_regions(empty_regions),
      _empty_regions_pos(0),
      _old_to_region(old_to_region),
      _young_to_region(young_to_region),
      _from_region(NULL),
      _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr),
      _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr),
      _worker_id(worker_id) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
    _from_affiliation = from_region->affiliation();
    if (_from_region->has_live()) {
      if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
        if (_old_to_region == nullptr) {
          _old_to_region = from_region;
          _old_compact_point = from_region->bottom();
        }
      } else {
        assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
        if (_young_to_region == nullptr) {
          _young_to_region = from_region;
          _young_compact_point = from_region->bottom();
        }
      }
    }
    } // else, we won't iterate over this _from_region, so we don't need to set up a to-region to hold copies
  }

  void finish() {
    finish_old_region();
    finish_young_region();
  }

  void finish_old_region() {
    if (_old_to_region != nullptr) {
      log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
                    _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
      _old_to_region->set_new_top(_old_compact_point);
      _old_to_region = nullptr;
    }
  }

  void finish_young_region() {
    if (_young_to_region != nullptr) {
      log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
                    _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
      _young_to_region->set_new_top(_young_compact_point);
      _young_to_region = nullptr;
    }
  }

  bool is_compact_same_region() {
    return (_from_region == _old_to_region) || (_from_region == _young_to_region);
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
           "Object must reside in _from_region");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
      assert(_old_to_region != nullptr, "_old_to_region should not be NULL when compacting OLD _from_region");
      if (_old_compact_point + obj_size > _old_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _old_to_region->index(),
                      p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

        // Object does not fit.  Get a new _old_to_region.
        finish_old_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(OLD_GENERATION);
        } else {
          // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
          // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
          new_to_region = _from_region;
        }

        assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
        assert(new_to_region != NULL, "must not be NULL");
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(NULL, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_old_compact_point));
      _old_compact_point += obj_size;
    } else {
      assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION,
             "_from_region must be OLD_GENERATION or YOUNG_GENERATION");

      assert(_young_to_region != nullptr, "_young_to_region should not be NULL when compacting YOUNG _from_region");
      if (_young_compact_point + obj_size > _young_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _young_to_region->index(),
                      p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

        // Object does not fit.  Get a new _young_to_region.
        finish_young_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(YOUNG_GENERATION);
        } else {
          // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
          // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
          new_to_region = _from_region;
        }

        assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
        assert(new_to_region != NULL, "must not be NULL");
        _young_to_region = new_to_region;
        _young_compact_point = _young_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(NULL, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_young_compact_point));
      _young_compact_point += obj_size;
    }
  }
};


class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks*          const _preserved_marks;
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    assert(!_heap->mode()->is_generational(), "Generational GC should use a different closure");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};


ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
                                                                       ShenandoahHeapRegionSet **worker_slices,
                                                                       size_t num_workers) :
    AbstractGangTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices), _num_workers(num_workers) { }


void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == NULL) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr;
    ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr;
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(this, _preserved_marks->get(worker_id), empty_regions,
                                                               old_to_region, young_to_region, worker_id);
    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");
      log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
                    worker_id, affiliation_name(from_region->affiliation()),
                    from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts at that non-movable region.
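  //
  // A sketch with a hypothetical layout: with 10 regions where regions 2..4 hold a
  // movable humongous object and regions 5..9 are empty, the backward scan walks the
  // empty regions and the humongous continuations, extending the window down to
  // to_begin == 3; at the humongous start, start == to_end - 3 == 7, which fits
  // (start >= to_begin) and differs from the object's own index, so the object is
  // forwarded to region 7's bottom and to_end shrinks to 7. Hitting a pinned region
  // instead would reset both to_begin and to_end to its index.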

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates that empty regions are free
    // to the rest of the Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->affiliation() != FREE) {
      if (r->is_humongous_start()) {
        oop humongous_obj = cast_to_oop(r->bottom());
        if (!_ctx->is_marked(humongous_obj)) {
          assert(!r->has_live(),
                 "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live",
                 affiliation_name(r->affiliation()),  r->index());
          log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because not marked", r->index());
          _heap->trash_humongous_region_at(r);
        } else {
          assert(r->has_live(),
                 "Humongous Start %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()),  r->index());
        }
      } else if (r->is_humongous_continuation()) {
        // If we hit a continuation, the non-live humongous starts should have been trashed already
        assert(r->humongous_start_region()->has_live(),
               "Humongous Continuation %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()),  r->index());
      } else if (r->is_regular()) {
        if (!r->has_live()) {
          log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because has no live", r->index());
          r->make_trash_immediate();
        }
      }
    }
    // else, ignore this FREE region.
    // TODO: change iterators so they do not process FREE regions.
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: build a dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need more
  // live data, we only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left, which means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data there is. This approximates the size of the dense prefix
  // we aim to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");
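
  // A worked example with hypothetical numbers: with n_workers == 8,
  // region_size_words() == 256K and total_live == 10M words, live_per_worker
  // == 1.25M words, so prefix_regions_per_worker == 5 fully-live regions and
  // the dense prefix target is prefix_regions_total == 40 regions (capped at
  // n_regions).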

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;
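
  // A sketch of the round-robin below, with hypothetical workers A, B, C: if A is
  // already at its live_per_worker budget, the do-while skips it and hands the next
  // tail region to B, then C, cycling so the fuller (left-most) tail regions go to
  // workers that still need live data. If the scan wraps all the way around
  // (old_wid == wid), every worker is at budget, and live_per_worker is bumped by
  // one region's worth of words so distribution can continue.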

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  if (heap->mode()->is_generational()) {
    heap->young_generation()->clear_used();
    heap->old_generation()->clear_used();
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    size_t num_workers = heap->max_workers();

    ResourceMark rm;
    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Turn empty regions that had allocations into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    // Update final usage for generations
    if (_heap->mode()->is_generational() && live != 0) {
      if (r->is_young()) {
        _heap->young_generation()->increase_used(live);
      } else if (r->is_old()) {
        _heap->old_generation()->increase_used(live);
      }
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on the fwdptrs of their start objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT,
                    old_start, new_start);

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahRegionAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }
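
          // A worked example with hypothetical numbers: assuming 256K-word regions,
          // a humongous object of words_size == 600K words spans three regions;
          // remainder == 600K & (256K - 1) == 88K words, so the trailing region's
          // top is set 88K words above bottom, while the preceding regions are full.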

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    if (heap->mode()->is_generational()) {
      heap->young_generation()->clear_used();
      heap->old_generation()->clear_used();
    }

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());
    if (heap->mode()->is_generational()) {
      log_info(gc)("FullGC done: GLOBAL usage: " SIZE_FORMAT ", young usage: " SIZE_FORMAT ", old usage: " SIZE_FORMAT,
                    post_compact.get_live(), heap->young_generation()->used(), heap->old_generation()->used());
    }

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc(true /* clear oom handler */);
}