src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/continuationGCSupport.hpp"
  29 #include "gc/shared/gcTraceTime.inline.hpp"
  30 #include "gc/shared/preservedMarks.inline.hpp"
  31 #include "gc/shared/tlab_globals.hpp"
  32 #include "gc/shared/workerThread.hpp"
  33 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahFullGC.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  45 #include "gc/shenandoah/shenandoahMetrics.hpp"
  46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  47 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  48 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  49 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"
  51 #include "gc/shenandoah/shenandoahVerifier.hpp"
  52 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  53 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  54 #include "memory/metaspaceUtils.hpp"
  55 #include "memory/universe.hpp"
  56 #include "oops/compressedOops.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "runtime/javaThread.hpp"
  59 #include "runtime/orderAccess.hpp"
  60 #include "runtime/vmThread.hpp"
  61 #include "utilities/copy.hpp"
  62 #include "utilities/events.hpp"
  63 #include "utilities/growableArray.hpp"
  64 
  65 ShenandoahFullGC::ShenandoahFullGC() :
  66   _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  67   _preserved_marks(new PreservedMarksSet(true)) {}
  68 
  69 ShenandoahFullGC::~ShenandoahFullGC() {
  70   delete _preserved_marks;
  71 }
  72 
  73 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  74   vmop_entry_full(cause);
  75   // Always succeeds
  76   return true;
  77 }
  78 
  79 void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  80   ShenandoahHeap* const heap = ShenandoahHeap::heap();
  81   TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  82   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
  83 
  84   heap->try_inject_alloc_failure();
  85   VM_ShenandoahFullGC op(cause, this);
  86   VMThread::execute(&op);
  87 }
  88 
  89 void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  90   static const char* msg = "Pause Full";
  91   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  92   EventMark em("%s", msg);
  93 
  94   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
  95                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
  96                               "full gc");
  97 
  98   op_full(cause);
  99 }
 100 
 101 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
 102   ShenandoahMetricsSnapshot metrics;
 103   metrics.snap_before();
 104 
 105   // Perform full GC
 106   do_it(cause);
 107 
 108   metrics.snap_after();
 109 
 110   if (metrics.is_good_progress()) {
 111     ShenandoahHeap::heap()->notify_gc_progress();
 112   } else {
 113     // Nothing to do. Tell the allocation path that we have failed to make
 114     // progress, so it can finally fail.
 115     ShenandoahHeap::heap()->notify_gc_no_progress();
 116   }
 117 }
 118 
 119 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 120   ShenandoahHeap* heap = ShenandoahHeap::heap();
 121 
 122   if (ShenandoahVerify) {
 123     heap->verifier()->verify_before_fullgc();
 124   }
 125 
 126   if (VerifyBeforeGC) {
 127     Universe::verify();
 128   }
 129 
 130   // Degenerated GC may carry concurrent root flags when upgrading to
 131   // full GC. We need to reset them before mutators resume.
 132   heap->set_concurrent_strong_root_in_progress(false);
 133   heap->set_concurrent_weak_root_in_progress(false);
 134 
 135   heap->set_full_gc_in_progress(true);
 136 
 137   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 138   assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
 139 
 140   {
 144 
 145   {
 146     ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
 147     // Full GC is supposed to recover from any GC state:
 148 
 149     // a0. Remember if we have forwarded objects
 150     bool has_forwarded_objects = heap->has_forwarded_objects();
 151 
 152     // a1. Cancel evacuation, if in progress
 153     if (heap->is_evacuation_in_progress()) {
 154       heap->set_evacuation_in_progress(false);
 155     }
 156     assert(!heap->is_evacuation_in_progress(), "sanity");
 157 
 158     // a2. Cancel update-refs, if in progress
 159     if (heap->is_update_refs_in_progress()) {
 160       heap->set_update_refs_in_progress(false);
 161     }
 162     assert(!heap->is_update_refs_in_progress(), "sanity");
 163 
 164     // b. Cancel concurrent mark, if in progress
 165     if (heap->is_concurrent_mark_in_progress()) {
 166       ShenandoahConcurrentGC::cancel();
 167       heap->set_concurrent_mark_in_progress(false);
 168     }
 169     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 170 
 171     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
 172     if (has_forwarded_objects) {
 173       update_roots(true /*full_gc*/);
 174     }
 175 
 176     // d. Reset the bitmaps for new marking
 177     heap->reset_mark_bitmap();
 178     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 179     assert(!heap->marking_context()->is_complete(), "sanity");
 180 
 181     // e. Abandon reference discovery and clear all discovered references.
 182     ShenandoahReferenceProcessor* rp = heap->ref_processor();
 183     rp->abandon_partial_discovery();
 184 
 185     // f. Sync pinned region status from the CP marks
 186     heap->sync_pinned_region_status();
 187 
 188     // The rest of prologue:
 189     _preserved_marks->init(heap->workers()->active_workers());
 190 
 191     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 192   }
 193 
 194   if (UseTLAB) {
 195     heap->gclabs_retire(ResizeTLAB);
 196     heap->tlabs_retire(ResizeTLAB);
 197   }
 198 
 199   OrderAccess::fence();
 200 
 201   phase1_mark_heap();
 202 
 203   // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
 204   // Coming out of Full GC, we would not have any forwarded objects.
 205   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 206   heap->set_has_forwarded_objects(false);
 207 
 208   heap->set_full_gc_move_in_progress(true);
 209 
 210   // Set up workers for the rest
 211   OrderAccess::fence();
 212 
 213   // Initialize worker slices
 214   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
 217   }
 218 
 219   {
 220     // The rest of the code performs region moves, where region status is undefined
 221     // until all phases run together.
 222     ShenandoahHeapLocker lock(heap->lock());
 223 
 224     phase2_calculate_target_addresses(worker_slices);
 225 
 226     OrderAccess::fence();
 227 
 228     phase3_update_references();
 229 
 230     phase4_compact_objects(worker_slices);
 231   }
 232 
 233   {
 234     // Epilogue
 235     _preserved_marks->restore(heap->workers());
 236     _preserved_marks->reclaim();
 237   }
 238 
 239   // Resize metaspace
 240   MetaspaceGC::compute_new_size();
 241 
 242   // Free worker slices
 243   for (uint i = 0; i < heap->max_workers(); i++) {
 244     delete worker_slices[i];
 245   }
 246   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 247 
 248   heap->set_full_gc_move_in_progress(false);
 249   heap->set_full_gc_in_progress(false);
 250 
 251   if (ShenandoahVerify) {
 252     heap->verifier()->verify_after_fullgc();
 253   }
 254 
 255   if (VerifyAfterGC) {
 256     Universe::verify();
 257   }
 258 
 259   {
 260     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
 261     heap->post_full_gc_dump(_gc_timer);
 262   }
 263 }
 264 
 265 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 266 private:
 267   ShenandoahMarkingContext* const _ctx;
 268 
 269 public:
 270   ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 271 
 272   void heap_region_do(ShenandoahHeapRegion *r) {
 273     _ctx->capture_top_at_mark_start(r);
 274     r->clear_live_data();
 275   }
 276 };
 277 
 278 void ShenandoahFullGC::phase1_mark_heap() {
 279   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 280   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 281 
 282   ShenandoahHeap* heap = ShenandoahHeap::heap();
 283 
 284   ShenandoahPrepareForMarkClosure cl;
 285   heap->heap_region_iterate(&cl);
 286 
 287   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
 288 
 289   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 290   // enable ("weak") refs discovery
 291   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 292 
 293   ShenandoahSTWMark mark(true /*full_gc*/);
 294   mark.mark();
 295   heap->parallel_cleaning(true /* full_gc */);
 296 }
 297 
 298 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 299 private:
 300   PreservedMarks*          const _preserved_marks;
 301   ShenandoahHeap*          const _heap;
 302   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 303   int _empty_regions_pos;
 304   ShenandoahHeapRegion*          _to_region;
 305   ShenandoahHeapRegion*          _from_region;
 306   HeapWord* _compact_point;
 307 
 308 public:
 309   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 310                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 311                                               ShenandoahHeapRegion* to_region) :
 312     _preserved_marks(preserved_marks),
 313     _heap(ShenandoahHeap::heap()),
 314     _empty_regions(empty_regions),
 315     _empty_regions_pos(0),
 316     _to_region(to_region),
 317     _from_region(nullptr),
 318     _compact_point(to_region->bottom()) {}
 319 
 320   void set_from_region(ShenandoahHeapRegion* from_region) {
 321     _from_region = from_region;
 322   }
 323 
 324   void finish_region() {
 325     assert(_to_region != nullptr, "should not happen");
 326     _to_region->set_new_top(_compact_point);
 327   }
 328 
 329   bool is_compact_same_region() {
 330     return _from_region == _to_region;
 331   }
 332 
 333   int empty_regions_pos() {
 334     return _empty_regions_pos;
 335   }
 336 
 337   void do_object(oop p) {
 338     assert(_from_region != nullptr, "must set before work");
 339     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 340     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 341 
 342     size_t obj_size = p->size();
 343     if (_compact_point + obj_size > _to_region->end()) {
 344       finish_region();
 345 
 351       } else {
 352         // Out of empty region? Compact within the same region.
 353         new_to_region = _from_region;
 354       }
 355 
 356       assert(new_to_region != _to_region, "must not reuse same to-region");
 357       assert(new_to_region != nullptr, "must not be null");
 358       _to_region = new_to_region;
 359       _compact_point = _to_region->bottom();
 360     }
 361 
 362     // Object fits into current region, record new location:
 363     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 364     shenandoah_assert_not_forwarded(nullptr, p);
 365     _preserved_marks->push_if_necessary(p, p->mark());
 366     p->forward_to(cast_to_oop(_compact_point));
 367     _compact_point += obj_size;
 368   }
 369 };
 370 
 371 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 372 private:
 373   PreservedMarksSet*        const _preserved_marks;
 374   ShenandoahHeap*           const _heap;
 375   ShenandoahHeapRegionSet** const _worker_slices;
 376 
 377 public:
 378   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
 379     WorkerTask("Shenandoah Prepare For Compaction"),
 380     _preserved_marks(preserved_marks),
 381     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 382   }
 383 
 384   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 385     // Empty region: get it into the slice to defragment the slice itself.
 386     // We could have skipped this without violating correctness, but we really
 387     // want to compact all live regions to the start of the heap, which sometimes
 388     // means moving them into the fully empty regions.
 389     if (r->is_empty()) return true;
 390 
 391     // The region can be moved, and it is not humongous. Humongous
 392     // moves are special-cased, because they are handled separately.
 393     return r->is_stw_move_allowed() && !r->is_humongous();
 394   }
 395 
 396   void work(uint worker_id) {
 397     ShenandoahParallelWorkerSession worker_session(worker_id);
 398     ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 399     ShenandoahHeapRegionSetIterator it(slice);
 400     ShenandoahHeapRegion* from_region = it.next();
 401     // No work?
 402     if (from_region == nullptr) {
 403        return;
 404     }
 405 
 406     // Sliding compaction. Walk all regions in the slice, and compact them.
 407     // Remember empty regions and reuse them as needed.
 408     ResourceMark rm;
 409 
 410     GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
 411 
 412     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
 413 
 414     while (from_region != nullptr) {
 415       assert(is_candidate_region(from_region), "Sanity");
 416 
 417       cl.set_from_region(from_region);
 418       if (from_region->has_live()) {
 419         _heap->marked_object_iterate(from_region, &cl);
 420       }
 421 
 422       // Compacted the region somewhere else? Then the from-region is empty.
 423       if (!cl.is_compact_same_region()) {
 424         empty_regions.append(from_region);
 425       }
 426       from_region = it.next();
 427     }
 428     cl.finish_region();
 429 
 430     // Mark all remaining regions as empty
 431     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 432       ShenandoahHeapRegion* r = empty_regions.at(pos);
 433       r->set_new_top(r->bottom());
 434     }
 435   }
 436 };
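
// A minimal standalone sketch of the sliding compaction performed by the two
// classes above, reduced to a flat array of abstract "objects" (all names are
// illustrative, not part of the Shenandoah API). The recorded new_index plays
// the role of the forwarding pointer installed by p->forward_to().

#include <cstddef>
#include <vector>

struct SketchObject {
  size_t size;       // object size in words
  bool   live;       // marked live by phase 1
  size_t new_index;  // "forwarding pointer": first word of the new location
};

// Walk objects in address order, bump the compact point past each live
// object, and record the target as the forwarding index. In the real task,
// each worker does this per slice, spilling into remembered empty regions
// whenever the current to-region fills up.
inline void sketch_prepare_for_compaction(std::vector<SketchObject>& objs) {
  size_t compact_point = 0;
  for (SketchObject& o : objs) {
    if (o.live) {
      o.new_index = compact_point;
      compact_point += o.size;
    }
  }
}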
 437 
 438 void ShenandoahFullGC::calculate_target_humongous_objects() {
 439   ShenandoahHeap* heap = ShenandoahHeap::heap();
 440 
 441   // Compute the new addresses for humongous objects. We need to do this after addresses
 442   // for regular objects are calculated, and we know which regions in the heap suffix are
 443   // available for humongous moves.
 444   //
 445   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 446   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 447   // a humongous start region there.
 448   //
 449   // The complication is potential non-movable regions during the scan. If such a region is
 450   // detected, then sliding restarts at that non-movable region.
 451 
 452   size_t to_begin = heap->num_regions();
 453   size_t to_end = heap->num_regions();
 454 
 455   for (size_t c = heap->num_regions(); c > 0; c--) {
 456     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 457     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 458       // To-region candidate: record this, and continue scan
 459       to_begin = r->index();
 460       continue;
 461     }
 462 
 463     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 464       // From-region candidate: movable humongous region
 465       oop old_obj = cast_to_oop(r->bottom());
 466       size_t words_size = old_obj->size();
 467       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 468 
 469       size_t start = to_end - num_regions;
 470 
 471       if (start >= to_begin && start != r->index()) {
 472         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 473         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 474         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
 475         to_end = start;
 476         continue;
 477       }
 478     }
 479 
 480     // Failed to fit. Restart the compaction window at the current region.
 481     to_begin = r->index();
 482     to_end = r->index();
 483   }
 484 }
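
// A standalone sketch of the sliding-window scan above, over plain arrays
// (illustrative names, not Shenandoah API). Scanning backwards: empty regions
// and humongous continuations extend the window downwards, a movable humongous
// start slides to the top of the window, and anything else restarts the window
// at the current region.

#include <cstddef>

// new_start[i] receives the target first-region index for a humongous start
// at region i; the caller pre-fills new_start[i] = i, meaning "does not move".
inline void sketch_slide_humongous(const bool* is_window_candidate,
                                   const bool* is_humongous_start,
                                   const size_t* spanned_regions,
                                   size_t n, size_t* new_start) {
  size_t to_begin = n;
  size_t to_end   = n;
  for (size_t c = n; c > 0; c--) {
    size_t i = c - 1;
    if (is_window_candidate[i]) {       // empty region or humongous continuation
      to_begin = i;
      continue;
    }
    if (is_humongous_start[i]) {
      size_t need = spanned_regions[i];
      if (need <= to_end) {             // guard against underflow on odd inputs
        size_t start = to_end - need;
        if (start >= to_begin && start != i) {
          new_start[i] = start;         // fits, and the move is non-trivial
          to_end = start;               // window shrinks from the top
          continue;
        }
      }
    }
    to_begin = i;                       // failed to fit: restart the window here
    to_end   = i;
  }
}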
 485 
 486 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 487 private:
 488   ShenandoahHeap* const _heap;
 489 
 490 public:
 491   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 492   void heap_region_do(ShenandoahHeapRegion* r) {
 493     if (r->is_trash()) {
 494       r->recycle();
 495     }
 496     if (r->is_cset()) {
 497       r->make_regular_bypass();
 498     }
 499     if (r->is_empty_uncommitted()) {
 500       r->make_committed_bypass();
 501     }
 502     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 503 
 504     // Record current region occupancy: this tells the rest of the Full GC
 505     // code that empty regions are free.
 506     r->set_new_top(r->top());
 507   }
 508 };
 509 
 510 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
 511 private:
 512   ShenandoahHeap* const _heap;
 513   ShenandoahMarkingContext* const _ctx;
 514 
 515 public:
 516   ShenandoahTrashImmediateGarbageClosure() :
 517     _heap(ShenandoahHeap::heap()),
 518     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 519 
 520   void heap_region_do(ShenandoahHeapRegion* r) {
 521     if (r->is_humongous_start()) {
 522       oop humongous_obj = cast_to_oop(r->bottom());
 523       if (!_ctx->is_marked(humongous_obj)) {
 524         assert(!r->has_live(),
 525                "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
 526         _heap->trash_humongous_region_at(r);
 527       } else {
 528         assert(r->has_live(),
 529                "Region " SIZE_FORMAT " should have live", r->index());
 530       }
 531     } else if (r->is_humongous_continuation()) {
 532       // If we hit a continuation, its non-live humongous start should have been trashed already
 533       assert(r->humongous_start_region()->has_live(),
 534              "Region " SIZE_FORMAT " should have live", r->index());
 535     } else if (r->is_regular()) {
 536       if (!r->has_live()) {
 537         r->make_trash_immediate();
 538       }
 539     }
 540   }
 541 };
 542 
 543 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
 544   ShenandoahHeap* heap = ShenandoahHeap::heap();
 545 
 546   uint n_workers = heap->workers()->active_workers();
 547   size_t n_regions = heap->num_regions();
 548 
 549   // What we want to accomplish: a dense prefix of data, while still balancing
 550   // out the parallel work.
 551   //
 552   // Assuming the amount of work is driven by the live data that needs moving, we can slice
 553   // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
 554   // thread takes all regions in its prefix subset, and then it takes some regions from
 555   // the tail.
 556   //
 557   // Tail region selection becomes interesting.
 558   //
 559   // First, we want to distribute the regions fairly between the workers, and those regions
 686   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
 687   ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
 688 
 689   ShenandoahHeap* heap = ShenandoahHeap::heap();
 690 
 691   // We are about to figure out which regions can be compacted; make sure the
 692   // pinning status was updated in the GC prologue.
 693   heap->assert_pinned_region_status();
 694 
 695   {
 696     // Trash the immediately collectible regions before computing addresses
 697     ShenandoahTrashImmediateGarbageClosure tigcl;
 698     heap->heap_region_iterate(&tigcl);
 699 
 700     // Make sure regions are in good state: committed, active, clean.
 701     // This is needed because we are potentially sliding the data through them.
 702     ShenandoahEnsureHeapActiveClosure ecl;
 703     heap->heap_region_iterate(&ecl);
 704   }
 705 
 706   // Compute the new addresses for regular objects
 707   {
 708     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 709 
 710     distribute_slices(worker_slices);
 711 
 712     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
 713     heap->workers()->run_task(&task);
 714   }
 715 
 716   // Compute the new addresses for humongous objects
 717   {
 718     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 719     calculate_target_humongous_objects();
 720   }
 721 }
 722 
 723 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 724 private:
 725   ShenandoahHeap* const _heap;
 726   ShenandoahMarkingContext* const _ctx;
 727 
 728   template <class T>
 729   inline void do_oop_work(T* p) {
 730     T o = RawAccess<>::oop_load(p);
 731     if (!CompressedOops::is_null(o)) {
 732       oop obj = CompressedOops::decode_not_null(o);
 766 
 767 class ShenandoahAdjustPointersTask : public WorkerTask {
 768 private:
 769   ShenandoahHeap*          const _heap;
 770   ShenandoahRegionIterator       _regions;
 771 
 772 public:
 773   ShenandoahAdjustPointersTask() :
 774     WorkerTask("Shenandoah Adjust Pointers"),
 775     _heap(ShenandoahHeap::heap()) {
 776   }
 777 
 778   void work(uint worker_id) {
 779     ShenandoahParallelWorkerSession worker_session(worker_id);
 780     ShenandoahAdjustPointersObjectClosure obj_cl;
 781     ShenandoahHeapRegion* r = _regions.next();
 782     while (r != nullptr) {
 783       if (!r->is_humongous_continuation() && r->has_live()) {
 784         _heap->marked_object_iterate(r, &obj_cl);
 785       }
 786       r = _regions.next();
 787     }
 788   }
 789 };
 790 
 791 class ShenandoahAdjustRootPointersTask : public WorkerTask {
 792 private:
 793   ShenandoahRootAdjuster* _rp;
 794   PreservedMarksSet* _preserved_marks;
 795 public:
 796   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
 797     WorkerTask("Shenandoah Adjust Root Pointers"),
 798     _rp(rp),
 799     _preserved_marks(preserved_marks) {}
 800 
 801   void work(uint worker_id) {
 802     ShenandoahParallelWorkerSession worker_session(worker_id);
 803     ShenandoahAdjustPointersClosure cl;
 804     _rp->roots_do(worker_id, &cl);
 805     _preserved_marks->get(worker_id)->adjust_during_full_gc();
 878         _heap->marked_object_iterate(r, &cl);
 879       }
 880       r->set_top(r->new_top());
 881       r = slice.next();
 882     }
 883   }
 884 };
 885 
 886 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
 887 private:
 888   ShenandoahHeap* const _heap;
 889   size_t _live;
 890 
 891 public:
 892   ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
 893     _heap->free_set()->clear();
 894   }
 895 
 896   void heap_region_do(ShenandoahHeapRegion* r) {
 897     assert (!r->is_cset(), "cset regions should have been demoted already");
 898 
 899     // Need to reset the complete-top-at-mark-start pointer here because
 900     // the complete marking bitmap is no longer valid. This ensures
 901     // size-based iteration in marked_object_iterate().
 902     // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
 903     // pinned regions.
 904     if (!r->is_pinned()) {
 905       _heap->complete_marking_context()->reset_top_at_mark_start(r);
 906     }
 907 
 908     size_t live = r->used();
 909 
 910     // Empty regions that have been allocated into become regular regions
 911     if (r->is_empty() && live > 0) {
 912       r->make_regular_bypass();
 913     }
 914 
 915     // Reclaim regular regions that became empty
 916     if (r->is_regular() && live == 0) {
 917       r->make_trash();
 918     }
 919 
 920     // Recycle all trash regions
 921     if (r->is_trash()) {
 922       live = 0;
 923       r->recycle();
 924     }
 925 
 926     r->set_live_data(live);
 927     r->reset_alloc_metadata();
 928     _live += live;
 929   }
 930 
 931   size_t get_live() {
 932     return _live;
 933   }
 934 };
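
// The post-compaction region transitions above, as a standalone sketch
// (illustrative names, not Shenandoah API): regions that were empty but got
// compacted into become regular, regular regions that lost all live data
// become trash, and trash is recycled with zero live accounting.

#include <cstddef>

enum class SketchState { Empty, Regular, Trash };

inline SketchState sketch_post_compact(SketchState s, size_t& live) {
  if (s == SketchState::Empty && live > 0)    s = SketchState::Regular; // allocated into
  if (s == SketchState::Regular && live == 0) s = SketchState::Trash;   // became empty
  if (s == SketchState::Trash)                live = 0;                 // recycled
  return s;
}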
 935 
 936 void ShenandoahFullGC::compact_humongous_objects() {
 937   // Compact humongous regions, based on their fwdptr objects.
 938   //
 939   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
 940   // humongous regions are already compacted, and do not require further moves, which alleviates
 941   // sliding costs. We may consider doing this in parallel in the future.
 942 
 943   ShenandoahHeap* heap = ShenandoahHeap::heap();
 944 
 945   for (size_t c = heap->num_regions(); c > 0; c--) {
 946     ShenandoahHeapRegion* r = heap->get_region(c - 1);
 947     if (r->is_humongous_start()) {
 948       oop old_obj = cast_to_oop(r->bottom());
 949       if (!old_obj->is_forwarded()) {
 950         // No need to move the object; it stays in the same slot
 951         continue;
 952       }
 953       size_t words_size = old_obj->size();
 954       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 955 
 956       size_t old_start = r->index();
 957       size_t old_end   = old_start + num_regions - 1;
 958       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
 959       size_t new_end   = new_start + num_regions - 1;
 960       assert(old_start != new_start, "must be real move");
 961       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
 962 
 963       Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
 964       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
 965 
 966       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
 967       new_obj->init_mark();
 968 
 969       {
 970         for (size_t c = old_start; c <= old_end; c++) {
 971           ShenandoahHeapRegion* r = heap->get_region(c);
 972           r->make_regular_bypass();
 973           r->set_top(r->bottom());
 974         }
 975 
 976         for (size_t c = new_start; c <= new_end; c++) {
 977           ShenandoahHeapRegion* r = heap->get_region(c);
 978           if (c == new_start) {
 979             r->make_humongous_start_bypass();
 980           } else {
 981             r->make_humongous_cont_bypass();
 982           }
 983 
 984           // The trailing region may be non-full; record the remainder there
 985           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 986           if ((c == new_end) && (remainder != 0)) {
 987             r->set_top(r->bottom() + remainder);
 988           } else {
 989             r->set_top(r->end());
 990           }
 991 
 992           r->reset_alloc_metadata();
 993         }
 994       }
 995     }
 996   }
 997 }
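
// The trailing-region remainder above relies on the region size in words being
// a power of two, so masking with region_size_words_mask() (== size - 1) is a
// cheap modulo. A small numeric sketch, with an illustrative region size:

#include <cassert>
#include <cstddef>

inline void sketch_trailing_remainder() {
  const size_t region_words = size_t(1) << 18;       // 256K words, for illustration
  const size_t mask         = region_words - 1;      // stands in for region_size_words_mask()
  const size_t object_words = 3 * region_words + 7;  // 3 full regions + 7 words
  assert((object_words & mask) == object_words % region_words);
  assert((object_words & mask) == 7);  // remainder sets top of the 4th, trailing region
}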
 998 
 999 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1000 // we need to remain able to walk pinned regions.
1001 // Since pinned regions do not move and don't get compacted, we will get holes with
1040   }
1041 
1042   // Compact humongous objects after regular object moves
1043   {
1044     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1045     compact_humongous_objects();
1046   }
1047 
1048   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1049   // and must ensure the bitmap is in sync.
1050   {
1051     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1052     ShenandoahMCResetCompleteBitmapTask task;
1053     heap->workers()->run_task(&task);
1054   }
1055 
1056   // Bring regions in proper states after the collection, and set heap properties.
1057   {
1058     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1059 
1060     ShenandoahPostCompactClosure post_compact;
1061     heap->heap_region_iterate(&post_compact);
1062     heap->set_used(post_compact.get_live());
1063 
1064     heap->collection_set()->clear();
1065     heap->free_set()->rebuild();
1066   }
1067 
1068   heap->clear_cancelled_gc();
1069 }

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/continuationGCSupport.hpp"
  29 #include "gc/shared/gcTraceTime.inline.hpp"
  30 #include "gc/shared/preservedMarks.inline.hpp"
  31 #include "gc/shared/tlab_globals.hpp"
  32 #include "gc/shared/workerThread.hpp"
  33 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahFullGC.hpp"
  38 #include "gc/shenandoah/shenandoahGeneration.hpp"
  39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc/shenandoah/shenandoahMetrics.hpp"
  47 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  48 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  49 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  50 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  51 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  52 #include "gc/shenandoah/shenandoahUtils.hpp"
  53 #include "gc/shenandoah/shenandoahVerifier.hpp"
  54 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  55 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  56 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  57 #include "memory/metaspaceUtils.hpp"
  58 #include "memory/universe.hpp"
  59 #include "oops/compressedOops.inline.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "runtime/javaThread.hpp"
  62 #include "runtime/orderAccess.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "utilities/copy.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/growableArray.hpp"
  67 
  68 // After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
  69 // registering all objects between bottom() and top(), and setting remembered set cards to
  70 // DIRTY if they hold interesting pointers.
  71 class ShenandoahReconstructRememberedSetTask : public WorkerTask {
  72 private:
  73   ShenandoahRegionIterator _regions;
  74 
  75 public:
  76   ShenandoahReconstructRememberedSetTask() :
  77     WorkerTask("Shenandoah Reset Bitmap") { }
  78 
  79   void work(uint worker_id) {
  80     ShenandoahParallelWorkerSession worker_session(worker_id);
  81     ShenandoahHeapRegion* r = _regions.next();
  82     ShenandoahHeap* heap = ShenandoahHeap::heap();
  83     RememberedScanner* scanner = heap->card_scan();
  84     ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers;
  85 
  86     while (r != nullptr) {
  87       if (r->is_old() && r->is_active()) {
  88         HeapWord* obj_addr = r->bottom();
  89         if (r->is_humongous_start()) {
  90           // Compute the extent of the humongous object
  91           oop obj = cast_to_oop(obj_addr);
  92           size_t size = obj->size();
  93           HeapWord* end_object = r->bottom() + size;
  94 
  95           // First, clear the remembered set for all spanned humongous regions
  96           size_t num_regions = (size + ShenandoahHeapRegion::region_size_words() - 1) / ShenandoahHeapRegion::region_size_words();
  97           size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words();
  98           scanner->reset_remset(r->bottom(), region_span);
  99           size_t region_index = r->index();
 100           ShenandoahHeapRegion* humongous_region = heap->get_region(region_index);
 101           while (num_regions-- != 0) {
 102             scanner->reset_object_range(humongous_region->bottom(), humongous_region->end());
 103             region_index++;
 104             humongous_region = heap->get_region(region_index);
 105           }
 106 
 107           // Then register the humongous object and DIRTY relevant remembered set cards
 108           scanner->register_object_wo_lock(obj_addr);
 109           obj->oop_iterate(&dirty_cards_for_interesting_pointers);
 110         } else if (!r->is_humongous()) {
 111           // First, clear the remembered set
 112           scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words());
 113           scanner->reset_object_range(r->bottom(), r->end());
 114 
 115           // Then iterate over all objects, registering each object and DIRTYing relevant remembered set cards
 116           HeapWord* t = r->top();
 117           while (obj_addr < t) {
 118             oop obj = cast_to_oop(obj_addr);
 119             size_t size = obj->size();
 120             scanner->register_object_wo_lock(obj_addr);
 121             obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers);
 122           }
 123         } // else, ignore humongous continuation region
 124       }
 125       // else, this region is FREE or YOUNG or inactive and we can ignore it.
 126       r = _regions.next();
 127     }
 128   }
 129 };
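
// The humongous span computed above is a plain ceiling division: an object of
// object_words words covers ceil(object_words / region_words) regions. A
// standalone sketch (illustrative names):

#include <cstddef>

inline size_t sketch_spanned_regions(size_t object_words, size_t region_words) {
  return (object_words + region_words - 1) / region_words;
}

// For example, 2.5 regions' worth of words spans 3 regions:
//   sketch_spanned_regions(5, 2) == 3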
 130 
 131 ShenandoahFullGC::ShenandoahFullGC() :
 132   _gc_timer(ShenandoahHeap::heap()->gc_timer()),
 133   _preserved_marks(new PreservedMarksSet(true)) {}
 134 
 135 ShenandoahFullGC::~ShenandoahFullGC() {
 136   delete _preserved_marks;
 137 }
 138 
 139 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
 140   vmop_entry_full(cause);
 142   // Always succeeds
 142   return true;
 143 }
 144 
 145 void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
 146   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 147   TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
 148   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
 149 
 150   heap->try_inject_alloc_failure();
 151   VM_ShenandoahFullGC op(cause, this);
 152   VMThread::execute(&op);
 153 }
 154 
 155 void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
 156   static const char* msg = "Pause Full";
 157   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
 158   EventMark em("%s", msg);
 159 
 160   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 161                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
 162                               "full gc");
 163 
 164   op_full(cause);
 165 }
 166 
 167 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
 168   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 169   ShenandoahMetricsSnapshot metrics;
 170   metrics.snap_before();
 171 
 172   // Perform full GC
 173   do_it(cause);
 174 
 175   metrics.snap_after();
 176   if (heap->mode()->is_generational()) {
 177     heap->log_heap_status("At end of Full GC");
 178 
 179     // Since we allow temporary violation of these constraints during Full GC, we want to ensure
 180     // that the invariants hold again by the time Full GC completes.
 181     assert(heap->old_generation()->used_regions_size() <= heap->old_generation()->adjusted_capacity(),
 182            "Old generation affiliated regions must not exceed capacity");
 183     assert(heap->young_generation()->used_regions_size() <= heap->young_generation()->adjusted_capacity(),
 184            "Young generation affiliated regions must not exceed capacity");
 185   }
 186   if (metrics.is_good_progress()) {
 187     ShenandoahHeap::heap()->notify_gc_progress();
 188   } else {
 189     // Nothing to do. Tell the allocation path that we have failed to make
 190     // progress, so it can finally fail.
 191     ShenandoahHeap::heap()->notify_gc_no_progress();
 192   }
 193 }
 194 
 195 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 196   ShenandoahHeap* heap = ShenandoahHeap::heap();
 197   // Since we may arrive here from a degenerated young or old GC failure, establish the generation as GLOBAL.
 198   heap->set_gc_generation(heap->global_generation());
 199 
 200   if (heap->mode()->is_generational()) {
 201     // Defer unadjust_available() invocations until after Full GC completes, because Full GC makes use
 202     // of young-gen memory that may have been loaned from old-gen.
 203 
 204     // No need to call old_gen->increase_used(). That was done when PLABs were allocated, accounting for both old evacuations and promotions.
 205 
 206     heap->set_alloc_supplement_reserve(0);
 207     heap->set_young_evac_reserve(0);
 208     heap->set_old_evac_reserve(0);
 209     heap->reset_old_evac_expended();
 210     heap->set_promoted_reserve(0);
 211 
 212     // Full GC supersedes any marking or coalescing in the old generation.
 213     heap->cancel_old_gc();
 214   }
 215 
 216   if (ShenandoahVerify) {
 217     heap->verifier()->verify_before_fullgc();
 218   }
 219 
 220   if (VerifyBeforeGC) {
 221     Universe::verify();
 222   }
 223 
 224   // Degenerated GC may carry concurrent root flags when upgrading to
 225   // full GC. We need to reset them before mutators resume.
 226   heap->set_concurrent_strong_root_in_progress(false);
 227   heap->set_concurrent_weak_root_in_progress(false);
 228 
 229   heap->set_full_gc_in_progress(true);
 230 
 231   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 232   assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
 233 
 234   {
 238 
 239   {
 240     ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
 241     // Full GC is supposed to recover from any GC state:
 242 
 243     // a0. Remember if we have forwarded objects
 244     bool has_forwarded_objects = heap->has_forwarded_objects();
 245 
 246     // a1. Cancel evacuation, if in progress
 247     if (heap->is_evacuation_in_progress()) {
 248       heap->set_evacuation_in_progress(false);
 249     }
 250     assert(!heap->is_evacuation_in_progress(), "sanity");
 251 
 252     // a2. Cancel update-refs, if in progress
 253     if (heap->is_update_refs_in_progress()) {
 254       heap->set_update_refs_in_progress(false);
 255     }
 256     assert(!heap->is_update_refs_in_progress(), "sanity");
 257 
 258     // b. Cancel all concurrent marks, if in progress
 259     if (heap->is_concurrent_mark_in_progress()) {
 260       heap->cancel_concurrent_mark();
 261     }
 262     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 263 
 264     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
 265     if (has_forwarded_objects) {
 266       update_roots(true /*full_gc*/);
 267     }
 268 
 269     // d. Reset the bitmaps for new marking
 270     heap->global_generation()->reset_mark_bitmap();
 271     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 272     assert(!heap->global_generation()->is_mark_complete(), "sanity");
 273 
 274     // e. Abandon reference discovery and clear all discovered references.
 275     ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
 276     rp->abandon_partial_discovery();
 277 
 278     // f. Sync pinned region status from the CP marks
 279     heap->sync_pinned_region_status();
 280 
 281     // The rest of prologue:
 282     _preserved_marks->init(heap->workers()->active_workers());
 283 
 284     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 285   }
 286 
 287   if (UseTLAB) {
 288     // TODO: Do we need to explicitly retire PLABs?
 289     heap->gclabs_retire(ResizeTLAB);
 290     heap->tlabs_retire(ResizeTLAB);
 291   }
 292 
 293   OrderAccess::fence();
 294 
 295   phase1_mark_heap();
 296 
 297   // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
 298   // Coming out of Full GC, we would not have any forwarded objects.
 299   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 300   heap->set_has_forwarded_objects(false);
 301 
 302   heap->set_full_gc_move_in_progress(true);
 303 
 304   // Set up workers for the rest
 305   OrderAccess::fence();
 306 
 307   // Initialize worker slices
 308   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
 311   }
 312 
 313   {
 314     // The rest of the code performs region moves, where region status is undefined
 315     // until all phases run together.
 316     ShenandoahHeapLocker lock(heap->lock());
 317 
 318     phase2_calculate_target_addresses(worker_slices);
 319 
 320     OrderAccess::fence();
 321 
 322     phase3_update_references();
 323 
 324     phase4_compact_objects(worker_slices);
 325   }
 326 
 327   {
 328     // Epilogue
 329     _preserved_marks->restore(heap->workers());
 330     _preserved_marks->reclaim();
 331 
 332     if (heap->mode()->is_generational()) {
 333       ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
 334       ShenandoahReconstructRememberedSetTask task;
 335       heap->workers()->run_task(&task);
 336     }
 337   }
 338 
 339   // Resize metaspace
 340   MetaspaceGC::compute_new_size();
 341 
 342   heap->adjust_generation_sizes();
 343 
 344   // Free worker slices
 345   for (uint i = 0; i < heap->max_workers(); i++) {
 346     delete worker_slices[i];
 347   }
 348   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 349 
 350   heap->set_full_gc_move_in_progress(false);
 351   heap->set_full_gc_in_progress(false);
 352 
 353   if (ShenandoahVerify) {
 354     if (heap->mode()->is_generational()) {
 355       heap->verifier()->verify_after_generational_fullgc();
 356     } else {
 357       heap->verifier()->verify_after_fullgc();
 358     }
 359   }
 360 
 361   // Having reclaimed all dead memory, it is now safe to restore capacities to their original values.
 362   heap->young_generation()->unadjust_available();
 363   heap->old_generation()->unadjust_available();
 364 
 365   if (VerifyAfterGC) {
 366     Universe::verify();
 367   }
 368 
 369   {
 370     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
 371     heap->post_full_gc_dump(_gc_timer);
 372   }
 373 }
 374 
 375 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 376 private:
 377   ShenandoahMarkingContext* const _ctx;
 378 
 379 public:
 380   ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 381 
 382   void heap_region_do(ShenandoahHeapRegion *r) {
 383     if (r->affiliation() != FREE) {
 384       _ctx->capture_top_at_mark_start(r);
 385       r->clear_live_data();
 386     }
 387   }
 388 
 389   bool is_thread_safe() { return true; }
 390 };
 391 
 392 void ShenandoahFullGC::phase1_mark_heap() {
 393   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 394   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 395 
 396   ShenandoahHeap* heap = ShenandoahHeap::heap();
 397 
 398   ShenandoahPrepareForMarkClosure cl;
 399   heap->parallel_heap_region_iterate(&cl);
 400 
 401   heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());
 402 
 403   ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
 404   // enable ("weak") refs discovery
 405   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 406 
 407   ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
 408   mark.mark();
 409   heap->parallel_cleaning(true /* full_gc */);
 410 }
 411 
 412 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 413 private:
 414   PreservedMarksSet*        const _preserved_marks;
 415   ShenandoahHeap*           const _heap;
 416   ShenandoahHeapRegionSet** const _worker_slices;
 417   size_t                    const _num_workers;
 418 
 419 public:
 420   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices,
 421                                      size_t num_workers);
 422 
 423   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 424     // Empty region: get it into the slice to defragment the slice itself.
 425     // We could have skipped this without violating correctness, but we really
 426     // want to compact all live regions to the start of the heap, which sometimes
 427     // means moving them into the fully empty regions.
 428     if (r->is_empty()) return true;
 429 
 430     // The region can be moved, and it is not humongous. Humongous
 431     // moves are special-cased, because they are handled separately.
 432     return r->is_stw_move_allowed() && !r->is_humongous();
 433   }
 434 
 435   void work(uint worker_id);
 436 };
 437 
 438 class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure {
 439 private:
 440   ShenandoahPrepareForCompactionTask* _compactor;
 441   PreservedMarks*          const _preserved_marks;
 442   ShenandoahHeap*          const _heap;
 443 
 444   // _empty_regions is a thread-local list of heap regions that have been completely emptied by this worker thread's
 445   // compaction efforts.  The worker thread that drives these efforts adds compacted regions to this list if the
 446   // region has not been compacted onto itself.
 447   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 448   int _empty_regions_pos;
 449   ShenandoahHeapRegion*          _old_to_region;
 450   ShenandoahHeapRegion*          _young_to_region;
 451   ShenandoahHeapRegion*          _from_region;
 452   ShenandoahRegionAffiliation    _from_affiliation;
 453   HeapWord*                      _old_compact_point;
 454   HeapWord*                      _young_compact_point;
 455   uint                           _worker_id;
 456 
 457 public:
 458   ShenandoahPrepareForGenerationalCompactionObjectClosure(ShenandoahPrepareForCompactionTask* compactor,
 459                                                           PreservedMarks* preserved_marks,
 460                                                           GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 461                                                           ShenandoahHeapRegion* old_to_region,
 462                                                           ShenandoahHeapRegion* young_to_region, uint worker_id) :
 463       _compactor(compactor),
 464       _preserved_marks(preserved_marks),
 465       _heap(ShenandoahHeap::heap()),
 466       _empty_regions(empty_regions),
 467       _empty_regions_pos(0),
 468       _old_to_region(old_to_region),
 469       _young_to_region(young_to_region),
 470       _from_region(nullptr),
 471       _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr),
 472       _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr),
 473       _worker_id(worker_id) {}
 474 
 475   void set_from_region(ShenandoahHeapRegion* from_region) {
 476     _from_region = from_region;
 477     _from_affiliation = from_region->affiliation();
 478     if (_from_region->has_live()) {
 479       if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
 480         if (_old_to_region == nullptr) {
 481           _old_to_region = from_region;
 482           _old_compact_point = from_region->bottom();
 483         }
 484       } else {
 485         assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
 486         if (_young_to_region == nullptr) {
 487           _young_to_region = from_region;
 488           _young_compact_point = from_region->bottom();
 489         }
 490       }
 491     } // else, we won't iterate over this _from_region, so we don't need to set up a to-region to hold copies
 492   }
 493 
 494   void finish() {
 495     finish_old_region();
 496     finish_young_region();
 497   }
 498 
 499   void finish_old_region() {
 500     if (_old_to_region != nullptr) {
 501       log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
 502                     _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
 503       _old_to_region->set_new_top(_old_compact_point);
 504       _old_to_region = nullptr;
 505     }
 506   }
 507 
 508   void finish_young_region() {
 509     if (_young_to_region != nullptr) {
 510       log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
 511                     _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
 512       _young_to_region->set_new_top(_young_compact_point);
 513       _young_to_region = nullptr;
 514     }
 515   }
 516 
 517   bool is_compact_same_region() {
 518     return (_from_region == _old_to_region) || (_from_region == _young_to_region);
 519   }
 520 
 521   int empty_regions_pos() {
 522     return _empty_regions_pos;
 523   }
 524 
 525   void do_object(oop p) {
 526     assert(_from_region != nullptr, "must set before work");
 527     assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
 528            "Object must reside in _from_region");
 529     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 530     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 531 
 532     size_t obj_size = p->size();
 533     uint from_region_age = _from_region->age();
 534     uint object_age = p->age();
 535 
 536     bool promote_object = false;
 537     if ((_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) &&
 538         (from_region_age + object_age >= InitialTenuringThreshold)) {
 539       if ((_old_to_region != nullptr) && (_old_compact_point + obj_size > _old_to_region->end())) {
 540         finish_old_region();
 541         _old_to_region = nullptr;
 542       }
 543       if (_old_to_region == nullptr) {
 544         if (_empty_regions_pos < _empty_regions.length()) {
 545           ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
 546           _empty_regions_pos++;
 547           new_to_region->set_affiliation(OLD_GENERATION);
 548           _old_to_region = new_to_region;
 549           _old_compact_point = _old_to_region->bottom();
 550           promote_object = true;
 551         }
 552         // Else this worker thread does not yet have any empty regions into which this aged object can be promoted, so
 553         // we leave promote_object as false, deferring the promotion.
 554       } else {
 555         promote_object = true;
 556       }
 557     }
 558 
 559     if (promote_object || (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION)) {
 560       assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
 561       if (_old_compact_point + obj_size > _old_to_region->end()) {
 562         ShenandoahHeapRegion* new_to_region;
 563 
 564         log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
 565                       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _old_to_region->index(),
 566                       p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));
 567 
 568         // Object does not fit.  Get a new _old_to_region.
 569         finish_old_region();
 570         if (_empty_regions_pos < _empty_regions.length()) {
 571           new_to_region = _empty_regions.at(_empty_regions_pos);
 572           _empty_regions_pos++;
 573           new_to_region->set_affiliation(OLD_GENERATION);
 574         } else {
 575           // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
 576           // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
 577           // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
 578           new_to_region = _from_region;
 579         }
 580 
 581         assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
 582         assert(new_to_region != nullptr, "must not be nullptr");
 583         _old_to_region = new_to_region;
 584         _old_compact_point = _old_to_region->bottom();
 585       }
 586 
 587       // Object fits into current region, record new location:
 588       assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
 589       shenandoah_assert_not_forwarded(nullptr, p);
 590       _preserved_marks->push_if_necessary(p, p->mark());
 591       p->forward_to(cast_to_oop(_old_compact_point));
 592       _old_compact_point += obj_size;
 593     } else {
 594       assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION,
 595              "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
 596       assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");
 597 
 598       // After full gc compaction, all regions have age 0.  Embed the region's age into the object's age in order to preserve
 599       // tenuring progress.
 600       if (_heap->is_aging_cycle()) {
 601         _heap->increase_object_age(p, from_region_age + 1);
 602       } else {
 603         _heap->increase_object_age(p, from_region_age);
 604       }
 605 
 606       if (_young_compact_point + obj_size > _young_to_region->end()) {
 607         ShenandoahHeapRegion* new_to_region;
 608 
 609         log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
 610                       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _young_to_region->index(),
 611                       p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));
 612 
 613         // Object does not fit.  Get a new _young_to_region.
 614         finish_young_region();
 615         if (_empty_regions_pos < _empty_regions.length()) {
 616           new_to_region = _empty_regions.at(_empty_regions_pos);
 617           _empty_regions_pos++;
 618           new_to_region->set_affiliation(YOUNG_GENERATION);
 619         } else {
 620           // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
 621           // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
 622           // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
 623           new_to_region = _from_region;
 624         }
 625 
 626         assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
 627         assert(new_to_region != nullptr, "must not be nullptr");
 628         _young_to_region = new_to_region;
 629         _young_compact_point = _young_to_region->bottom();
 630       }
 631 
 632       // Object fits into current region, record new location:
 633       assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
 634       shenandoah_assert_not_forwarded(nullptr, p);
 635       _preserved_marks->push_if_necessary(p, p->mark());
 636       p->forward_to(cast_to_oop(_young_compact_point));
 637       _young_compact_point += obj_size;
 638     }
 639   }
 640 };
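The aging step above folds the from-region's age into each surviving object's age (plus one on aging cycles), because every region comes out of full GC with age 0 and tenuring progress would otherwise be lost. A minimal standalone sketch of that bookkeeping, using a plain integer and a hypothetical age cap rather than the real mark-word encoding:

// Standalone sketch only: kMaxAge and aged() are illustrative stand-ins,
// not the real increase_object_age/markWord API.
#include <algorithm>
#include <cstdint>

static const uint32_t kMaxAge = 15;  // hypothetical tenuring cap

uint32_t aged(uint32_t obj_age, uint32_t region_age, bool aging_cycle) {
  // Region age is folded into the object age; aging cycles add one more.
  uint32_t bump = region_age + (aging_cycle ? 1u : 0u);
  return std::min(obj_age + bump, kMaxAge);
}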
 641 
 642 
 643 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 644 private:
 645   PreservedMarks*          const _preserved_marks;
 646   ShenandoahHeap*          const _heap;
 647   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 648   int _empty_regions_pos;
 649   ShenandoahHeapRegion*          _to_region;
 650   ShenandoahHeapRegion*          _from_region;
 651   HeapWord* _compact_point;
 652 
 653 public:
 654   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 655                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 656                                               ShenandoahHeapRegion* to_region) :
 657     _preserved_marks(preserved_marks),
 658     _heap(ShenandoahHeap::heap()),
 659     _empty_regions(empty_regions),
 660     _empty_regions_pos(0),
 661     _to_region(to_region),
 662     _from_region(nullptr),
 663     _compact_point(to_region->bottom()) {}
 664 
 665   void set_from_region(ShenandoahHeapRegion* from_region) {
 666     _from_region = from_region;
 667   }
 668 
 669   void finish_region() {
 670     assert(_to_region != nullptr, "should not happen");
 671     assert(!_heap->mode()->is_generational(), "Generational GC should use a different closure");
 672     _to_region->set_new_top(_compact_point);
 673   }
 674 
 675   bool is_compact_same_region() {
 676     return _from_region == _to_region;
 677   }
 678 
 679   int empty_regions_pos() {
 680     return _empty_regions_pos;
 681   }
 682 
 683   void do_object(oop p) {
 684     assert(_from_region != nullptr, "must set before work");
 685     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 686     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 687 
 688     size_t obj_size = p->size();
 689     if (_compact_point + obj_size > _to_region->end()) {
 690       finish_region();
 691 
 692       // Object doesn't fit. Pick the next empty region and start compacting there.
 693       ShenandoahHeapRegion* new_to_region;
 694       if (_empty_regions_pos < _empty_regions.length()) {
 695         new_to_region = _empty_regions.at(_empty_regions_pos);
 696         _empty_regions_pos++;
 697       } else {
 698         // Out of empty region? Compact within the same region.
 699         new_to_region = _from_region;
 700       }
 701 
 702       assert(new_to_region != _to_region, "must not reuse same to-region");
 703       assert(new_to_region != nullptr, "must not be null");
 704       _to_region = new_to_region;
 705       _compact_point = _to_region->bottom();
 706     }
 707 
 708     // Object fits into current region, record new location:
 709     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 710     shenandoah_assert_not_forwarded(nullptr, p);
 711     _preserved_marks->push_if_necessary(p, p->mark());
 712     p->forward_to(cast_to_oop(_compact_point));
 713     _compact_point += obj_size;
 714   }
 715 };
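Outside the HotSpot types, the heart of this closure is a fit-then-forward loop: try to place the object at the current compact point; if it does not fit, switch to the next empty region (or, when those run out, to the from-region itself, which by construction always has room for its own live data). A minimal sketch under simplified stand-in types, not the real ShenandoahHeapRegion/closure API:

// Standalone sketch only: Region and Compactor are simplified stand-ins.
#include <cassert>
#include <cstddef>
#include <vector>

struct Region { size_t bottom; size_t end; };

struct Compactor {
  std::vector<Region*> empty_regions;   // regions fully evacuated earlier
  size_t empty_pos = 0;
  Region* to_region;
  size_t compact_point;

  explicit Compactor(Region* first)
    : to_region(first), compact_point(first->bottom) {}

  // Returns the forwarding address for a live object of obj_size words.
  size_t forward(size_t obj_size, Region* from_region) {
    if (compact_point + obj_size > to_region->end) {
      // Does not fit: take the next empty region, or fall back to
      // compacting from_region into itself.
      Region* next = (empty_pos < empty_regions.size())
                         ? empty_regions[empty_pos++]
                         : from_region;
      assert(next != to_region && "must not reuse same to-region");
      to_region = next;
      compact_point = to_region->bottom;
    }
    size_t new_addr = compact_point;
    compact_point += obj_size;   // slide past the newly placed object
    return new_addr;
  }
};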
 716 





 717 
 718 ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
 719                                                                        ShenandoahHeapRegionSet **worker_slices,
 720                                                                        size_t num_workers) :
 721     WorkerTask("Shenandoah Prepare For Compaction"),
 722     _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()),
 723     _worker_slices(worker_slices), _num_workers(num_workers) { }
 724 
 725 
 726 void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
 727   ShenandoahParallelWorkerSession worker_session(worker_id);
 728   ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 729   ShenandoahHeapRegionSetIterator it(slice);
 730   ShenandoahHeapRegion* from_region = it.next();
 731   // No work?
 732   if (from_region == nullptr) {
 733     return;
 734   }
 735 
 736   // Sliding compaction. Walk all regions in the slice, and compact them.
 737   // Remember empty regions and reuse them as needed.
 738   ResourceMark rm;



 739 
 740   GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());



 741 
 742   if (_heap->mode()->is_generational()) {
 743     ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr;
 744     ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr;
 745     ShenandoahPrepareForGenerationalCompactionObjectClosure cl(this, _preserved_marks->get(worker_id), empty_regions,
 746                                                                old_to_region, young_to_region, worker_id);
 747     while (from_region != nullptr) {
 748       assert(is_candidate_region(from_region), "Sanity");
 749       log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
 750                     worker_id, affiliation_name(from_region->affiliation()),
 751                     from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
 752       cl.set_from_region(from_region);
 753       if (from_region->has_live()) {
 754         _heap->marked_object_iterate(from_region, &cl);
 755       }
 756       // Compacted the region to somewhere else? From-region is empty then.
 757       if (!cl.is_compact_same_region()) {
 758         empty_regions.append(from_region);
 759       }
 760       from_region = it.next();
 761     }
 762     cl.finish();
 763 
 764     // Mark all remaining regions as empty
 765     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 766       ShenandoahHeapRegion* r = empty_regions.at(pos);
 767       r->set_new_top(r->bottom());
 768     }
 769   } else {
 770     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

 771     while (from_region != nullptr) {
 772       assert(is_candidate_region(from_region), "Sanity");

 773       cl.set_from_region(from_region);
 774       if (from_region->has_live()) {
 775         _heap->marked_object_iterate(from_region, &cl);
 776       }
 777 
 778       // Compacted the region to somewhere else? From-region is empty then.
 779       if (!cl.is_compact_same_region()) {
 780         empty_regions.append(from_region);
 781       }
 782       from_region = it.next();
 783     }
 784     cl.finish_region();
 785 
 786     // Mark all remaining regions as empty
 787     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 788       ShenandoahHeapRegion* r = empty_regions.at(pos);
 789       r->set_new_top(r->bottom());
 790     }
 791   }
 792 }
 793 
 794 void ShenandoahFullGC::calculate_target_humongous_objects() {
 795   ShenandoahHeap* heap = ShenandoahHeap::heap();
 796 
 797   // Compute the new addresses for humongous objects. We need to do this after addresses
 798   // for regular objects are calculated, and we know what regions in heap suffix are
 799   // available for humongous moves.
 800   //
 801   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 802   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 803   // a humongous start there.
 804   //
 805   // The complication is potential non-movable regions during the scan. If such a region is
 806   // detected, sliding restarts towards that non-movable region.
 807 
 808   size_t to_begin = heap->num_regions();
 809   size_t to_end = heap->num_regions();
 810 
 811   log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
 812   for (size_t c = heap->num_regions(); c > 0; c--) {
 813     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 814     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 815       // To-region candidate: record this, and continue scan
 816       to_begin = r->index();
 817       continue;
 818     }
 819 
 820     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 821       // From-region candidate: movable humongous region
 822       oop old_obj = cast_to_oop(r->bottom());
 823       size_t words_size = old_obj->size();
 824       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 825 
 826       size_t start = to_end - num_regions;
 827 
 828       if (start >= to_begin && start != r->index()) {
 829         // Fits into the current window, and the move is non-trivial. Record the move and continue the scan.
 830         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 831         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
 832         to_end = start;
 833         continue;
 834       }
 835     }
 836 
 837     // Failed to fit. Restart the compaction window at the current region.
 838     to_begin = r->index();
 839     to_end = r->index();
 840   }
 841 }
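The window arithmetic is easier to see on a toy heap. Scanning backwards, [to_begin, to_end) tracks the run of regions a humongous object may slide into; a movable object of num_regions regions goes to start = to_end - num_regions when that start still lies inside the window. A hedged standalone sketch, where boolean flags stand in for real region states:

// Standalone sketch only: candidate[] marks empty/continuation regions;
// every other region is treated as a movable 2-region humongous start.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t num = 10;
  bool candidate[num] = {false, false, true, true, true,
                         false, true,  true, true, true};
  size_t to_begin = num, to_end = num;
  for (size_t c = num; c > 0; c--) {
    size_t i = c - 1;
    if (candidate[i]) { to_begin = i; continue; }
    const size_t need = 2;
    if (to_end >= need) {
      size_t start = to_end - need;
      if (start >= to_begin && start != i) {
        printf("slide humongous start %zu -> %zu\n", i, start);
        to_end = start;    // window shrinks past the moved object
        continue;
      }
    }
    // Failed to fit: restart the window at the current region.
    to_begin = to_end = i;
  }
  return 0;
}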
 842 
 843 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 844 private:
 845   ShenandoahHeap* const _heap;
 846 
 847 public:
 848   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 849   void heap_region_do(ShenandoahHeapRegion* r) {
 850     bool is_generational = _heap->mode()->is_generational();
 851     if (r->is_trash()) {
 852       r->recycle();
 853     }
 854     if (r->is_cset()) {
 855       // Leave affiliation unchanged.
 856       r->make_regular_bypass();
 857     }
 858     if (r->is_empty_uncommitted()) {
 859       r->make_committed_bypass();
 860     }
 861     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 862 
 863     // Record current region occupancy: this tells the rest of the Full GC
 864     // code which regions are free.
 865     r->set_new_top(r->top());
 866   }
 867 };
 868 
 869 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
 870 private:
 871   ShenandoahHeap* const _heap;
 872   ShenandoahMarkingContext* const _ctx;
 873 
 874 public:
 875   ShenandoahTrashImmediateGarbageClosure() :
 876     _heap(ShenandoahHeap::heap()),
 877     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 878 
 879   void heap_region_do(ShenandoahHeapRegion* r) {
 880     if (r->affiliation() != FREE) {
 881       if (r->is_humongous_start()) {
 882         oop humongous_obj = cast_to_oop(r->bottom());
 883         if (!_ctx->is_marked(humongous_obj)) {
 884           assert(!r->has_live(),
 885                  "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live",
 886                  affiliation_name(r->affiliation()),  r->index());
 887           log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because not marked", r->index());
 888           _heap->trash_humongous_region_at(r);
 889         } else {
 890           assert(r->has_live(),
 891                  "Humongous Start %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()),  r->index());
 892         }
 893       } else if (r->is_humongous_continuation()) {
 894         // If we hit a continuation, the non-live humongous starts should have been trashed already
 895         assert(r->humongous_start_region()->has_live(),
 896                "Humongous Continuation %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()),  r->index());
 897       } else if (r->is_regular()) {
 898         if (!r->has_live()) {
 899           log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because has no live", r->index());
 900           r->make_trash_immediate();
 901         }
 902       }
 903     }
 904     // else, ignore this FREE region.
 905     // TODO: change iterators so they do not process FREE regions.
 906   }
 907 };
 908 
 909 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
 910   ShenandoahHeap* heap = ShenandoahHeap::heap();
 911 
 912   uint n_workers = heap->workers()->active_workers();
 913   size_t n_regions = heap->num_regions();
 914 
 915   // What we want to accomplish: have the dense prefix of data, while still balancing
 916   // out the parallel work.
 917   //
 918   // Assuming the amount of work is driven by the live data that needs moving, we can slice
 919   // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
 920   // thread takes all regions in its prefix subset, and then it takes some regions from
 921   // the tail.
 922   //
 923   // Tail region selection becomes interesting.
 924   //
 925   // First, we want to distribute the regions fairly between the workers, and those regions

1052   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
1053   ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
1054 
1055   ShenandoahHeap* heap = ShenandoahHeap::heap();
1056 
1057   // We are about to figure out which regions can be compacted, so make sure the
1058   // pinning status has been updated in the GC prologue.
1059   heap->assert_pinned_region_status();
1060 
1061   {
1062     // Trash the immediately collectible regions before computing addresses
1063     ShenandoahTrashImmediateGarbageClosure tigcl;
1064     heap->heap_region_iterate(&tigcl);
1065 
1066     // Make sure regions are in good state: committed, active, clean.
1067     // This is needed because we are potentially sliding the data through them.
1068     ShenandoahEnsureHeapActiveClosure ecl;
1069     heap->heap_region_iterate(&ecl);
1070   }
1071 
1072   if (heap->mode()->is_generational()) {
1073     heap->young_generation()->clear_used();
1074     heap->old_generation()->clear_used();
1075   }
1076 
1077   // Compute the new addresses for regular objects
1078   {
1079     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
1080 
1081     distribute_slices(worker_slices);
1082 
1083     size_t num_workers = heap->max_workers();
1084 
1085     ResourceMark rm;
1086     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers);
1087     heap->workers()->run_task(&task);
1088   }
1089 
1090   // Compute the new addresses for humongous objects
1091   {
1092     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
1093     calculate_target_humongous_objects();
1094   }
1095 }
1096 
1097 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
1098 private:
1099   ShenandoahHeap* const _heap;
1100   ShenandoahMarkingContext* const _ctx;
1101 
1102   template <class T>
1103   inline void do_oop_work(T* p) {
1104     T o = RawAccess<>::oop_load(p);
1105     if (!CompressedOops::is_null(o)) {
1106       oop obj = CompressedOops::decode_not_null(o);

1140 
1141 class ShenandoahAdjustPointersTask : public WorkerTask {
1142 private:
1143   ShenandoahHeap*          const _heap;
1144   ShenandoahRegionIterator       _regions;
1145 
1146 public:
1147   ShenandoahAdjustPointersTask() :
1148     WorkerTask("Shenandoah Adjust Pointers"),
1149     _heap(ShenandoahHeap::heap()) {
1150   }
1151 
1152   void work(uint worker_id) {
1153     ShenandoahParallelWorkerSession worker_session(worker_id);
1154     ShenandoahAdjustPointersObjectClosure obj_cl;
1155     ShenandoahHeapRegion* r = _regions.next();
1156     while (r != nullptr) {
1157       if (!r->is_humongous_continuation() && r->has_live()) {
1158         _heap->marked_object_iterate(r, &obj_cl);
1159       }
1160       if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
1161         // Pinned regions are not compacted so they may still hold unmarked objects with
1162         // reference to reclaimed memory. Remembered set scanning will crash if it attempts
1163         // to iterate the oops in these objects.
1164         r->begin_preemptible_coalesce_and_fill();
1165         r->oop_fill_and_coalesce_wo_cancel();
1166       }
1167       r = _regions.next();
1168     }
1169   }
1170 };
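The coalesce-and-fill step for pinned old regions replaces every dead gap between live objects with a filler, so a subsequent linear walk (such as remembered-set scanning) never parses stale oops. A minimal sketch of the gap computation with simplified stand-in types:

// Standalone sketch only: Live/Filler are stand-ins; the real code plants
// filler objects in the heap rather than returning a vector.
#include <cstddef>
#include <vector>

struct Live   { size_t start; size_t size; };   // a marked, live object
struct Filler { size_t start; size_t size; };   // dead range to overwrite

std::vector<Filler> coalesce_and_fill(const std::vector<Live>& live,
                                      size_t bottom, size_t top) {
  std::vector<Filler> fillers;
  size_t cursor = bottom;
  for (const Live& l : live) {                  // assumed address-ordered
    if (l.start > cursor) {
      fillers.push_back({cursor, l.start - cursor});  // one filler per gap
    }
    cursor = l.start + l.size;
  }
  if (cursor < top) {
    fillers.push_back({cursor, top - cursor});  // trailing dead space
  }
  return fillers;
}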
1171 
1172 class ShenandoahAdjustRootPointersTask : public WorkerTask {
1173 private:
1174   ShenandoahRootAdjuster* _rp;
1175   PreservedMarksSet* _preserved_marks;
1176 public:
1177   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
1178     WorkerTask("Shenandoah Adjust Root Pointers"),
1179     _rp(rp),
1180     _preserved_marks(preserved_marks) {}
1181 
1182   void work(uint worker_id) {
1183     ShenandoahParallelWorkerSession worker_session(worker_id);
1184     ShenandoahAdjustPointersClosure cl;
1185     _rp->roots_do(worker_id, &cl);
1186     _preserved_marks->get(worker_id)->adjust_during_full_gc();

1259         _heap->marked_object_iterate(r, &cl);
1260       }
1261       r->set_top(r->new_top());
1262       r = slice.next();
1263     }
1264   }
1265 };
1266 
1267 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
1268 private:
1269   ShenandoahHeap* const _heap;
1270   size_t _live;
1271 
1272 public:
1273   ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
1274     _heap->free_set()->clear();
1275   }
1276 
1277   void heap_region_do(ShenandoahHeapRegion* r) {
1278     assert (!r->is_cset(), "cset regions should have been demoted already");
1279     bool is_generational = _heap->mode()->is_generational();
1280 
1281     // Need to reset the complete-top-at-mark-start pointer here because
1282     // the complete marking bitmap is no longer valid. This ensures
1283     // size-based iteration in marked_object_iterate().
1284     // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
1285     // pinned regions.
1286     if (!r->is_pinned()) {
1287       _heap->complete_marking_context()->reset_top_at_mark_start(r);
1288     }
1289 
1290     size_t live = r->used();
1291 
1292     // Turn empty regions that have had allocations into regular regions
1293     if (r->is_empty() && live > 0) {
1294       if (!is_generational) {
1295         r->make_young_maybe();
1296       }
1297       // else, generational mode compaction has already established affiliation.
1298       r->make_regular_bypass();
1299     }
1300 
1301     // Reclaim regular regions that became empty
1302     if (r->is_regular() && live == 0) {
1303       r->make_trash();
1304     }
1305 
1306     // Recycle all trash regions
1307     if (r->is_trash()) {
1308       live = 0;
1309       r->recycle();
1310     }
1311 
1312     // Update final usage for generations
1313     if (is_generational && live != 0) {
1314       if (r->is_young()) {
1315         _heap->young_generation()->increase_used(live);
1316       } else if (r->is_old()) {
1317         _heap->old_generation()->increase_used(live);
1318       }
1319     }
1320 
1321     r->set_live_data(live);
1322     r->reset_alloc_metadata();
1323     _live += live;
1324   }
1325 
1326   size_t get_live() {
1327     return _live;
1328   }
1329 };
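Since full GC rebuilds usage from scratch, each region's surviving bytes are added both to the global total and to its generation's counter. A minimal sketch of that accounting, under the simplifying assumption that after compaction "live" equals "used" (R and Usage are stand-ins, not the real generation API):

// Standalone sketch only.
#include <cstddef>
#include <vector>

enum class Gen { Young, Old, Free };
struct R { Gen gen; size_t used; };
struct Usage { size_t global = 0, young = 0, old = 0; };

Usage rebuild_usage(const std::vector<R>& regions) {
  Usage u;
  for (const R& r : regions) {
    u.global += r.used;                 // feeds heap->set_used(...)
    if (r.gen == Gen::Young)      u.young += r.used;
    else if (r.gen == Gen::Old)   u.old   += r.used;
  }
  return u;
}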
1330 
1331 void ShenandoahFullGC::compact_humongous_objects() {
1332   // Compact humongous regions, based on their fwdptr objects.
1333   //
1334   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
1335   // humongous regions are already compacted, and do not require further moves, which alleviates
1336   // sliding costs. We may consider doing this in parallel in the future.
1337 
1338   ShenandoahHeap* heap = ShenandoahHeap::heap();
1339 
1340   for (size_t c = heap->num_regions(); c > 0; c--) {
1341     ShenandoahHeapRegion* r = heap->get_region(c - 1);
1342     if (r->is_humongous_start()) {
1343       oop old_obj = cast_to_oop(r->bottom());
1344       if (!old_obj->is_forwarded()) {
1345         // No need to move the object, it stays at the same slot
1346         continue;
1347       }
1348       size_t words_size = old_obj->size();
1349       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1350 
1351       size_t old_start = r->index();
1352       size_t old_end   = old_start + num_regions - 1;
1353       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
1354       size_t new_end   = new_start + num_regions - 1;
1355       assert(old_start != new_start, "must be real move");
1356       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
1357 
1358       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(heap->get_region(old_start)->bottom()));
1359       log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT,
1360                     old_start, new_start);
1361 
1362       Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
1363                                    heap->get_region(new_start)->bottom(),
1364                                    words_size);
1365 
1366       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1367       new_obj->init_mark();
1368 
1369       {
1370         ShenandoahRegionAffiliation original_affiliation = r->affiliation();
1371         for (size_t c = old_start; c <= old_end; c++) {
1372           ShenandoahHeapRegion* r = heap->get_region(c);
1373           // Leave humongous region affiliation unchanged.
1374           r->make_regular_bypass();
1375           r->set_top(r->bottom());
1376         }
1377 
1378         for (size_t c = new_start; c <= new_end; c++) {
1379           ShenandoahHeapRegion* r = heap->get_region(c);
1380           if (c == new_start) {
1381             r->make_humongous_start_bypass(original_affiliation);
1382           } else {
1383             r->make_humongous_cont_bypass(original_affiliation);
1384           }
1385 
1386           // Trailing region may be non-full, record the remainder there
1387           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1388           if ((c == new_end) && (remainder != 0)) {
1389             r->set_top(r->bottom() + remainder);
1390           } else {
1391             r->set_top(r->end());
1392           }
1393 
1394           r->reset_alloc_metadata();
1395         }
1396       }
1397     }
1398   }
1399 }
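The trailing-region top computed above relies on the region size in words being a power of two, so masking with region_size_words_mask() is equivalent to taking the object size modulo the region size. A quick standalone check of that identity, with an arbitrary illustrative region size:

// Standalone sketch only: region_words is an arbitrary power-of-two choice.
#include <cassert>
#include <cstddef>

int main() {
  const size_t region_words = size_t(1) << 18;         // hypothetical region size
  const size_t mask = region_words - 1;                 // power-of-two mask
  const size_t words_size = 3 * region_words + 12345;   // spans 4 regions
  assert((words_size & mask) == words_size % region_words);
  assert((words_size & mask) == 12345);  // remainder sets trailing region top
  return 0;
}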
1400 
1401 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1402 // we need to remain able to walk pinned regions.
1403 // Since pinned regions do not move and don't get compacted, we will get holes with

1442   }
1443 
1444   // Compact humongous objects after regular object moves
1445   {
1446     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1447     compact_humongous_objects();
1448   }
1449 
1450   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1451   // and must ensure the bitmap is in sync.
1452   {
1453     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1454     ShenandoahMCResetCompleteBitmapTask task;
1455     heap->workers()->run_task(&task);
1456   }
1457 
1458   // Bring regions in proper states after the collection, and set heap properties.
1459   {
1460     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1461 
1462     if (heap->mode()->is_generational()) {
1463       heap->young_generation()->clear_used();
1464       heap->old_generation()->clear_used();
1465     }
1466 
1467     ShenandoahPostCompactClosure post_compact;
1468     heap->heap_region_iterate(&post_compact);
1469     heap->set_used(post_compact.get_live());
1470     if (heap->mode()->is_generational()) {
1471       log_info(gc)("FullGC done: GLOBAL usage: " SIZE_FORMAT ", young usage: " SIZE_FORMAT ", old usage: " SIZE_FORMAT,
1472                     post_compact.get_live(), heap->young_generation()->used(), heap->old_generation()->used());
1473     }
1474 
1475     heap->collection_set()->clear();
1476     heap->free_set()->rebuild();
1477   }
1478 
1479   heap->clear_cancelled_gc(true /* clear oom handler */);
1480 }
< prev index next >