
src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp (old version)


   1 /*
   2  * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.

   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/continuationGCSupport.hpp"
  29 #include "gc/shared/gcTraceTime.inline.hpp"
  30 #include "gc/shared/preservedMarks.inline.hpp"
  31 #include "gc/shared/tlab_globals.hpp"
  32 #include "gc/shared/workerThread.hpp"
  33 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahFullGC.hpp"

  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  45 #include "gc/shenandoah/shenandoahMetrics.hpp"

  46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  47 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  48 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  49 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"
  51 #include "gc/shenandoah/shenandoahVerifier.hpp"
  52 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  53 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"

  54 #include "memory/metaspaceUtils.hpp"
  55 #include "memory/universe.hpp"
  56 #include "oops/compressedOops.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "runtime/javaThread.hpp"
  59 #include "runtime/orderAccess.hpp"
  60 #include "runtime/vmThread.hpp"
  61 #include "utilities/copy.hpp"
  62 #include "utilities/events.hpp"
  63 #include "utilities/growableArray.hpp"
  64 
  65 ShenandoahFullGC::ShenandoahFullGC() :
  66   _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  67   _preserved_marks(new PreservedMarksSet(true)) {}
  68 
  69 ShenandoahFullGC::~ShenandoahFullGC() {
  70   delete _preserved_marks;
  71 }
  72 
  73 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  74   vmop_entry_full(cause);
   75   // Always succeeds
  76   return true;
  77 }
  78 
  79 void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  80   ShenandoahHeap* const heap = ShenandoahHeap::heap();
  81   TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  82   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
  83 
  84   heap->try_inject_alloc_failure();
  85   VM_ShenandoahFullGC op(cause, this);
  86   VMThread::execute(&op);
  87 }
  88 
  89 void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  90   static const char* msg = "Pause Full";
  91   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  92   EventMark em("%s", msg);
  93 
  94   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
  95                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
  96                               "full gc");
  97 
  98   op_full(cause);
  99 }
 100 
 101 void ShenandoahFullGC::op_full(GCCause::Cause cause) {

 102   ShenandoahMetricsSnapshot metrics;
 103   metrics.snap_before();
 104 
 105   // Perform full GC
 106   do_it(cause);
 107 
 108   metrics.snap_after();
 109 

 110   if (metrics.is_good_progress()) {
 111     ShenandoahHeap::heap()->notify_gc_progress();
 112   } else {
 113     // Nothing to do. Tell the allocation path that we have failed to make
 114     // progress, and it can finally fail.
 115     ShenandoahHeap::heap()->notify_gc_no_progress();
 116   }
 117 }
 118 
 119 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 120   ShenandoahHeap* heap = ShenandoahHeap::heap();
 121 
 122   if (ShenandoahVerify) {
 123     heap->verifier()->verify_before_fullgc();
 124   }
 125 
 126   if (VerifyBeforeGC) {
 127     Universe::verify();
 128   }
 129 
 130   // Degenerated GC may carry concurrent root flags when upgrading to
 131   // full GC. We need to reset it before mutators resume.
 132   heap->set_concurrent_strong_root_in_progress(false);
 133   heap->set_concurrent_weak_root_in_progress(false);
 134 
 135   heap->set_full_gc_in_progress(true);
 136 
 137   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 138   assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
 139 
 140   {

 144 
 145   {
 146     ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
 147     // Full GC is supposed to recover from any GC state:
 148 
 149     // a0. Remember if we have forwarded objects
 150     bool has_forwarded_objects = heap->has_forwarded_objects();
 151 
 152     // a1. Cancel evacuation, if in progress
 153     if (heap->is_evacuation_in_progress()) {
 154       heap->set_evacuation_in_progress(false);
 155     }
 156     assert(!heap->is_evacuation_in_progress(), "sanity");
 157 
 158     // a2. Cancel update-refs, if in progress
 159     if (heap->is_update_refs_in_progress()) {
 160       heap->set_update_refs_in_progress(false);
 161     }
 162     assert(!heap->is_update_refs_in_progress(), "sanity");
 163 
 164     // b. Cancel concurrent mark, if in progress
 165     if (heap->is_concurrent_mark_in_progress()) {
 166       ShenandoahConcurrentGC::cancel();
 167       heap->set_concurrent_mark_in_progress(false);
 168     }
 169     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 170 
 171     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
 172     if (has_forwarded_objects) {
 173       update_roots(true /*full_gc*/);
 174     }
 175 
 176     // d. Reset the bitmaps for new marking
 177     heap->reset_mark_bitmap();
 178     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 179     assert(!heap->marking_context()->is_complete(), "sanity");
 180 
 181     // e. Abandon reference discovery and clear all discovered references.
 182     ShenandoahReferenceProcessor* rp = heap->ref_processor();
 183     rp->abandon_partial_discovery();
 184 
 185     // f. Sync pinned region status from the CP marks
 186     heap->sync_pinned_region_status();
 187 
 188     // The rest of prologue:
 189     _preserved_marks->init(heap->workers()->active_workers());
 190 
 191     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 192   }
 193 
 194   if (UseTLAB) {

 195     heap->gclabs_retire(ResizeTLAB);
 196     heap->tlabs_retire(ResizeTLAB);
 197   }
 198 
 199   OrderAccess::fence();
 200 
 201   phase1_mark_heap();
 202 
 203   // Once marking is done, which may have fixed up forwarded objects, we can drop it.
 204   // Coming out of Full GC, we would not have any forwarded objects.
 205   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 206   heap->set_has_forwarded_objects(false);
 207 
 208   heap->set_full_gc_move_in_progress(true);
 209 
 210   // Setup workers for the rest
 211   OrderAccess::fence();
 212 
 213   // Initialize worker slices
 214   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
 215   for (uint i = 0; i < heap->max_workers(); i++) {
 216     worker_slices[i] = new ShenandoahHeapRegionSet();
 217   }
 218 
 219   {
 220     // The rest of code performs region moves, where region status is undefined
 221     // until all phases run together.
 222     ShenandoahHeapLocker lock(heap->lock());
 223 
 224     phase2_calculate_target_addresses(worker_slices);
 225 
 226     OrderAccess::fence();
 227 
 228     phase3_update_references();
 229 
 230     phase4_compact_objects(worker_slices);


 231   }
 232 
 233   {
 234     // Epilogue

 235     _preserved_marks->restore(heap->workers());
 236     _preserved_marks->reclaim();
 237   }
 238 
 239   // Resize metaspace
 240   MetaspaceGC::compute_new_size();
 241 
 242   // Free worker slices
 243   for (uint i = 0; i < heap->max_workers(); i++) {
 244     delete worker_slices[i];
 245   }
 246   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 247 
 248   heap->set_full_gc_move_in_progress(false);
 249   heap->set_full_gc_in_progress(false);
 250 
 251   if (ShenandoahVerify) {
 252     heap->verifier()->verify_after_fullgc();
 253   }
 254 

 255   if (VerifyAfterGC) {
 256     Universe::verify();
 257   }
 258 
 259   {
 260     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
 261     heap->post_full_gc_dump(_gc_timer);
 262   }
 263 }
 264 
 265 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 266 private:
 267   ShenandoahMarkingContext* const _ctx;
 268 
 269 public:
 270   ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 271 
 272   void heap_region_do(ShenandoahHeapRegion *r) {
 273     _ctx->capture_top_at_mark_start(r);
 274     r->clear_live_data();


 275   }


 276 };
 277 
 278 void ShenandoahFullGC::phase1_mark_heap() {
 279   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 280   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 281 
 282   ShenandoahHeap* heap = ShenandoahHeap::heap();
 283 
 284   ShenandoahPrepareForMarkClosure cl;
 285   heap->heap_region_iterate(&cl);
 286 
 287   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
 288 
 289   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 290   // enable ("weak") refs discovery
 291   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 292 
 293   ShenandoahSTWMark mark(true /*full_gc*/);
 294   mark.mark();
 295   heap->parallel_cleaning(true /* full_gc */);
 296 }
 297 
 298 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 299 private:
 300   PreservedMarks*          const _preserved_marks;
 301   ShenandoahHeap*          const _heap;
 302   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 303   int _empty_regions_pos;
 304   ShenandoahHeapRegion*          _to_region;
 305   ShenandoahHeapRegion*          _from_region;
 306   HeapWord* _compact_point;
 307 
 308 public:
 309   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 310                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 311                                               ShenandoahHeapRegion* to_region) :
 312     _preserved_marks(preserved_marks),
 313     _heap(ShenandoahHeap::heap()),
 314     _empty_regions(empty_regions),
 315     _empty_regions_pos(0),
 316     _to_region(to_region),
 317     _from_region(nullptr),
 318     _compact_point(to_region->bottom()) {}
 319 
 320   void set_from_region(ShenandoahHeapRegion* from_region) {
 321     _from_region = from_region;
 322   }
 323 
 324   void finish_region() {
 325     assert(_to_region != nullptr, "should not happen");

 326     _to_region->set_new_top(_compact_point);
 327   }
 328 
 329   bool is_compact_same_region() {
 330     return _from_region == _to_region;
 331   }
 332 
 333   int empty_regions_pos() {
 334     return _empty_regions_pos;
 335   }
 336 
 337   void do_object(oop p) {
 338     assert(_from_region != nullptr, "must set before work");
 339     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 340     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 341 
 342     size_t obj_size = p->size();
 343     if (_compact_point + obj_size > _to_region->end()) {
 344       finish_region();
 345 

 351       } else {
 352         // Out of empty region? Compact within the same region.
 353         new_to_region = _from_region;
 354       }
 355 
 356       assert(new_to_region != _to_region, "must not reuse same to-region");
 357       assert(new_to_region != nullptr, "must not be null");
 358       _to_region = new_to_region;
 359       _compact_point = _to_region->bottom();
 360     }
 361 
 362     // Object fits into current region, record new location:
 363     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 364     shenandoah_assert_not_forwarded(nullptr, p);
 365     _preserved_marks->push_if_necessary(p, p->mark());
 366     p->forward_to(cast_to_oop(_compact_point));
 367     _compact_point += obj_size;
 368   }
 369 };
 370 
 371 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 372 private:
 373   PreservedMarksSet*        const _preserved_marks;
 374   ShenandoahHeap*           const _heap;
 375   ShenandoahHeapRegionSet** const _worker_slices;
 376 
 377 public:
 378   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :

 379     WorkerTask("Shenandoah Prepare For Compaction"),
 380     _preserved_marks(preserved_marks),
 381     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 382   }
 383 
 384   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 385     // Empty region: get it into the slice to defragment the slice itself.
 386     // We could have skipped this without violating correctness, but we really
 387     // want to compact all live regions to the start of the heap, which sometimes
 388     // means moving them into the fully empty regions.
 389     if (r->is_empty()) return true;
 390 
 391     // Can move the region, and this is not the humongous region. Humongous
 392     // moves are special cased here, because their moves are handled separately.
 393     return r->is_stw_move_allowed() && !r->is_humongous();
 394   }
 395 
 396   void work(uint worker_id) {
 397     ShenandoahParallelWorkerSession worker_session(worker_id);
 398     ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 399     ShenandoahHeapRegionSetIterator it(slice);
 400     ShenandoahHeapRegion* from_region = it.next();
 401     // No work?
 402     if (from_region == nullptr) {
 403        return;
 404     }

 405 
 406     // Sliding compaction. Walk all regions in the slice, and compact them.
 407     // Remember empty regions and reuse them as needed.
 408     ResourceMark rm;
 409 
 410     GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
 411 
 412     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
 413 
 414     while (from_region != nullptr) {
 415       assert(is_candidate_region(from_region), "Sanity");
 416 
 417       cl.set_from_region(from_region);
 418       if (from_region->has_live()) {
 419         _heap->marked_object_iterate(from_region, &cl);
 420       }
 421 
 422       // Compacted the region to somewhere else? From-region is empty then.
 423       if (!cl.is_compact_same_region()) {
 424         empty_regions.append(from_region);
 425       }
 426       from_region = it.next();
 427     }
 428     cl.finish_region();
 429 
 430     // Mark all remaining regions as empty
 431     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 432       ShenandoahHeapRegion* r = empty_regions.at(pos);
 433       r->set_new_top(r->bottom());
 434     }
 435   }
 436 };
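
The two classes above are the heart of phase 2 for regular objects: walk marked objects in address order, bump a compact point through a chain of to-regions, and record each object's destination with forward_to(). As a reading aid only, here is a minimal standalone sketch of that planning step; ToyRegion, ToyObject and plan_compaction() are hypothetical names, not part of this patch:

#include <cstddef>
#include <vector>

struct ToyRegion { size_t bottom; size_t end; size_t new_top; };
struct ToyObject { size_t addr; size_t size; size_t forwardee; };

// Assumes 'regions' is non-empty and large enough, in total, to hold all live objects.
void plan_compaction(std::vector<ToyObject>& live, std::vector<ToyRegion>& regions) {
  size_t region_idx = 0;
  size_t compact_point = regions[0].bottom;
  for (ToyObject& obj : live) {                       // live objects in address order
    if (compact_point + obj.size > regions[region_idx].end) {
      regions[region_idx].new_top = compact_point;    // finish the current to-region
      region_idx++;                                   // take the next to-region
      compact_point = regions[region_idx].bottom;
    }
    obj.forwardee = compact_point;                    // record destination (the "fwdptr")
    compact_point += obj.size;
  }
  regions[region_idx].new_top = compact_point;        // finish the last to-region used
}

The real closure differs in that the destination is stored in the object's mark word via forward_to(), with push_if_necessary() preserving any displaced mark.
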
 437 
 438 void ShenandoahFullGC::calculate_target_humongous_objects() {
 439   ShenandoahHeap* heap = ShenandoahHeap::heap();
 440 
 441   // Compute the new addresses for humongous objects. We need to do this after addresses
 442   // for regular objects are calculated, and we know what regions in heap suffix are
 443   // available for humongous moves.
 444   //
 445   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 446   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 447   // humongous start there.
 448   //
 449   // The complication is potential non-movable regions during the scan. If such region is
 450   // detected, then sliding restarts towards that non-movable region.
 451 
 452   size_t to_begin = heap->num_regions();
 453   size_t to_end = heap->num_regions();
 454 

 455   for (size_t c = heap->num_regions(); c > 0; c--) {
 456     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 457     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 458       // To-region candidate: record this, and continue scan
 459       to_begin = r->index();
 460       continue;
 461     }
 462 
 463     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 464       // From-region candidate: movable humongous region
 465       oop old_obj = cast_to_oop(r->bottom());
 466       size_t words_size = old_obj->size();
 467       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 468 
 469       size_t start = to_end - num_regions;
 470 
 471       if (start >= to_begin && start != r->index()) {
 472         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 473         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 474         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));

 477       }
 478     }
 479 
 480     // Failed to fit. Scan starting from current region.
 481     to_begin = r->index();
 482     to_end = r->index();
 483   }
 484 }
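
The backward window scan above is easiest to see on a toy example. The sketch below mirrors the control flow with hypothetical Region/Kind types and a plan_humongous_moves() helper (no preserved marks, no forwarding pointers); it only illustrates the window arithmetic and is not the patch's code:

#include <cstddef>
#include <vector>

enum class Kind { Empty, Regular, Pinned, HumongousStart, HumongousCont };
struct Region { Kind kind; size_t num_regions; size_t new_index; };  // num_regions only meaningful for HumongousStart

// Slide humongous objects towards the end of the heap. [to_begin, to_end) is the
// current candidate window; it restarts below any region that cannot be a target.
void plan_humongous_moves(std::vector<Region>& regions) {
  size_t to_begin = regions.size();
  size_t to_end   = regions.size();
  for (size_t c = regions.size(); c > 0; c--) {
    Region& r = regions[c - 1];
    if (r.kind == Kind::Empty || r.kind == Kind::HumongousCont) {
      to_begin = c - 1;                  // extend the candidate window downwards
      continue;
    }
    if (r.kind == Kind::HumongousStart && r.num_regions <= to_end) {  // guard added for the toy sketch
      size_t start = to_end - r.num_regions;
      if (start >= to_begin && start != c - 1) {
        r.new_index = start;             // non-trivial move fits the window
        to_end = start;                  // window shrinks to exclude the new target
        continue;
      }
    }
    to_begin = c - 1;                    // cannot move past this region: restart window here
    to_end   = c - 1;
  }
}
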
 485 
 486 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 487 private:
 488   ShenandoahHeap* const _heap;
 489 
 490 public:
 491   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 492   void heap_region_do(ShenandoahHeapRegion* r) {
 493     if (r->is_trash()) {
 494       r->recycle();
 495     }
 496     if (r->is_cset()) {

 497       r->make_regular_bypass();
 498     }
 499     if (r->is_empty_uncommitted()) {
 500       r->make_committed_bypass();
 501     }
 502     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 503 
 504     // Record current region occupancy: this communicates empty regions are free
 505     // to the rest of Full GC code.
 506     r->set_new_top(r->top());
 507   }
 508 };
 509 
 510 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
 511 private:
 512   ShenandoahHeap* const _heap;
 513   ShenandoahMarkingContext* const _ctx;
 514 
 515 public:
 516   ShenandoahTrashImmediateGarbageClosure() :
 517     _heap(ShenandoahHeap::heap()),
 518     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 519 
 520   void heap_region_do(ShenandoahHeapRegion* r) {
 521     if (r->is_humongous_start()) {
 522       oop humongous_obj = cast_to_oop(r->bottom());
 523       if (!_ctx->is_marked(humongous_obj)) {
 524         assert(!r->has_live(),
 525                "Region " SIZE_FORMAT " is not marked, should not have live", r->index());


 526         _heap->trash_humongous_region_at(r);
 527       } else {
 528         assert(r->has_live(),
 529                "Region " SIZE_FORMAT " should have live", r->index());
 530       }
 531     } else if (r->is_humongous_continuation()) {
 532       // If we hit continuation, the non-live humongous starts should have been trashed already
 533       assert(r->humongous_start_region()->has_live(),
 534              "Region " SIZE_FORMAT " should have live", r->index());
 535     } else if (r->is_regular()) {
 536       if (!r->has_live()) {

 537         r->make_trash_immediate();
 538       }
 539     }
 540   }
 541 };
 542 
 543 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
 544   ShenandoahHeap* heap = ShenandoahHeap::heap();
 545 
 546   uint n_workers = heap->workers()->active_workers();
 547   size_t n_regions = heap->num_regions();
 548 
 549   // What we want to accomplish: have the dense prefix of data, while still balancing
 550   // out the parallel work.
 551   //
 552   // Assuming the amount of work is driven by the live data that needs moving, we can slice
 553   // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
 554   // thread takes all regions in its prefix subset, and then it takes some regions from
 555   // the tail.
 556   //

 665   for (size_t wid = 0; wid < n_workers; wid++) {
 666     ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
 667     ShenandoahHeapRegion* r = it.next();
 668     while (r != nullptr) {
 669       size_t idx = r->index();
 670       assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
 671       assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
 672       map.at_put(idx, true);
 673       r = it.next();
 674     }
 675   }
 676 
 677   for (size_t rid = 0; rid < n_regions; rid++) {
 678     bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
 679     bool is_distributed = map.at(rid);
 680     assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
 681   }
 682 #endif
 683 }
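
Most of distribute_slices() falls outside this hunk; the comment at its head describes the intent. For orientation only, here is a deliberately simplified sketch of that kind of split, using a hypothetical toy_distribute() helper: each worker owns a contiguous chunk of the prefix and the tail regions are dealt round-robin. The elided code additionally sizes the prefix from live data and skips non-candidate regions.

#include <cstddef>
#include <vector>

// Assumes prefix <= n_regions; one slice (list of region indices) per worker.
void toy_distribute(size_t n_regions, size_t prefix, size_t n_workers,
                    std::vector<std::vector<size_t>>& slices) {
  slices.assign(n_workers, {});
  size_t per_worker = prefix / n_workers;
  for (size_t wid = 0; wid < n_workers; wid++) {
    for (size_t i = wid * per_worker; i < (wid + 1) * per_worker; i++) {
      slices[wid].push_back(i);          // contiguous prefix chunk
    }
  }
  for (size_t i = per_worker * n_workers; i < n_regions; i++) {
    slices[i % n_workers].push_back(i);  // remaining regions dealt round-robin
  }
}
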
 684 
 685 void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
 686   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
 687   ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
 688 
 689   ShenandoahHeap* heap = ShenandoahHeap::heap();
 690 
 691   // About to figure out which regions can be compacted, make sure pinning status
 692   // had been updated in GC prologue.
 693   heap->assert_pinned_region_status();
 694 
 695   {
 696     // Trash the immediately collectible regions before computing addresses
 697     ShenandoahTrashImmediateGarbageClosure tigcl;
 698     heap->heap_region_iterate(&tigcl);
 699 
 700     // Make sure regions are in good state: committed, active, clean.
 701     // This is needed because we are potentially sliding the data through them.
 702     ShenandoahEnsureHeapActiveClosure ecl;
 703     heap->heap_region_iterate(&ecl);
 704   }
 705 
 706   // Compute the new addresses for regular objects
 707   {
 708     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 709 
 710     distribute_slices(worker_slices);
 711 
 712     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);



 713     heap->workers()->run_task(&task);
 714   }
 715 
 716   // Compute the new addresses for humongous objects
 717   {
 718     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 719     calculate_target_humongous_objects();
 720   }
 721 }
 722 
 723 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 724 private:
 725   ShenandoahHeap* const _heap;
 726   ShenandoahMarkingContext* const _ctx;
 727 
 728   template <class T>
 729   inline void do_oop_work(T* p) {
 730     T o = RawAccess<>::oop_load(p);
 731     if (!CompressedOops::is_null(o)) {
 732       oop obj = CompressedOops::decode_not_null(o);

 766 
 767 class ShenandoahAdjustPointersTask : public WorkerTask {
 768 private:
 769   ShenandoahHeap*          const _heap;
 770   ShenandoahRegionIterator       _regions;
 771 
 772 public:
 773   ShenandoahAdjustPointersTask() :
 774     WorkerTask("Shenandoah Adjust Pointers"),
 775     _heap(ShenandoahHeap::heap()) {
 776   }
 777 
 778   void work(uint worker_id) {
 779     ShenandoahParallelWorkerSession worker_session(worker_id);
 780     ShenandoahAdjustPointersObjectClosure obj_cl;
 781     ShenandoahHeapRegion* r = _regions.next();
 782     while (r != nullptr) {
 783       if (!r->is_humongous_continuation() && r->has_live()) {
 784         _heap->marked_object_iterate(r, &obj_cl);
 785       }
 786       r = _regions.next();
 787     }
 788   }
 789 };
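
The body of do_oop_work() is elided in this hunk, but the phase-3 idea is simply: for every reference field of every live object (and every root), if the referent was assigned a new location in phase 2, rewrite the field to point there. A toy version with plain pointers and a hypothetical side table (ToyObj, ForwardTable, adjust_field are illustrative names only) follows; the real closure instead reads the forwardee from the object's mark word and also consults the marking context:

#include <unordered_map>

struct ToyObj;  // opaque payload type for the sketch

// Forwarding info recorded during phase 2: old location -> new location.
using ForwardTable = std::unordered_map<ToyObj*, ToyObj*>;

// Rewrite one reference field in place if its referent is going to move.
inline void adjust_field(ToyObj** field, const ForwardTable& fwd) {
  ToyObj* obj = *field;
  if (obj == nullptr) return;            // null references stay null
  auto it = fwd.find(obj);
  if (it != fwd.end()) {
    *field = it->second;                 // point at the post-compaction address
  }
}
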
 790 
 791 class ShenandoahAdjustRootPointersTask : public WorkerTask {
 792 private:
 793   ShenandoahRootAdjuster* _rp;
 794   PreservedMarksSet* _preserved_marks;
 795 public:
 796   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
 797     WorkerTask("Shenandoah Adjust Root Pointers"),
 798     _rp(rp),
 799     _preserved_marks(preserved_marks) {}
 800 
 801   void work(uint worker_id) {
 802     ShenandoahParallelWorkerSession worker_session(worker_id);
 803     ShenandoahAdjustPointersClosure cl;
 804     _rp->roots_do(worker_id, &cl);
 805     _preserved_marks->get(worker_id)->adjust_during_full_gc();

 866     _worker_slices(worker_slices) {
 867   }
 868 
 869   void work(uint worker_id) {
 870     ShenandoahParallelWorkerSession worker_session(worker_id);
 871     ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
 872 
 873     ShenandoahCompactObjectsClosure cl(worker_id);
 874     ShenandoahHeapRegion* r = slice.next();
 875     while (r != nullptr) {
 876       assert(!r->is_humongous(), "must not get humongous regions here");
 877       if (r->has_live()) {
 878         _heap->marked_object_iterate(r, &cl);
 879       }
 880       r->set_top(r->new_top());
 881       r = slice.next();
 882     }
 883   }
 884 };
 885 
 886 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
 887 private:
 888   ShenandoahHeap* const _heap;
 889   size_t _live;


 890 
 891 public:
 892   ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
 893     _heap->free_set()->clear();
 894   }
 895 
 896   void heap_region_do(ShenandoahHeapRegion* r) {
 897     assert (!r->is_cset(), "cset regions should have been demoted already");
 898 
 899     // Need to reset the complete-top-at-mark-start pointer here because
 900     // the complete marking bitmap is no longer valid. This ensures
 901     // size-based iteration in marked_object_iterate().
 902     // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
 903     // pinned regions.
 904     if (!r->is_pinned()) {
 905       _heap->complete_marking_context()->reset_top_at_mark_start(r);
 906     }
 907 
 908     size_t live = r->used();
 909 
 910     // Make empty regions that have been allocated into regular
 911     if (r->is_empty() && live > 0) {
 912       r->make_regular_bypass();
 913     }
 914 
 915     // Reclaim regular regions that became empty
 916     if (r->is_regular() && live == 0) {
 917       r->make_trash();
 918     }
 919 
 920     // Recycle all trash regions
 921     if (r->is_trash()) {
 922       live = 0;
 923       r->recycle();
 924     }
 925 
 926     r->set_live_data(live);
 927     r->reset_alloc_metadata();
 928     _live += live;
 929   }
 930 
 931   size_t get_live() {
 932     return _live;
 933   }
 934 };
 935 
 936 void ShenandoahFullGC::compact_humongous_objects() {
 937   // Compact humongous regions, based on their fwdptr objects.
 938   //
 939   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
 940   // humongous regions are already compacted, and do not require further moves, which alleviates
 941   // sliding costs. We may consider doing this in parallel in future.
 942 
 943   ShenandoahHeap* heap = ShenandoahHeap::heap();
 944 
 945   for (size_t c = heap->num_regions(); c > 0; c--) {
 946     ShenandoahHeapRegion* r = heap->get_region(c - 1);
 947     if (r->is_humongous_start()) {
 948       oop old_obj = cast_to_oop(r->bottom());
 949       if (!old_obj->is_forwarded()) {
 950         // No need to move the object, it stays at the same slot
 951         continue;
 952       }
 953       size_t words_size = old_obj->size();
 954       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 955 
 956       size_t old_start = r->index();
 957       size_t old_end   = old_start + num_regions - 1;
 958       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
 959       size_t new_end   = new_start + num_regions - 1;
 960       assert(old_start != new_start, "must be real move");
 961       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
 962 
 963       Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
 964       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
 965 
 966       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
 967       new_obj->init_mark();
 968 
 969       {

 970         for (size_t c = old_start; c <= old_end; c++) {
 971           ShenandoahHeapRegion* r = heap->get_region(c);

 972           r->make_regular_bypass();
 973           r->set_top(r->bottom());
 974         }
 975 
 976         for (size_t c = new_start; c <= new_end; c++) {
 977           ShenandoahHeapRegion* r = heap->get_region(c);
 978           if (c == new_start) {
 979             r->make_humongous_start_bypass();
 980           } else {
 981             r->make_humongous_cont_bypass();
 982           }
 983 
 984           // Trailing region may be non-full, record the remainder there
 985           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 986           if ((c == new_end) && (remainder != 0)) {
 987             r->set_top(r->bottom() + remainder);
 988           } else {
 989             r->set_top(r->end());
 990           }
 991 
 992           r->reset_alloc_metadata();
 993         }
 994       }
 995     }
 996   }
 997 }
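
As a concrete, purely hypothetical example of the region math above: with 2 MB regions (262144 heap words at 8 bytes per word) and a humongous object of 600000 words, required_regions(600000 * HeapWordSize) is 3. After the copy, the first target region becomes a humongous start, the second a continuation, and for the trailing region the remainder is 600000 mod 262144 = 75712 words, so its top is set to bottom() + 75712 rather than end().
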
 998 
 999 // This is slightly different to ShHeap::reset_next_mark_bitmap:
1000 // we need to remain able to walk pinned regions.
 1001 // Since pinned regions do not move and don't get compacted, we will get holes with

1027 };
1028 
1029 void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
1030   GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
1031   ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
1032 
1033   ShenandoahHeap* heap = ShenandoahHeap::heap();
1034 
1035   // Compact regular objects first
1036   {
1037     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
1038     ShenandoahCompactObjectsTask compact_task(worker_slices);
1039     heap->workers()->run_task(&compact_task);
1040   }
1041 
1042   // Compact humongous objects after regular object moves
1043   {
1044     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1045     compact_humongous_objects();
1046   }
1047 
1048   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1049   // and must ensure the bitmap is in sync.
1050   {
1051     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1052     ShenandoahMCResetCompleteBitmapTask task;
1053     heap->workers()->run_task(&task);
1054   }
1055 
1056   // Bring regions in proper states after the collection, and set heap properties.
1057   {
1058     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1059 
1060     ShenandoahPostCompactClosure post_compact;
1061     heap->heap_region_iterate(&post_compact);
1062     heap->set_used(post_compact.get_live());
1063 
1064     heap->collection_set()->clear();
1065     heap->free_set()->rebuild();
1066   }
1067 
1068   heap->clear_cancelled_gc();
 1069 }

src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp (new version)

   1 /*
   2  * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "compiler/oopMap.hpp"
  29 #include "gc/shared/continuationGCSupport.hpp"
  30 #include "gc/shared/gcTraceTime.inline.hpp"
  31 #include "gc/shared/preservedMarks.inline.hpp"
  32 #include "gc/shared/tlab_globals.hpp"
  33 #include "gc/shared/workerThread.hpp"
  34 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  35 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahFullGC.hpp"
  39 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  42 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  45 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  46 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  47 #include "gc/shenandoah/shenandoahMetrics.hpp"
  48 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  49 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  50 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  51 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  52 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  53 #include "gc/shenandoah/shenandoahUtils.hpp"
  54 #include "gc/shenandoah/shenandoahVerifier.hpp"
  55 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  56 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  57 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  58 #include "memory/metaspaceUtils.hpp"
  59 #include "memory/universe.hpp"
  60 #include "oops/compressedOops.inline.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "runtime/javaThread.hpp"
  63 #include "runtime/orderAccess.hpp"
  64 #include "runtime/vmThread.hpp"
  65 #include "utilities/copy.hpp"
  66 #include "utilities/events.hpp"
  67 #include "utilities/growableArray.hpp"
  68 
  69 // After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
  70 // registering all objects between bottom() and top(), and setting remembered set cards to
  71 // DIRTY if they hold interesting pointers.
  72 class ShenandoahReconstructRememberedSetTask : public WorkerTask {
  73 private:
  74   ShenandoahRegionIterator _regions;
  75 
  76 public:
  77   ShenandoahReconstructRememberedSetTask() :
  78     WorkerTask("Shenandoah Reset Bitmap") { }
  79 
  80   void work(uint worker_id) {
  81     ShenandoahParallelWorkerSession worker_session(worker_id);
  82     ShenandoahHeapRegion* r = _regions.next();
  83     ShenandoahHeap* heap = ShenandoahHeap::heap();
  84     RememberedScanner* scanner = heap->card_scan();
  85     ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers;
  86 
  87     while (r != nullptr) {
  88       if (r->is_old() && r->is_active()) {
  89         HeapWord* obj_addr = r->bottom();
  90         if (r->is_humongous_start()) {
  91           // First, clear the remembered set
  92           oop obj = cast_to_oop(obj_addr);
  93           size_t size = obj->size();
  94 
  95           // First, clear the remembered set for all spanned humongous regions
  96           size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  97           size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words();
  98           scanner->reset_remset(r->bottom(), region_span);
  99           size_t region_index = r->index();
 100           ShenandoahHeapRegion* humongous_region = heap->get_region(region_index);
 101           while (num_regions-- != 0) {
 102             scanner->reset_object_range(humongous_region->bottom(), humongous_region->end());
 103             region_index++;
 104             humongous_region = heap->get_region(region_index);
 105           }
 106 
 107           // Then register the humongous object and DIRTY relevant remembered set cards
 108           scanner->register_object_without_lock(obj_addr);
 109           obj->oop_iterate(&dirty_cards_for_interesting_pointers);
 110         } else if (!r->is_humongous()) {
 111           // First, clear the remembered set
 112           scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words());
 113           scanner->reset_object_range(r->bottom(), r->end());
 114 
 115           // Then iterate over all objects, registering object and DIRTYing relevant remembered set cards
 116           HeapWord* t = r->top();
 117           while (obj_addr < t) {
 118             oop obj = cast_to_oop(obj_addr);
 119             size_t size = obj->size();
 120             scanner->register_object_without_lock(obj_addr);
 121             obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers);
 122           }
 123         } // else, ignore humongous continuation region
 124       }
 125       // else, this region is FREE or YOUNG or inactive and we can ignore it.
 126       // TODO: Assert this.
 127       r = _regions.next();
 128     }
 129   }
 130 };
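
For readers new to the generational mode, the task above rebuilds the card-based remembered set from scratch: cards of old regions are cleared, objects are re-registered, and any card covering a field that points into the young generation is set to DIRTY. A toy model of that last step follows, with hypothetical rebuild_cards()/ToyField names and a made-up card granularity; the real RememberedScanner also registers object starts (reset_object_range / register_object_without_lock above), which this sketch ignores:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kCardShift = 9;              // toy granularity: one card per 512 slots
enum Card : uint8_t { CLEAN = 0, DIRTY = 1 };

struct ToyField { size_t addr; bool points_into_young; };

// Re-derive card states for one old region from its live reference fields.
void rebuild_cards(const std::vector<ToyField>& fields,
                   std::vector<uint8_t>& cards, size_t region_base) {
  std::fill(cards.begin(), cards.end(), CLEAN);   // analogue of reset_remset()
  for (const ToyField& f : fields) {
    if (f.points_into_young) {                    // an "interesting pointer"
      cards[(f.addr - region_base) >> kCardShift] = DIRTY;
    }
  }
}
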
 131 
 132 ShenandoahFullGC::ShenandoahFullGC() :
 133   _gc_timer(ShenandoahHeap::heap()->gc_timer()),
 134   _preserved_marks(new PreservedMarksSet(true)) {}
 135 
 136 ShenandoahFullGC::~ShenandoahFullGC() {
 137   delete _preserved_marks;
 138 }
 139 
 140 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
 141   vmop_entry_full(cause);
  142   // Always succeeds
 143   return true;
 144 }
 145 
 146 void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
 147   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 148   TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
 149   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
 150 
 151   heap->try_inject_alloc_failure();
 152   VM_ShenandoahFullGC op(cause, this);
 153   VMThread::execute(&op);
 154 }
 155 
 156 void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
 157   static const char* msg = "Pause Full";
 158   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
 159   EventMark em("%s", msg);
 160 
 161   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 162                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
 163                               "full gc");
 164 
 165   op_full(cause);
 166 }
 167 
 168 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
 169   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 170   ShenandoahMetricsSnapshot metrics;
 171   metrics.snap_before();
 172 
 173   // Perform full GC
 174   do_it(cause);
 175 
 176   metrics.snap_after();
 177   if (heap->mode()->is_generational()) {
 178     heap->mmu_tracker()->record_full(heap->global_generation(), GCId::current());
 179     heap->log_heap_status("At end of Full GC");
 180 
 181     // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
 182     // made valid by the time Full GC completes.
 183     assert(heap->old_generation()->used_regions_size() <= heap->old_generation()->max_capacity(),
 184            "Old generation affiliated regions must be less than capacity");
 185     assert(heap->young_generation()->used_regions_size() <= heap->young_generation()->max_capacity(),
 186            "Young generation affiliated regions must be less than capacity");
 187 
 188     assert((heap->young_generation()->used() + heap->young_generation()->get_humongous_waste())
 189            <= heap->young_generation()->used_regions_size(), "Young consumed can be no larger than span of affiliated regions");
 190     assert((heap->old_generation()->used() + heap->old_generation()->get_humongous_waste())
 191            <= heap->old_generation()->used_regions_size(), "Old consumed can be no larger than span of affiliated regions");
 192 
 193   }
 194   if (metrics.is_good_progress()) {
 195     ShenandoahHeap::heap()->notify_gc_progress();
 196   } else {
 197     // Nothing to do. Tell the allocation path that we have failed to make
 198     // progress, and it can finally fail.
 199     ShenandoahHeap::heap()->notify_gc_no_progress();
 200   }
 201 }
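
To make the generational assertions above concrete with hypothetical numbers: with 2 MB regions, an old generation holding ten affiliated regions has a used_regions_size() of 20 MB; the checks then require old used() plus humongous waste to be at most that 20 MB, and the 20 MB itself to be within old max_capacity(). The same pair of constraints is asserted for the young generation.
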
 202 
 203 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 204   ShenandoahHeap* heap = ShenandoahHeap::heap();
 205   // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
 206   heap->set_gc_generation(heap->global_generation());
 207 
 208   if (heap->mode()->is_generational()) {
 209     // No need for old_gen->increase_used() as this was done when plabs were allocated.
 210     heap->set_young_evac_reserve(0);
 211     heap->set_old_evac_reserve(0);
 212     heap->reset_old_evac_expended();
 213     heap->set_promoted_reserve(0);
 214 
 215     // Full GC supersedes any marking or coalescing in old generation.
 216     heap->cancel_old_gc();
 217   }
 218 
 219   if (ShenandoahVerify) {
 220     heap->verifier()->verify_before_fullgc();
 221   }
 222 
 223   if (VerifyBeforeGC) {
 224     Universe::verify();
 225   }
 226 
 227   // Degenerated GC may carry concurrent root flags when upgrading to
 228   // full GC. We need to reset it before mutators resume.
 229   heap->set_concurrent_strong_root_in_progress(false);
 230   heap->set_concurrent_weak_root_in_progress(false);
 231 
 232   heap->set_full_gc_in_progress(true);
 233 
 234   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 235   assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
 236 
 237   {

 241 
 242   {
 243     ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
 244     // Full GC is supposed to recover from any GC state:
 245 
 246     // a0. Remember if we have forwarded objects
 247     bool has_forwarded_objects = heap->has_forwarded_objects();
 248 
 249     // a1. Cancel evacuation, if in progress
 250     if (heap->is_evacuation_in_progress()) {
 251       heap->set_evacuation_in_progress(false);
 252     }
 253     assert(!heap->is_evacuation_in_progress(), "sanity");
 254 
 255     // a2. Cancel update-refs, if in progress
 256     if (heap->is_update_refs_in_progress()) {
 257       heap->set_update_refs_in_progress(false);
 258     }
 259     assert(!heap->is_update_refs_in_progress(), "sanity");
 260 
 261     // b. Cancel all concurrent marks, if in progress
 262     if (heap->is_concurrent_mark_in_progress()) {
 263       heap->cancel_concurrent_mark();

 264     }
 265     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 266 
 267     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
 268     if (has_forwarded_objects) {
 269       update_roots(true /*full_gc*/);
 270     }
 271 
 272     // d. Reset the bitmaps for new marking
 273     heap->global_generation()->reset_mark_bitmap();
 274     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 275     assert(!heap->global_generation()->is_mark_complete(), "sanity");
 276 
 277     // e. Abandon reference discovery and clear all discovered references.
 278     ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
 279     rp->abandon_partial_discovery();
 280 
 281     // f. Sync pinned region status from the CP marks
 282     heap->sync_pinned_region_status();
 283 
 284     if (heap->mode()->is_generational()) {
 285       for (size_t i = 0; i < heap->num_regions(); i++) {
 286         ShenandoahHeapRegion* r = heap->get_region(i);
 287         if (r->get_top_before_promote() != nullptr) {
 288           r->restore_top_before_promote();
 289         }
 290       }
 291     }
 292 
 293     // The rest of prologue:
 294     _preserved_marks->init(heap->workers()->active_workers());
 295 
 296     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 297   }
 298 
 299   if (UseTLAB) {
 300     // TODO: Do we need to explicitly retire PLABs?
 301     heap->gclabs_retire(ResizeTLAB);
 302     heap->tlabs_retire(ResizeTLAB);
 303   }
 304 
 305   OrderAccess::fence();
 306 
 307   phase1_mark_heap();
 308 
 309   // Once marking is done, which may have fixed up forwarded objects, we can drop it.
 310   // Coming out of Full GC, we would not have any forwarded objects.
 311   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 312   heap->set_has_forwarded_objects(false);
 313 
 314   heap->set_full_gc_move_in_progress(true);
 315 
 316   // Setup workers for the rest
 317   OrderAccess::fence();
 318 
 319   // Initialize worker slices
 320   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
 321   for (uint i = 0; i < heap->max_workers(); i++) {
 322     worker_slices[i] = new ShenandoahHeapRegionSet();
 323   }
 324 
 325   {
 326     // The rest of code performs region moves, where region status is undefined
 327     // until all phases run together.
 328     ShenandoahHeapLocker lock(heap->lock());
 329 
 330     phase2_calculate_target_addresses(worker_slices);
 331 
 332     OrderAccess::fence();
 333 
 334     phase3_update_references();
 335 
 336     phase4_compact_objects(worker_slices);
 337 
 338     phase5_epilog();
 339   }
 340 
 341   {
 342     // Epilogue
 343     // TODO: Merge with phase5_epilog?
 344     _preserved_marks->restore(heap->workers());
 345     _preserved_marks->reclaim();
 346 
 347     if (heap->mode()->is_generational()) {
 348       ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
 349       ShenandoahReconstructRememberedSetTask task;
 350       heap->workers()->run_task(&task);
 351     }
 352   }
 353 
 354   // Resize metaspace
 355   MetaspaceGC::compute_new_size();
 356 
 357   // Free worker slices
 358   for (uint i = 0; i < heap->max_workers(); i++) {
 359     delete worker_slices[i];
 360   }
 361   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 362 
 363   heap->set_full_gc_move_in_progress(false);
 364   heap->set_full_gc_in_progress(false);
 365 
 366   if (ShenandoahVerify) {
 367     heap->verifier()->verify_after_fullgc();
 368   }
 369 
 370   // Humongous regions are promoted on demand and are accounted for by normal Full GC mechanisms.
 371   if (VerifyAfterGC) {
 372     Universe::verify();
 373   }
 374 
 375   {
 376     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
 377     heap->post_full_gc_dump(_gc_timer);
 378   }
 379 }
 380 
 381 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 382 private:
 383   ShenandoahMarkingContext* const _ctx;
 384 
 385 public:
 386   ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 387 
 388   void heap_region_do(ShenandoahHeapRegion *r) {
 389     if (r->affiliation() != FREE) {
 390       _ctx->capture_top_at_mark_start(r);
 391       r->clear_live_data();
 392     }
 393   }
 394 
 395   bool is_thread_safe() { return true; }
 396 };
 397 
 398 void ShenandoahFullGC::phase1_mark_heap() {
 399   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 400   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 401 
 402   ShenandoahHeap* heap = ShenandoahHeap::heap();
 403 
 404   ShenandoahPrepareForMarkClosure cl;
 405   heap->parallel_heap_region_iterate(&cl);
 406 
 407   heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());
 408 
 409   ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
 410   // enable ("weak") refs discovery
 411   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 412 
 413   ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
 414   mark.mark();
 415   heap->parallel_cleaning(true /* full_gc */);
 416 
 417   size_t live_bytes_in_old = 0;
 418   for (size_t i = 0; i < heap->num_regions(); i++) {
 419     ShenandoahHeapRegion* r = heap->get_region(i);
 420     if (r->is_old()) {
 421       live_bytes_in_old += r->get_live_data_bytes();
 422     }
 423   }
 424   log_info(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old));
 425   heap->old_generation()->set_live_bytes_after_last_mark(live_bytes_in_old);
 426 }
 427 
 428 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 429 private:
 430   PreservedMarksSet*        const _preserved_marks;
 431   ShenandoahHeap*           const _heap;
 432   ShenandoahHeapRegionSet** const _worker_slices;
 433   size_t                    const _num_workers;
 434 
 435 public:
 436   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
 437                                      ShenandoahHeapRegionSet **worker_slices,
 438                                      size_t num_workers);
 439 
 440   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 441     // Empty region: get it into the slice to defragment the slice itself.
 442     // We could have skipped this without violating correctness, but we really
 443     // want to compact all live regions to the start of the heap, which sometimes
 444     // means moving them into the fully empty regions.
 445     if (r->is_empty()) return true;
 446 
 447     // Can move the region, and this is not the humongous region. Humongous
 448     // moves are special cased here, because their moves are handled separately.
 449     return r->is_stw_move_allowed() && !r->is_humongous();
 450   }
 451 
 452   void work(uint worker_id);
 453 };
 454 
 455 class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure {
 456 private:
 457   PreservedMarks*          const _preserved_marks;
 458   ShenandoahHeap*          const _heap;
 459   uint                           _tenuring_threshold;
 460 
 461   // _empty_regions is a thread-local list of heap regions that have been completely emptied by this worker thread's
 462   // compaction efforts.  The worker thread that drives these efforts adds compacted regions to this list if the
 463   // region has not been compacted onto itself.
 464   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 465   int _empty_regions_pos;
 466   ShenandoahHeapRegion*          _old_to_region;
 467   ShenandoahHeapRegion*          _young_to_region;
 468   ShenandoahHeapRegion*          _from_region;
 469   ShenandoahAffiliation          _from_affiliation;
 470   HeapWord*                      _old_compact_point;
 471   HeapWord*                      _young_compact_point;
 472   uint                           _worker_id;
 473 
 474 public:
 475   ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
 476                                                           GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 477                                                           ShenandoahHeapRegion* old_to_region,
 478                                                           ShenandoahHeapRegion* young_to_region, uint worker_id) :
 479       _preserved_marks(preserved_marks),
 480       _heap(ShenandoahHeap::heap()),
 481       _tenuring_threshold(0),
 482       _empty_regions(empty_regions),
 483       _empty_regions_pos(0),
 484       _old_to_region(old_to_region),
 485       _young_to_region(young_to_region),
 486       _from_region(nullptr),
 487       _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr),
 488       _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr),
 489       _worker_id(worker_id) {
 490     if (_heap->mode()->is_generational()) {
 491       _tenuring_threshold = _heap->age_census()->tenuring_threshold();
 492     }
 493   }
 494 
 495   void set_from_region(ShenandoahHeapRegion* from_region) {
 496     _from_region = from_region;
 497     _from_affiliation = from_region->affiliation();
 498     if (_from_region->has_live()) {
 499       if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) {
 500         if (_old_to_region == nullptr) {
 501           _old_to_region = from_region;
 502           _old_compact_point = from_region->bottom();
 503         }
 504       } else {
 505         assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
 506         if (_young_to_region == nullptr) {
 507           _young_to_region = from_region;
 508           _young_compact_point = from_region->bottom();
 509         }
 510       }
 511     } // else, we won't iterate over this _from_region so we don't need to set up to region to hold copies
 512   }
 513 
 514   void finish() {
 515     finish_old_region();
 516     finish_young_region();
 517   }
 518 
 519   void finish_old_region() {
 520     if (_old_to_region != nullptr) {
 521       log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
 522                     _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
 523       _old_to_region->set_new_top(_old_compact_point);
 524       _old_to_region = nullptr;
 525     }
 526   }
 527 
 528   void finish_young_region() {
 529     if (_young_to_region != nullptr) {
 530       log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
 531                     _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
 532       _young_to_region->set_new_top(_young_compact_point);
 533       _young_to_region = nullptr;
 534     }
 535   }
 536 
 537   bool is_compact_same_region() {
 538     return (_from_region == _old_to_region) || (_from_region == _young_to_region);
 539   }
 540 
 541   int empty_regions_pos() {
 542     return _empty_regions_pos;
 543   }
 544 
 545   void do_object(oop p) {
 546     assert(_from_region != nullptr, "must set before work");
 547     assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
 548            "Object must reside in _from_region");
 549     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 550     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 551 
 552     size_t obj_size = p->size();
 553     uint from_region_age = _from_region->age();
 554     uint object_age = p->age();
 555 
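         // For illustration of the promotion test below: a YOUNG object is promoted when its region's age plus
         // its own age reaches the tenuring threshold, e.g. with a threshold of 7, an object of age 3 in a
         // region of age 4 qualifies and is forwarded into an OLD to-region if one can be found.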
 556     bool promote_object = false;
 557     if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
 558         (from_region_age + object_age >= _tenuring_threshold)) {
 559       if ((_old_to_region != nullptr) && (_old_compact_point + obj_size > _old_to_region->end())) {
 560         finish_old_region();
 561         _old_to_region = nullptr;
 562       }
 563       if (_old_to_region == nullptr) {
 564         if (_empty_regions_pos < _empty_regions.length()) {
 565           ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
 566           _empty_regions_pos++;
 567           new_to_region->set_affiliation(OLD_GENERATION);
 568           _old_to_region = new_to_region;
 569           _old_compact_point = _old_to_region->bottom();
 570           promote_object = true;
 571         }
 572         // Else this worker thread does not yet have any empty regions into which this aged object can be promoted, so
 573         // we leave promote_object as false, deferring the promotion.
 574       } else {
 575         promote_object = true;
 576       }
 577     }
 578 
 579     if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
 580       assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
 581       if (_old_compact_point + obj_size > _old_to_region->end()) {
 582         ShenandoahHeapRegion* new_to_region;
 583 
 584         log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
 585                       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _old_to_region->index(),
 586                       p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));
 587 
 588         // Object does not fit.  Get a new _old_to_region.
 589         finish_old_region();
 590         if (_empty_regions_pos < _empty_regions.length()) {
 591           new_to_region = _empty_regions.at(_empty_regions_pos);
 592           _empty_regions_pos++;
 593           new_to_region->set_affiliation(OLD_GENERATION);
 594         } else {
 595           // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
 596           // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
 597           // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
 598           new_to_region = _from_region;
 599         }
 600 
 601         assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
 602         assert(new_to_region != nullptr, "must not be nullptr");
 603         _old_to_region = new_to_region;
 604         _old_compact_point = _old_to_region->bottom();
 605       }
 606 
 607       // Object fits into current region, record new location:
 608       assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
 609       shenandoah_assert_not_forwarded(nullptr, p);
 610       _preserved_marks->push_if_necessary(p, p->mark());
 611       p->forward_to(cast_to_oop(_old_compact_point));
 612       _old_compact_point += obj_size;
 613     } else {
 614       assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
 615              "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
 616       assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");
 617 
 618       // After full gc compaction, all regions have age 0.  Embed the region's age into the object's age in order to preserve
 619       // tenuring progress.
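           // For example, assuming increase_object_age() adds the given delta to the object's current age: an
           // object of age 2 in a region of age 3 ends up with age 5, or age 6 if this is an aging cycle.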
 620       if (_heap->is_aging_cycle()) {
 621         _heap->increase_object_age(p, from_region_age + 1);
 622       } else {
 623         _heap->increase_object_age(p, from_region_age);
 624       }
 625 
 626       if (_young_compact_point + obj_size > _young_to_region->end()) {
 627         ShenandoahHeapRegion* new_to_region;
 628 
 629         log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
 630                       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _young_to_region->index(),
 631                       p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));
 632 
 633         // Object does not fit.  Get a new _young_to_region.
 634         finish_young_region();
 635         if (_empty_regions_pos < _empty_regions.length()) {
 636           new_to_region = _empty_regions.at(_empty_regions_pos);
 637           _empty_regions_pos++;
 638           new_to_region->set_affiliation(YOUNG_GENERATION);
 639         } else {
 640           // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
 641           // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
 642           // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
 643           new_to_region = _from_region;
 644         }
 645 
 646         assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
 647         assert(new_to_region != nullptr, "must not be nullptr");
 648         _young_to_region = new_to_region;
 649         _young_compact_point = _young_to_region->bottom();
 650       }
 651 
 652       // Object fits into current region, record new location:
 653       assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
 654       shenandoah_assert_not_forwarded(nullptr, p);
 655       _preserved_marks->push_if_necessary(p, p->mark());
 656       p->forward_to(cast_to_oop(_young_compact_point));
 657       _young_compact_point += obj_size;
 658     }
 659   }
 660 };
 661 
 662 
 663 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 664 private:
 665   PreservedMarks*          const _preserved_marks;
 666   ShenandoahHeap*          const _heap;
 667   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 668   int _empty_regions_pos;
 669   ShenandoahHeapRegion*          _to_region;
 670   ShenandoahHeapRegion*          _from_region;
 671   HeapWord* _compact_point;
 672 
 673 public:
 674   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 675                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 676                                               ShenandoahHeapRegion* to_region) :
 677     _preserved_marks(preserved_marks),
 678     _heap(ShenandoahHeap::heap()),
 679     _empty_regions(empty_regions),
 680     _empty_regions_pos(0),
 681     _to_region(to_region),
 682     _from_region(nullptr),
 683     _compact_point(to_region->bottom()) {}
 684 
 685   void set_from_region(ShenandoahHeapRegion* from_region) {
 686     _from_region = from_region;
 687   }
 688 
 689   void finish_region() {
 690     assert(_to_region != nullptr, "should not happen");
 691     assert(!_heap->mode()->is_generational(), "Generational GC should use a different Closure");
 692     _to_region->set_new_top(_compact_point);
 693   }
 694 
 695   bool is_compact_same_region() {
 696     return _from_region == _to_region;
 697   }
 698 
 699   int empty_regions_pos() {
 700     return _empty_regions_pos;
 701   }
 702 
 703   void do_object(oop p) {
 704     assert(_from_region != nullptr, "must set before work");
 705     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 706     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 707 
 708     size_t obj_size = p->size();
 709     if (_compact_point + obj_size > _to_region->end()) {
 710       finish_region();
 711 

 717       } else {
 718         // Out of empty region? Compact within the same region.
 719         new_to_region = _from_region;
 720       }
 721 
 722       assert(new_to_region != _to_region, "must not reuse same to-region");
 723       assert(new_to_region != nullptr, "must not be null");
 724       _to_region = new_to_region;
 725       _compact_point = _to_region->bottom();
 726     }
 727 
 728     // Object fits into current region, record new location:
 729     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 730     shenandoah_assert_not_forwarded(nullptr, p);
 731     _preserved_marks->push_if_necessary(p, p->mark());
 732     p->forward_to(cast_to_oop(_compact_point));
 733     _compact_point += obj_size;
 734   }
 735 };
 736 





 737 
 738 ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
 739                                                                        ShenandoahHeapRegionSet **worker_slices,
 740                                                                        size_t num_workers) :
 741     WorkerTask("Shenandoah Prepare For Compaction"),
 742     _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()),
 743     _worker_slices(worker_slices), _num_workers(num_workers) { }
 744 
 745 
 746 void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
 747   ShenandoahParallelWorkerSession worker_session(worker_id);
 748   ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 749   ShenandoahHeapRegionSetIterator it(slice);
 750   ShenandoahHeapRegion* from_region = it.next();
 751   // No work?
 752   if (from_region == nullptr) {
 753     return;
 754   }
 755 
 756   // Sliding compaction. Walk all regions in the slice, and compact them.
 757   // Remember empty regions and reuse them as needed.
 758   ResourceMark rm;



 759 
 760   GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());



 761 
 762   if (_heap->mode()->is_generational()) {
 763     ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr;
 764     ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr;
 765     ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
 766                                                                empty_regions,
 767                                                                old_to_region, young_to_region,
 768                                                                worker_id);
 769     while (from_region != nullptr) {
 770       assert(is_candidate_region(from_region), "Sanity");
 771       log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
 772                     worker_id, from_region->affiliation_name(),
 773                     from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
 774       cl.set_from_region(from_region);
 775       if (from_region->has_live()) {
 776         _heap->marked_object_iterate(from_region, &cl);
 777       }
 778       // Compacted the region to somewhere else? From-region is empty then.
 779       if (!cl.is_compact_same_region()) {
 780         empty_regions.append(from_region);
 781       }
 782       from_region = it.next();
 783     }
 784     cl.finish();
 785 
 786     // Mark all remaining regions as empty
 787     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 788       ShenandoahHeapRegion* r = empty_regions.at(pos);
 789       r->set_new_top(r->bottom());
 790     }
 791   } else {
 792     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

 793     while (from_region != nullptr) {
 794       assert(is_candidate_region(from_region), "Sanity");

 795       cl.set_from_region(from_region);
 796       if (from_region->has_live()) {
 797         _heap->marked_object_iterate(from_region, &cl);
 798       }
 799 
 800       // Compacted the region to somewhere else? From-region is empty then.
 801       if (!cl.is_compact_same_region()) {
 802         empty_regions.append(from_region);
 803       }
 804       from_region = it.next();
 805     }
 806     cl.finish_region();
 807 
 808     // Mark all remaining regions as empty
 809     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 810       ShenandoahHeapRegion* r = empty_regions.at(pos);
 811       r->set_new_top(r->bottom());
 812     }
 813   }
 814 }
 815 
 816 void ShenandoahFullGC::calculate_target_humongous_objects() {
 817   ShenandoahHeap* heap = ShenandoahHeap::heap();
 818 
 819   // Compute the new addresses for humongous objects. We need to do this after the addresses
 820   // for regular objects are calculated, when we know which regions in the heap suffix are
 821   // available for humongous moves.
 822   //
 823   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 824   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 825   // humongous start there.
 826   //
 827   // The complication is potential non-movable regions during the scan. If such a region is
 828   // detected, sliding restarts towards that non-movable region.
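       // For illustration of the loop below: starting with to_begin = to_end = num_regions, each empty or
       // humongous-continuation region extends the window downward by setting to_begin to its index; a movable
       // humongous start spanning k regions is forwarded to region (to_end - k) when that slot still lies inside
       // [to_begin, to_end) and differs from its current index; otherwise both to_begin and to_end are reset to
       // the current region's index and sliding restarts from there.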
 829 
 830   size_t to_begin = heap->num_regions();
 831   size_t to_end = heap->num_regions();
 832 
 833   log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
 834   for (size_t c = heap->num_regions(); c > 0; c--) {
 835     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 836     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 837       // To-region candidate: record this, and continue scan
 838       to_begin = r->index();
 839       continue;
 840     }
 841 
 842     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 843       // From-region candidate: movable humongous region
 844       oop old_obj = cast_to_oop(r->bottom());
 845       size_t words_size = old_obj->size();
 846       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 847 
 848       size_t start = to_end - num_regions;
 849 
 850       if (start >= to_begin && start != r->index()) {
 851         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 852         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 853         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));

 856       }
 857     }
 858 
 859     // Failed to fit. Scan starting from current region.
 860     to_begin = r->index();
 861     to_end = r->index();
 862   }
 863 }
 864 
 865 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 866 private:
 867   ShenandoahHeap* const _heap;
 868 
 869 public:
 870   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 871   void heap_region_do(ShenandoahHeapRegion* r) {
 872     if (r->is_trash()) {
 873       r->recycle();
 874     }
 875     if (r->is_cset()) {
 876       // Leave affiliation unchanged
 877       r->make_regular_bypass();
 878     }
 879     if (r->is_empty_uncommitted()) {
 880       r->make_committed_bypass();
 881     }
 882     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 883 
 884     // Record current region occupancy: this communicates to the rest of the Full GC code
 885     // that empty regions are free.
 886     r->set_new_top(r->top());
 887   }
 888 };
 889 
 890 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
 891 private:
 892   ShenandoahHeap* const _heap;
 893   ShenandoahMarkingContext* const _ctx;
 894 
 895 public:
 896   ShenandoahTrashImmediateGarbageClosure() :
 897     _heap(ShenandoahHeap::heap()),
 898     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 899 
 900   void heap_region_do(ShenandoahHeapRegion* r) {
 901     if (!r->is_affiliated()) {
 902       // Ignore free regions
 903       // TODO: change iterators so they do not process FREE regions.
 904       return;
 905     }
 906 
 907     if (r->is_humongous_start()) {
 908       oop humongous_obj = cast_to_oop(r->bottom());
 909       if (!_ctx->is_marked(humongous_obj)) {
 910         assert(!r->has_live(),
 911                "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live",
 912                r->affiliation_name(),  r->index());
 913         log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because not marked", r->index());
 914         _heap->trash_humongous_region_at(r);
 915       } else {
 916         assert(r->has_live(),
 917                "Humongous Start %s Region " SIZE_FORMAT " should have live", r->affiliation_name(),  r->index());
 918       }
 919     } else if (r->is_humongous_continuation()) {
 920       // If we hit continuation, the non-live humongous starts should have been trashed already
 921       assert(r->humongous_start_region()->has_live(),
 922              "Humongous Continuation %s Region " SIZE_FORMAT " should have live", r->affiliation_name(),  r->index());
 923     } else if (r->is_regular()) {
 924       if (!r->has_live()) {
 925         log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because has no live", r->index());
 926         r->make_trash_immediate();
 927       }
 928     }
 929   }
 930 };
 931 
 932 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
 933   ShenandoahHeap* heap = ShenandoahHeap::heap();
 934 
 935   uint n_workers = heap->workers()->active_workers();
 936   size_t n_regions = heap->num_regions();
 937 
 938   // What we want to accomplish: have the dense prefix of data, while still balancing
 939   // out the parallel work.
 940   //
 941   // Assuming the amount of work is driven by the live data that needs moving, we can slice
 942   // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
 943   // thread takes all regions in its prefix subset, and then it takes some regions from
 944   // the tail.
 945   //

1054   for (size_t wid = 0; wid < n_workers; wid++) {
1055     ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
1056     ShenandoahHeapRegion* r = it.next();
1057     while (r != nullptr) {
1058       size_t idx = r->index();
1059       assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
1060       assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
1061       map.at_put(idx, true);
1062       r = it.next();
1063     }
1064   }
1065 
1066   for (size_t rid = 0; rid < n_regions; rid++) {
1067     bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
1068     bool is_distributed = map.at(rid);
1069     assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
1070   }
1071 #endif
1072 }
1073 
1074 // TODO:
1075 //  Consider compacting old-gen objects toward the high end of memory and young-gen objects towards the low-end
1076 //  of memory.  As currently implemented, all regions are compacted toward the low-end of memory.  This creates more
1077 //  fragmentation of the heap, because old-gen regions get scattered among low-address regions such that it becomes
1078 //  more difficult to find contiguous regions for humongous objects.
1079 void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
1080   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
1081   ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
1082 
1083   ShenandoahHeap* heap = ShenandoahHeap::heap();
1084 
1085   // About to figure out which regions can be compacted, make sure pinning status
1086   // had been updated in GC prologue.
1087   heap->assert_pinned_region_status();
1088 
1089   {
1090     // Trash the immediately collectible regions before computing addresses
1091     ShenandoahTrashImmediateGarbageClosure tigcl;
1092     heap->heap_region_iterate(&tigcl);
1093 
1094     // Make sure regions are in good state: committed, active, clean.
1095     // This is needed because we are potentially sliding the data through them.
1096     ShenandoahEnsureHeapActiveClosure ecl;
1097     heap->heap_region_iterate(&ecl);
1098   }
1099 
1100   // Compute the new addresses for regular objects
1101   {
1102     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
1103 
1104     distribute_slices(worker_slices);
1105 
1106     size_t num_workers = heap->max_workers();
1107 
1108     ResourceMark rm;
1109     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers);
1110     heap->workers()->run_task(&task);
1111   }
1112 
1113   // Compute the new addresses for humongous objects
1114   {
1115     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
1116     calculate_target_humongous_objects();
1117   }
1118 }
1119 
1120 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
1121 private:
1122   ShenandoahHeap* const _heap;
1123   ShenandoahMarkingContext* const _ctx;
1124 
1125   template <class T>
1126   inline void do_oop_work(T* p) {
1127     T o = RawAccess<>::oop_load(p);
1128     if (!CompressedOops::is_null(o)) {
1129       oop obj = CompressedOops::decode_not_null(o);

1163 
1164 class ShenandoahAdjustPointersTask : public WorkerTask {
1165 private:
1166   ShenandoahHeap*          const _heap;
1167   ShenandoahRegionIterator       _regions;
1168 
1169 public:
1170   ShenandoahAdjustPointersTask() :
1171     WorkerTask("Shenandoah Adjust Pointers"),
1172     _heap(ShenandoahHeap::heap()) {
1173   }
1174 
1175   void work(uint worker_id) {
1176     ShenandoahParallelWorkerSession worker_session(worker_id);
1177     ShenandoahAdjustPointersObjectClosure obj_cl;
1178     ShenandoahHeapRegion* r = _regions.next();
1179     while (r != nullptr) {
1180       if (!r->is_humongous_continuation() && r->has_live()) {
1181         _heap->marked_object_iterate(r, &obj_cl);
1182       }
1183       if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
1184         // Pinned regions are not compacted, so they may still hold unmarked objects with
1185         // references to reclaimed memory. Remembered set scanning will crash if it attempts
1186         // to iterate the oops in these objects.
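             // Coalesce-and-fill is assumed here to overwrite those dead ranges with filler objects, keeping the
             // pinned region parsable for remembered set scanning.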
1187         r->begin_preemptible_coalesce_and_fill();
1188         r->oop_fill_and_coalesce_without_cancel();
1189       }
1190       r = _regions.next();
1191     }
1192   }
1193 };
1194 
1195 class ShenandoahAdjustRootPointersTask : public WorkerTask {
1196 private:
1197   ShenandoahRootAdjuster* _rp;
1198   PreservedMarksSet* _preserved_marks;
1199 public:
1200   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
1201     WorkerTask("Shenandoah Adjust Root Pointers"),
1202     _rp(rp),
1203     _preserved_marks(preserved_marks) {}
1204 
1205   void work(uint worker_id) {
1206     ShenandoahParallelWorkerSession worker_session(worker_id);
1207     ShenandoahAdjustPointersClosure cl;
1208     _rp->roots_do(worker_id, &cl);
1209     _preserved_marks->get(worker_id)->adjust_during_full_gc();

1270     _worker_slices(worker_slices) {
1271   }
1272 
1273   void work(uint worker_id) {
1274     ShenandoahParallelWorkerSession worker_session(worker_id);
1275     ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
1276 
1277     ShenandoahCompactObjectsClosure cl(worker_id);
1278     ShenandoahHeapRegion* r = slice.next();
1279     while (r != nullptr) {
1280       assert(!r->is_humongous(), "must not get humongous regions here");
1281       if (r->has_live()) {
1282         _heap->marked_object_iterate(r, &cl);
1283       }
1284       r->set_top(r->new_top());
1285       r = slice.next();
1286     }
1287   }
1288 };
1289 
1290 static void account_for_region(ShenandoahHeapRegion* r, size_t &region_count, size_t &region_usage, size_t &humongous_waste) {
1291   region_count++;
1292   region_usage += r->used();
1293   if (r->is_humongous_start()) {
1294     // For each humongous object, we take this path once regardless of how many regions it spans.
1295     HeapWord* obj_addr = r->bottom();
1296     oop obj = cast_to_oop(obj_addr);
1297     size_t word_size = obj->size();
1298     size_t region_size_words = ShenandoahHeapRegion::region_size_words();
1299     size_t overreach = word_size % region_size_words;
1300     if (overreach != 0) {
1301       humongous_waste += (region_size_words - overreach) * HeapWordSize;
1302     }
1303     // else, this humongous object aligns exactly on region size, so no waste.
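         // Worked example, assuming 512 KiB regions (65536 words) and 8-byte heap words: a 100000-word humongous
         // object spans two regions, overreach = 100000 % 65536 = 34464, so the trailing region contributes
         // (65536 - 34464) * 8 = 248576 bytes of humongous waste.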
1304   }
1305 }
1306 
1307 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
1308 private:
1309   ShenandoahHeap* const _heap;
1310   bool _is_generational;
1311   size_t _young_regions, _young_usage, _young_humongous_waste;
1312   size_t _old_regions, _old_usage, _old_humongous_waste;
1313 
1314 public:
1315   ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
1316                                    _is_generational(_heap->mode()->is_generational()),
1317                                    _young_regions(0),
1318                                    _young_usage(0),
1319                                    _young_humongous_waste(0),
1320                                    _old_regions(0),
1321                                    _old_usage(0),
1322                                    _old_humongous_waste(0)
1323   {
1324     _heap->free_set()->clear();
1325   }
1326 
1327   void heap_region_do(ShenandoahHeapRegion* r) {
1328     assert (!r->is_cset(), "cset regions should have been demoted already");
1329 
1330     // Need to reset the complete-top-at-mark-start pointer here because
1331     // the complete marking bitmap is no longer valid. This ensures
1332     // size-based iteration in marked_object_iterate().
1333     // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
1334     // pinned regions.
1335     if (!r->is_pinned()) {
1336       _heap->complete_marking_context()->reset_top_at_mark_start(r);
1337     }
1338 
1339     size_t live = r->used();
1340 
1341     // Empty regions that have been allocated into become regular regions
1342     if (r->is_empty() && live > 0) {
1343       if (!_is_generational) {
1344         r->make_young_maybe();
1345       }
1346       // else, generational mode compaction has already established affiliation.
1347       r->make_regular_bypass();
1348     }
1349 
1350     // Reclaim regular regions that became empty
1351     if (r->is_regular() && live == 0) {
1352       r->make_trash();
1353     }
1354 
1355     // Recycle all trash regions
1356     if (r->is_trash()) {
1357       live = 0;
1358       r->recycle();
1359     } else {
1360       if (r->is_old()) {
1361         account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
1362       } else if (r->is_young()) {
1363         account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
1364       }
1365     }

1366     r->set_live_data(live);
1367     r->reset_alloc_metadata();

1368   }
1369 
1370   void update_generation_usage() {
1371     if (_is_generational) {
1372       _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
1373       _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
1374     } else {
1375       assert(_old_regions == 0, "Old regions only expected in generational mode");
1376       assert(_old_usage == 0, "Old usage only expected in generational mode");
1377       assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
1378     }
1379 
1380     // In generational mode, global usage should be the sum of young and old. This is also true
1381     // for non-generational modes except that there are no old regions.
1382     _heap->global_generation()->establish_usage(_old_regions + _young_regions,
1383                                                 _old_usage + _young_usage,
1384                                                 _old_humongous_waste + _young_humongous_waste);
1385   }
1386 };
1387 
1388 void ShenandoahFullGC::compact_humongous_objects() {
1389   // Compact humongous regions, based on the fwdptr installed in their start objects.
1390   //
1391   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
1392   // humongous regions are already compacted, and do not require further moves, which alleviates
1393   // sliding costs. We may consider doing this in parallel in the future.
1394 
1395   ShenandoahHeap* heap = ShenandoahHeap::heap();
1396 
1397   for (size_t c = heap->num_regions(); c > 0; c--) {
1398     ShenandoahHeapRegion* r = heap->get_region(c - 1);
1399     if (r->is_humongous_start()) {
1400       oop old_obj = cast_to_oop(r->bottom());
1401       if (!old_obj->is_forwarded()) {
1402         // No need to move the object, it stays at the same slot
1403         continue;
1404       }
1405       size_t words_size = old_obj->size();
1406       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1407 
1408       size_t old_start = r->index();
1409       size_t old_end   = old_start + num_regions - 1;
1410       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
1411       size_t new_end   = new_start + num_regions - 1;
1412       assert(old_start != new_start, "must be real move");
1413       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
1414 
1415       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(heap->get_region(old_start)->bottom()));
1416       log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT,
1417                     old_start, new_start);
1418 
1419       Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
1420                                    heap->get_region(new_start)->bottom(),
1421                                    words_size);
1422 
1423       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1424       new_obj->init_mark();
1425 
1426       {
1427         ShenandoahAffiliation original_affiliation = r->affiliation();
1428         for (size_t c = old_start; c <= old_end; c++) {
1429           ShenandoahHeapRegion* r = heap->get_region(c);
1430           // Leave humongous region affiliation unchanged.
1431           r->make_regular_bypass();
1432           r->set_top(r->bottom());
1433         }
1434 
1435         for (size_t c = new_start; c <= new_end; c++) {
1436           ShenandoahHeapRegion* r = heap->get_region(c);
1437           if (c == new_start) {
1438             r->make_humongous_start_bypass(original_affiliation);
1439           } else {
1440             r->make_humongous_cont_bypass(original_affiliation);
1441           }
1442 
1443           // Trailing region may be non-full, record the remainder there
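               // Assuming power-of-two region sizes, region_size_words_mask() is region_size_words - 1, so the
               // remainder below equals words_size % region_size_words.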
1444           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1445           if ((c == new_end) && (remainder != 0)) {
1446             r->set_top(r->bottom() + remainder);
1447           } else {
1448             r->set_top(r->end());
1449           }
1450 
1451           r->reset_alloc_metadata();
1452         }
1453       }
1454     }
1455   }
1456 }
1457 
1458 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1459 // we need to remain able to walk pinned regions.
1460 // Since pinned regions do not move and don't get compacted, we will get holes with

1486 };
1487 
1488 void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
1489   GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
1490   ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
1491 
1492   ShenandoahHeap* heap = ShenandoahHeap::heap();
1493 
1494   // Compact regular objects first
1495   {
1496     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
1497     ShenandoahCompactObjectsTask compact_task(worker_slices);
1498     heap->workers()->run_task(&compact_task);
1499   }
1500 
1501   // Compact humongous objects after regular object moves
1502   {
1503     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1504     compact_humongous_objects();
1505   }
1506 }
1507 
1508 void ShenandoahFullGC::phase5_epilog() {
1509   GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
1510   ShenandoahHeap* heap = ShenandoahHeap::heap();
1511 
1512   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1513   // and must ensure the bitmap is in sync.
1514   {
1515     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1516     ShenandoahMCResetCompleteBitmapTask task;
1517     heap->workers()->run_task(&task);
1518   }
1519 
1520   // Bring regions in proper states after the collection, and set heap properties.
1521   {
1522     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

1523     ShenandoahPostCompactClosure post_compact;
1524     heap->heap_region_iterate(&post_compact);
1525     post_compact.update_generation_usage();
1526     if (heap->mode()->is_generational()) {
1527       size_t old_usage = heap->old_generation()->used_regions_size();
1528       size_t old_capacity = heap->old_generation()->max_capacity();
1529 
1530       assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
1531       assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");
1532 
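           // Illustrative arithmetic for the transfer below (numbers are hypothetical): with 4 MiB regions, an
           // old capacity of 400 MiB and an old usage of 320 MiB yield 20 excess old regions to transfer back to
           // young; if usage instead exceeded capacity by 40 MiB, 10 regions would be force-transferred to old.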
1533       if (old_capacity > old_usage) {
1534         size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
1535         heap->generation_sizer()->transfer_to_young(excess_old_regions);
1536       } else if (old_capacity < old_usage) {
1537         size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
1538         heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
1539       }
1540 
1541       log_info(gc)("FullGC done: young usage: " SIZE_FORMAT "%s, old usage: " SIZE_FORMAT "%s",
1542                    byte_size_in_proper_unit(heap->young_generation()->used()), proper_unit_for_byte_size(heap->young_generation()->used()),
1543                    byte_size_in_proper_unit(heap->old_generation()->used()),   proper_unit_for_byte_size(heap->old_generation()->used()));
1544     }
1545     heap->collection_set()->clear();
1546     size_t young_cset_regions, old_cset_regions;
1547     heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions);
1548 
1549     // We also do not expand old generation size following Full GC because we have scrambled age populations and
1550     // no longer have objects separated by age into distinct regions.
1551 
1552     // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions?
1553     //       A partial solution would be to remember how many objects are of tenure age following Full GC, but
1554     //       this is probably suboptimal, because most of these objects will not reside in a region that will be
1555     //       selected for the next evacuation phase.
1556 
1557     // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
1558     heap->clear_promotion_potential();
1559 
1560     if (heap->mode()->is_generational()) {
1561       // Invoke this in case we are able to transfer memory from OLD to YOUNG.
1562       heap->adjust_generation_sizes_for_next_cycle(0, 0, 0);
1563     }
1564     heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
1565 
1566     // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
1567     // abbreviated cycle.
1568     if (heap->mode()->is_generational()) {
1569       bool success;
1570       size_t region_xfer;
1571       const char* region_destination;
1572       ShenandoahYoungGeneration* young_gen = heap->young_generation();
1573       ShenandoahGeneration* old_gen = heap->old_generation();
1574 
1575       size_t old_region_surplus = heap->get_old_region_surplus();
1576       size_t old_region_deficit = heap->get_old_region_deficit();
1577       if (old_region_surplus) {
1578         success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
1579         region_destination = "young";
1580         region_xfer = old_region_surplus;
1581       } else if (old_region_deficit) {
1582         success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
1583         region_destination = "old";
1584         region_xfer = old_region_deficit;
1585         if (!success) {
1586           ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand();
1587         }
1588       } else {
1589         region_destination = "none";
1590         region_xfer = 0;
1591         success = true;
1592       }
1593       heap->set_old_region_surplus(0);
1594       heap->set_old_region_deficit(0);
1595       size_t young_available = young_gen->available();
1596       size_t old_available = old_gen->available();
1597       log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
1598                          SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
1599                          success? "successfully transferred": "failed to transfer", region_xfer, region_destination,
1600                          byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
1601                          byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
1602     }
1603     heap->clear_cancelled_gc(true /* clear oom handler */);
1604   }
1605 }