src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/preservedMarks.inline.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahFullGC.hpp"

  36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  37 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  40 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  42 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  43 #include "gc/shenandoah/shenandoahMetrics.hpp"
  44 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  46 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  47 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"

  52 #include "memory/metaspaceUtils.hpp"
  53 #include "memory/universe.hpp"
  54 #include "oops/compressedOops.inline.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/thread.hpp"
  58 #include "runtime/vmThread.hpp"
  59 #include "utilities/copy.hpp"
  60 #include "utilities/events.hpp"
  61 #include "utilities/growableArray.hpp"
  62 #include "gc/shared/workgroup.hpp"
  63 
  64 ShenandoahFullGC::ShenandoahFullGC() :
  65   _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  66   _preserved_marks(new PreservedMarksSet(true)) {}
  67 
  68 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  69   vmop_entry_full(cause);
  70   // Always succeeds
  71   return true;
  72 }
  73 
  74 void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  75   ShenandoahHeap* const heap = ShenandoahHeap::heap();
  76   TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  77   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
  78 
  79   heap->try_inject_alloc_failure();
  80   VM_ShenandoahFullGC op(cause, this);
  81   VMThread::execute(&op);
  82 }
  83 

  96 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  97   ShenandoahMetricsSnapshot metrics;
  98   metrics.snap_before();
  99 
 100   // Perform full GC
 101   do_it(cause);
 102 
 103   metrics.snap_after();
 104 
 105   if (metrics.is_good_progress()) {
 106     ShenandoahHeap::heap()->notify_gc_progress();
 107   } else {
 108     // Nothing to do. Tell the allocation path that we have failed to make
 109     // progress, and it can finally fail.
 110     ShenandoahHeap::heap()->notify_gc_no_progress();
 111   }
 112 }
 113 
 114 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 115   ShenandoahHeap* heap = ShenandoahHeap::heap();


 116 
 117   if (ShenandoahVerify) {
 118     heap->verifier()->verify_before_fullgc();
 119   }
 120 
 121   if (VerifyBeforeGC) {
 122     Universe::verify();
 123   }
 124 
 125   // Degenerated GC may carry concurrent root flags when upgrading to
 126   // full GC. We need to reset them before mutators resume.
 127   heap->set_concurrent_strong_root_in_progress(false);
 128   heap->set_concurrent_weak_root_in_progress(false);
 129 
 130   heap->set_full_gc_in_progress(true);
 131 
 132   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 133   assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
 134 
 135   {

 139 
 140   {
 141     ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
 142     // Full GC is supposed to recover from any GC state:
 143 
 144     // a0. Remember if we have forwarded objects
 145     bool has_forwarded_objects = heap->has_forwarded_objects();
 146 
 147     // a1. Cancel evacuation, if in progress
 148     if (heap->is_evacuation_in_progress()) {
 149       heap->set_evacuation_in_progress(false);
 150     }
 151     assert(!heap->is_evacuation_in_progress(), "sanity");
 152 
 153     // a2. Cancel update-refs, if in progress
 154     if (heap->is_update_refs_in_progress()) {
 155       heap->set_update_refs_in_progress(false);
 156     }
 157     assert(!heap->is_update_refs_in_progress(), "sanity");
 158 
 159     // b. Cancel concurrent mark, if in progress
 160     if (heap->is_concurrent_mark_in_progress()) {
 161       ShenandoahConcurrentGC::cancel();
 162       heap->set_concurrent_mark_in_progress(false);
 163     }
 164     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 165 
 166     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
 167     if (has_forwarded_objects) {
 168       update_roots(true /*full_gc*/);
 169     }
 170 
 171     // d. Reset the bitmaps for new marking
 172     heap->reset_mark_bitmap();
 173     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 174     assert(!heap->marking_context()->is_complete(), "sanity");
 175 
 176     // e. Abandon reference discovery and clear all discovered references.
 177     ShenandoahReferenceProcessor* rp = heap->ref_processor();
 178     rp->abandon_partial_discovery();
 179 
 180     // f. Sync pinned region status from the CP marks
 181     heap->sync_pinned_region_status();
 182 
 183     // The rest of prologue:
 184     _preserved_marks->init(heap->workers()->active_workers());
 185 
 186     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 187   }
 188 
 189   if (UseTLAB) {

 190     heap->gclabs_retire(ResizeTLAB);
 191     heap->tlabs_retire(ResizeTLAB);
 192   }
 193 
 194   OrderAccess::fence();
 195 
 196   phase1_mark_heap();
 197 
 198   // Once marking is done, which may have fixed up forwarded objects, we can drop it.
 199   // Coming out of Full GC, we would not have any forwarded objects.
 200   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 201   heap->set_has_forwarded_objects(false);
 202 
 203   heap->set_full_gc_move_in_progress(true);
 204 
 205   // Setup workers for the rest
 206   OrderAccess::fence();
 207 
 208   // Initialize worker slices
 209   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);

 212   }
 213 
 214   {
 215     // The rest of code performs region moves, where region status is undefined
 216     // until all phases run together.
 217     ShenandoahHeapLocker lock(heap->lock());
 218 
 219     phase2_calculate_target_addresses(worker_slices);
 220 
 221     OrderAccess::fence();
 222 
 223     phase3_update_references();
 224 
 225     phase4_compact_objects(worker_slices);
 226   }
 227 
 228   {
 229     // Epilogue
 230     _preserved_marks->restore(heap->workers());
 231     _preserved_marks->reclaim();
 232   }
 233 
 234   // Resize metaspace
 235   MetaspaceGC::compute_new_size();
 236 
 237   // Free worker slices
 238   for (uint i = 0; i < heap->max_workers(); i++) {
 239     delete worker_slices[i];
 240   }
 241   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 242 
 243   heap->set_full_gc_move_in_progress(false);
 244   heap->set_full_gc_in_progress(false);
 245 
 246   if (ShenandoahVerify) {
 247     heap->verifier()->verify_after_fullgc();
 248   }
 249 
 250   if (VerifyAfterGC) {
 251     Universe::verify();
 252   }
 253 
 254   {
 255     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
 256     heap->post_full_gc_dump(_gc_timer);
 257   }
 258 }
 259 
 260 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 261 private:
 262   ShenandoahMarkingContext* const _ctx;
 263 
 264 public:
 265   ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 266 
 267   void heap_region_do(ShenandoahHeapRegion *r) {
 268     _ctx->capture_top_at_mark_start(r);
 269     r->clear_live_data();


 270   }


 271 };
 272 
 273 void ShenandoahFullGC::phase1_mark_heap() {
 274   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 275   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 276 
 277   ShenandoahHeap* heap = ShenandoahHeap::heap();
 278 
 279   ShenandoahPrepareForMarkClosure cl;
 280   heap->heap_region_iterate(&cl);
 281 
 282   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
 283 
 284   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 285   // enable ("weak") refs discovery
 286   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 287 
 288   ShenandoahSTWMark mark(true /*full_gc*/);
 289   mark.mark();
 290   heap->parallel_cleaning(true /* full_gc */);
 291 }
 292 
 293 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 294 private:
 295   PreservedMarks*          const _preserved_marks;
 296   ShenandoahHeap*          const _heap;
 297   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 298   int _empty_regions_pos;
 299   ShenandoahHeapRegion*          _to_region;
 300   ShenandoahHeapRegion*          _from_region;
 301   HeapWord* _compact_point;
 302 
 303 public:
 304   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 305                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 306                                               ShenandoahHeapRegion* to_region) :
 307     _preserved_marks(preserved_marks),
 308     _heap(ShenandoahHeap::heap()),
 309     _empty_regions(empty_regions),
 310     _empty_regions_pos(0),
 311     _to_region(to_region),
 312     _from_region(NULL),
 313     _compact_point(to_region->bottom()) {}
 314 
 315   void set_from_region(ShenandoahHeapRegion* from_region) {
 316     _from_region = from_region;
 317   }
 318 
 319   void finish_region() {
 320     assert(_to_region != NULL, "should not happen");

 321     _to_region->set_new_top(_compact_point);
 322   }
 323 
 324   bool is_compact_same_region() {
 325     return _from_region == _to_region;
 326   }
 327 
 328   int empty_regions_pos() {
 329     return _empty_regions_pos;
 330   }
 331 
 332   void do_object(oop p) {
 333     assert(_from_region != NULL, "must set before work");
 334     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 335     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 336 
 337     size_t obj_size = p->size();
 338     if (_compact_point + obj_size > _to_region->end()) {
 339       finish_region();
 340 

 346       } else {
 347         // Out of empty region? Compact within the same region.
 348         new_to_region = _from_region;
 349       }
 350 
 351       assert(new_to_region != _to_region, "must not reuse same to-region");
 352       assert(new_to_region != NULL, "must not be NULL");
 353       _to_region = new_to_region;
 354       _compact_point = _to_region->bottom();
 355     }
 356 
 357     // Object fits into current region, record new location:
 358     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 359     shenandoah_assert_not_forwarded(NULL, p);
 360     _preserved_marks->push_if_necessary(p, p->mark());
 361     p->forward_to(cast_to_oop(_compact_point));
 362     _compact_point += obj_size;
 363   }
 364 };
 365 
 366 class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
 367 private:
 368   PreservedMarksSet*        const _preserved_marks;
 369   ShenandoahHeap*           const _heap;
 370   ShenandoahHeapRegionSet** const _worker_slices;
 371 
 372 public:
 373   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :

 374     AbstractGangTask("Shenandoah Prepare For Compaction"),
 375     _preserved_marks(preserved_marks),
 376     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 377   }
 378 
 379   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 380     // Empty region: get it into the slice to defragment the slice itself.
 381     // We could have skipped this without violating correctness, but we really
 382     // want to compact all live regions to the start of the heap, which sometimes
 383     // means moving them into the fully empty regions.
 384     if (r->is_empty()) return true;
 385 
 386     // We can move this region, and it is not a humongous region. Humongous
 387     // regions are special-cased: their moves are handled separately.
 388     return r->is_stw_move_allowed() && !r->is_humongous();
 389   }
 390 
 391   void work(uint worker_id) {
 392     ShenandoahParallelWorkerSession worker_session(worker_id);
 393     ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 394     ShenandoahHeapRegionSetIterator it(slice);
 395     ShenandoahHeapRegion* from_region = it.next();
 396     // No work?
 397     if (from_region == NULL) {
 398        return;
 399     }
 400 
 401     // Sliding compaction. Walk all regions in the slice, and compact them.
 402     // Remember empty regions and reuse them as needed.
 403     ResourceMark rm;
 404 
 405     GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
 406 
 407     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
 408 
 409     while (from_region != NULL) {
 410       assert(is_candidate_region(from_region), "Sanity");
 411 
 412       cl.set_from_region(from_region);
 413       if (from_region->has_live()) {
 414         _heap->marked_object_iterate(from_region, &cl);
 415       }
 416 
 417       // Compacted the region to somewhere else? From-region is empty then.
 418       if (!cl.is_compact_same_region()) {
 419         empty_regions.append(from_region);
 420       }
 421       from_region = it.next();
 422     }
 423     cl.finish_region();
 424 
 425     // Mark all remaining regions as empty
 426     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 427       ShenandoahHeapRegion* r = empty_regions.at(pos);
 428       r->set_new_top(r->bottom());
 429     }
 430   }
 431 };
 432 
 433 void ShenandoahFullGC::calculate_target_humongous_objects() {
 434   ShenandoahHeap* heap = ShenandoahHeap::heap();
 435 
 436   // Compute the new addresses for humongous objects. We need to do this after addresses
 437   // for regular objects are calculated, and we know what regions in heap suffix are
 438   // available for humongous moves.
 439   //
 440   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 441   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 442   // humongous start there.
 443   //
 444   // The complication is potential non-movable regions during the scan. If such a region is
 445   // detected, then sliding restarts towards that non-movable region.
 446 
 447   size_t to_begin = heap->num_regions();
 448   size_t to_end = heap->num_regions();
 449 

 450   for (size_t c = heap->num_regions(); c > 0; c--) {
 451     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 452     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 453       // To-region candidate: record this, and continue scan
 454       to_begin = r->index();
 455       continue;
 456     }
 457 
 458     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 459       // From-region candidate: movable humongous region
 460       oop old_obj = cast_to_oop(r->bottom());
 461       size_t words_size = old_obj->size();
 462       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 463 
 464       size_t start = to_end - num_regions;
 465 
 466       if (start >= to_begin && start != r->index()) {
 467         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 468         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 469         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));

 496     }
 497     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 498 
 499     // Record current region occupancy: this communicates to the rest of the Full GC
 500     // code that empty regions are free.
 501     r->set_new_top(r->top());
 502   }
 503 };
 504 
 505 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
 506 private:
 507   ShenandoahHeap* const _heap;
 508   ShenandoahMarkingContext* const _ctx;
 509 
 510 public:
 511   ShenandoahTrashImmediateGarbageClosure() :
 512     _heap(ShenandoahHeap::heap()),
 513     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 514 
 515   void heap_region_do(ShenandoahHeapRegion* r) {
 516     if (r->is_humongous_start()) {
 517       oop humongous_obj = cast_to_oop(r->bottom());
 518       if (!_ctx->is_marked(humongous_obj)) {
 519         assert(!r->has_live(),
 520                "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
 521         _heap->trash_humongous_region_at(r);
 522       } else {
 523         assert(r->has_live(),
 524                "Region " SIZE_FORMAT " should have live", r->index());
 525       }
 526     } else if (r->is_humongous_continuation()) {
 527       // If we hit continuation, the non-live humongous starts should have been trashed already
 528       assert(r->humongous_start_region()->has_live(),
 529              "Region " SIZE_FORMAT " should have live", r->index());
 530     } else if (r->is_regular()) {
 531       if (!r->has_live()) {
 532         r->make_trash_immediate();
 533       }
 534     }


 535   }
 536 };
 537 
 538 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
 539   ShenandoahHeap* heap = ShenandoahHeap::heap();
 540 
 541   uint n_workers = heap->workers()->active_workers();
 542   size_t n_regions = heap->num_regions();
 543 
 544   // What we want to accomplish: have the dense prefix of data, while still balancing
 545   // out the parallel work.
 546   //
 547   // Assuming the amount of work is driven by the live data that needs moving, we can slice
 548   // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
 549   // thread takes all regions in its prefix subset, and then it takes some regions from
 550   // the tail.
 551   //
 552   // Tail region selection becomes interesting.
 553   //
 554   // First, we want to distribute the regions fairly between the workers, and those regions

 681   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
 682   ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
 683 
 684   ShenandoahHeap* heap = ShenandoahHeap::heap();
 685 
 686   // About to figure out which regions can be compacted, make sure pinning status
 687   // had been updated in GC prologue.
 688   heap->assert_pinned_region_status();
 689 
 690   {
 691     // Trash the immediately collectible regions before computing addresses
 692     ShenandoahTrashImmediateGarbageClosure tigcl;
 693     heap->heap_region_iterate(&tigcl);
 694 
 695     // Make sure regions are in good state: committed, active, clean.
 696     // This is needed because we are potentially sliding the data through them.
 697     ShenandoahEnsureHeapActiveClosure ecl;
 698     heap->heap_region_iterate(&ecl);
 699   }
 700 
 701   // Compute the new addresses for regular objects
 702   {
 703     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 704 
 705     distribute_slices(worker_slices);
 706 
 707     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);



 708     heap->workers()->run_task(&task);
 709   }
 710 
 711   // Compute the new addresses for humongous objects
 712   {
 713     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 714     calculate_target_humongous_objects();
 715   }
 716 }
 717 
 718 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 719 private:
 720   ShenandoahHeap* const _heap;
 721   ShenandoahMarkingContext* const _ctx;
 722 
 723   template <class T>
 724   inline void do_oop_work(T* p) {
 725     T o = RawAccess<>::oop_load(p);
 726     if (!CompressedOops::is_null(o)) {
 727       oop obj = CompressedOops::decode_not_null(o);

 897     }
 898 
 899     size_t live = r->used();
 900 
 901     // Make regular any empty regions that have been allocated into
 902     if (r->is_empty() && live > 0) {
 903       r->make_regular_bypass();
 904     }
 905 
 906     // Reclaim regular regions that became empty
 907     if (r->is_regular() && live == 0) {
 908       r->make_trash();
 909     }
 910 
 911     // Recycle all trash regions
 912     if (r->is_trash()) {
 913       live = 0;
 914       r->recycle();
 915     }
 916 
 917     r->set_live_data(live);
 918     r->reset_alloc_metadata();
 919     _live += live;
 920   }
 921 
 922   size_t get_live() {
 923     return _live;
 924   }
 925 };
 926 
 927 void ShenandoahFullGC::compact_humongous_objects() {
 928   // Compact humongous regions, based on their fwdptr objects.
 929   //
 930   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
 931   // humongous regions are already compacted, and do not require further moves, which alleviates
 932   // sliding costs. We may consider doing this in parallel in future.
 933 
 934   ShenandoahHeap* heap = ShenandoahHeap::heap();
 935 
 936   for (size_t c = heap->num_regions(); c > 0; c--) {
 937     ShenandoahHeapRegion* r = heap->get_region(c - 1);
 938     if (r->is_humongous_start()) {
 939       oop old_obj = cast_to_oop(r->bottom());
 940       if (!old_obj->is_forwarded()) {
 941         // No need to move the object, it stays at the same slot
 942         continue;
 943       }
 944       size_t words_size = old_obj->size();
 945       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 946 
 947       size_t old_start = r->index();
 948       size_t old_end   = old_start + num_regions - 1;
 949       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
 950       size_t new_end   = new_start + num_regions - 1;
 951       assert(old_start != new_start, "must be real move");
 952       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
 953 



 954       Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
 955                                    heap->get_region(new_start)->bottom(),
 956                                    words_size);
 957 
 958       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
 959       new_obj->init_mark();
 960 
 961       {

 962         for (size_t c = old_start; c <= old_end; c++) {
 963           ShenandoahHeapRegion* r = heap->get_region(c);
 964           r->make_regular_bypass();
 965           r->set_top(r->bottom());
 966         }
 967 
 968         for (size_t c = new_start; c <= new_end; c++) {
 969           ShenandoahHeapRegion* r = heap->get_region(c);
 970           if (c == new_start) {
 971             r->make_humongous_start_bypass();
 972           } else {
 973             r->make_humongous_cont_bypass();
 974           }
 975 
 976           // Trailing region may be non-full, record the remainder there
 977           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
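          // The mask above computes words_size % region_size_words() (region sizes are powers of two):
          // the number of words the object occupies in its trailing region. For example (hypothetical
          // sizes): with region_size_words() == 65536 and words_size == 150000, the object spans three
          // regions and remainder == 18928, so only the trailing region gets a partial top.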
 978           if ((c == new_end) && (remainder != 0)) {
 979             r->set_top(r->bottom() + remainder);
 980           } else {
 981             r->set_top(r->end());
 982           }
 983 
 984           r->reset_alloc_metadata();
 985         }
 986       }
 987     }
 988   }
 989 }
 990 
 991 // This is slightly different from ShHeap::reset_next_mark_bitmap:
 992 // we need to remain able to walk pinned regions.
 993 // Since pinned regions do not move and don't get compacted, we will get holes with

1032   }
1033 
1034   // Compact humongous objects after regular object moves
1035   {
1036     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1037     compact_humongous_objects();
1038   }
1039 
1040   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1041   // and must ensure the bitmap is in sync.
1042   {
1043     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1044     ShenandoahMCResetCompleteBitmapTask task;
1045     heap->workers()->run_task(&task);
1046   }
1047 
1048   // Bring regions in proper states after the collection, and set heap properties.
1049   {
1050     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1051 
1052     ShenandoahPostCompactClosure post_compact;
1053     heap->heap_region_iterate(&post_compact);
1054     heap->set_used(post_compact.get_live());
1055 
1056     heap->collection_set()->clear();
1057     heap->free_set()->rebuild();
1058   }
1059 
1060   heap->clear_cancelled_gc();
1061 }

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/preservedMarks.inline.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahFullGC.hpp"
  36 #include "gc/shenandoah/shenandoahGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  39 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  44 #include "gc/shenandoah/shenandoahMetrics.hpp"
  45 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  46 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  47 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  48 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  49 #include "gc/shenandoah/shenandoahUtils.hpp"
  50 #include "gc/shenandoah/shenandoahVerifier.hpp"
  51 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  52 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  53 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  54 #include "memory/metaspaceUtils.hpp"
  55 #include "memory/universe.hpp"
  56 #include "oops/compressedOops.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "runtime/orderAccess.hpp"
  59 #include "runtime/thread.hpp"
  60 #include "runtime/vmThread.hpp"
  61 #include "utilities/copy.hpp"
  62 #include "utilities/events.hpp"
  63 #include "utilities/growableArray.hpp"
  64 #include "gc/shared/workgroup.hpp"
  65 
  66 // After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
  67 // registering all objects between bottom() and top(), and setting remembered set cards to
  68 // DIRTY if they hold interesting pointers.
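// Here an "interesting" pointer is taken to mean a reference from an old-generation object into the
// young generation; cards covering such references are marked DIRTY so that subsequent young
// collections only need to scan those cards rather than the entire old generation.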
  69 class ShenandoahReconstructRememberedSetTask : public AbstractGangTask {
  70 private:
  71   ShenandoahRegionIterator _regions;
  72 
  73 public:
  74   ShenandoahReconstructRememberedSetTask() :
  75     AbstractGangTask("Shenandoah Reconstruct Remembered Set") { }
  76 
  77   void work(uint worker_id) {
  78     ShenandoahParallelWorkerSession worker_session(worker_id);
  79     ShenandoahHeapRegion* r = _regions.next();
  80     ShenandoahHeap* heap = ShenandoahHeap::heap();
  81     RememberedScanner* scanner = heap->card_scan();
  82     ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers;
  83 
  84     while (r != NULL) {
  85       if (r->is_old() && r->is_active()) {
  86         HeapWord* obj_addr = r->bottom();
  87         if (r->is_humongous_start()) {
  88           // Determine the extent of the humongous object
  89           oop obj = cast_to_oop(obj_addr);
  90           size_t size = obj->size();
  91           HeapWord* end_object = r->bottom() + size;
  92 
  93           // First, clear the remembered set for all spanned humongous regions
  94           size_t num_regions = (size + ShenandoahHeapRegion::region_size_words() - 1) / ShenandoahHeapRegion::region_size_words();
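          // The expression above is a ceiling division: the number of heap regions spanned by the
          // humongous object. For example (hypothetical sizes), an object of 100000 words with
          // 65536-word regions spans two regions.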
  95           size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words();
  96           scanner->reset_remset(r->bottom(), region_span);
  97           size_t region_index = r->index();
  98           ShenandoahHeapRegion* humongous_region = heap->get_region(region_index);
  99           while (num_regions-- != 0) {
 100             scanner->reset_object_range(humongous_region->bottom(), humongous_region->end());
 101             region_index++;
 102             humongous_region = heap->get_region(region_index);
 103           }
 104 
 105           // Then register the humongous object and DIRTY relevant remembered set cards
 106           scanner->register_object_wo_lock(obj_addr);
 107           obj->oop_iterate(&dirty_cards_for_interesting_pointers);
 108         } else if (!r->is_humongous()) {
 109           // First, clear the remembered set
 110           scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words());
 111           scanner->reset_object_range(r->bottom(), r->end());
 112 
 113           // Then iterate over all objects, registering each object and DIRTYing relevant remembered set cards
 114           HeapWord* t = r->top();
 115           while (obj_addr < t) {
 116             oop obj = cast_to_oop(obj_addr);
 117             size_t size = obj->size();
 118             scanner->register_object_wo_lock(obj_addr);
 119             obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers);
 120           }
 121         } // else, ignore humongous continuation region
 122       }
 123       // else, this region is FREE or YOUNG or inactive and we can ignore it.
 124       r = _regions.next();
 125     }
 126   }
 127 };
 128 
 129 ShenandoahFullGC::ShenandoahFullGC() :
 130   _gc_timer(ShenandoahHeap::heap()->gc_timer()),
 131   _preserved_marks(new PreservedMarksSet(true)) {}
 132 
 133 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
 134   vmop_entry_full(cause);
 135   // Always succeeds
 136   return true;
 137 }
 138 
 139 void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
 140   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 141   TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
 142   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
 143 
 144   heap->try_inject_alloc_failure();
 145   VM_ShenandoahFullGC op(cause, this);
 146   VMThread::execute(&op);
 147 }
 148 

 161 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
 162   ShenandoahMetricsSnapshot metrics;
 163   metrics.snap_before();
 164 
 165   // Perform full GC
 166   do_it(cause);
 167 
 168   metrics.snap_after();
 169 
 170   if (metrics.is_good_progress()) {
 171     ShenandoahHeap::heap()->notify_gc_progress();
 172   } else {
 173     // Nothing to do. Tell the allocation path that we have failed to make
 174     // progress, and it can finally fail.
 175     ShenandoahHeap::heap()->notify_gc_no_progress();
 176   }
 177 }
 178 
 179 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 180   ShenandoahHeap* heap = ShenandoahHeap::heap();
 181   // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
 182   heap->set_gc_generation(heap->global_generation());
 183 
 184   if (ShenandoahVerify) {
 185     heap->verifier()->verify_before_fullgc();
 186   }
 187 
 188   if (VerifyBeforeGC) {
 189     Universe::verify();
 190   }
 191 
 192   // Degenerated GC may carry concurrent root flags when upgrading to
 193   // full GC. We need to reset them before mutators resume.
 194   heap->set_concurrent_strong_root_in_progress(false);
 195   heap->set_concurrent_weak_root_in_progress(false);
 196 
 197   heap->set_full_gc_in_progress(true);
 198 
 199   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 200   assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
 201 
 202   {

 206 
 207   {
 208     ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
 209     // Full GC is supposed to recover from any GC state:
 210 
 211     // a0. Remember if we have forwarded objects
 212     bool has_forwarded_objects = heap->has_forwarded_objects();
 213 
 214     // a1. Cancel evacuation, if in progress
 215     if (heap->is_evacuation_in_progress()) {
 216       heap->set_evacuation_in_progress(false);
 217     }
 218     assert(!heap->is_evacuation_in_progress(), "sanity");
 219 
 220     // a2. Cancel update-refs, if in progress
 221     if (heap->is_update_refs_in_progress()) {
 222       heap->set_update_refs_in_progress(false);
 223     }
 224     assert(!heap->is_update_refs_in_progress(), "sanity");
 225 
 226     // b. Cancel all concurrent marks, if in progress
 227     if (heap->is_concurrent_mark_in_progress()) {
 228       heap->cancel_concurrent_mark();

 229     }
 230     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 231 
 232     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
 233     if (has_forwarded_objects) {
 234       update_roots(true /*full_gc*/);
 235     }
 236 
 237     // d. Reset the bitmaps for new marking
 238     heap->global_generation()->reset_mark_bitmap();
 239     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 240     assert(!heap->global_generation()->is_mark_complete(), "sanity");
 241 
 242     // e. Abandon reference discovery and clear all discovered references.
 243     ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
 244     rp->abandon_partial_discovery();
 245 
 246     // f. Sync pinned region status from the CP marks
 247     heap->sync_pinned_region_status();
 248 
 249     // The rest of prologue:
 250     _preserved_marks->init(heap->workers()->active_workers());
 251 
 252     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 253   }
 254 
 255   if (UseTLAB) {
 256     // TODO: Do we need to explicitly retire PLABs?
 257     heap->gclabs_retire(ResizeTLAB);
 258     heap->tlabs_retire(ResizeTLAB);
 259   }
 260 
 261   OrderAccess::fence();
 262 
 263   phase1_mark_heap();
 264 
 265   // Once marking is done, which may have fixed up forwarded objects, we can drop it.
 266   // Coming out of Full GC, we would not have any forwarded objects.
 267   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 268   heap->set_has_forwarded_objects(false);
 269 
 270   heap->set_full_gc_move_in_progress(true);
 271 
 272   // Setup workers for the rest
 273   OrderAccess::fence();
 274 
 275   // Initialize worker slices
 276   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);

 279   }
 280 
 281   {
 282     // The rest of code performs region moves, where region status is undefined
 283     // until all phases run together.
 284     ShenandoahHeapLocker lock(heap->lock());
 285 
 286     phase2_calculate_target_addresses(worker_slices);
 287 
 288     OrderAccess::fence();
 289 
 290     phase3_update_references();
 291 
 292     phase4_compact_objects(worker_slices);
 293   }
 294 
 295   {
 296     // Epilogue
 297     _preserved_marks->restore(heap->workers());
 298     _preserved_marks->reclaim();
 299 
 300     if (heap->mode()->is_generational()) {
 301       ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
 302       ShenandoahReconstructRememberedSetTask task;
 303       heap->workers()->run_task(&task);
 304     }
 305   }
 306 
 307   // Resize metaspace
 308   MetaspaceGC::compute_new_size();
 309 
 310   // Free worker slices
 311   for (uint i = 0; i < heap->max_workers(); i++) {
 312     delete worker_slices[i];
 313   }
 314   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 315 
 316   heap->set_full_gc_move_in_progress(false);
 317   heap->set_full_gc_in_progress(false);
 318 
 319   if (ShenandoahVerify) {
 320     if (heap->mode()->is_generational()) {
 321       heap->verifier()->verify_after_generational_fullgc();
 322     } else {
 323       heap->verifier()->verify_after_fullgc();
 324     }
 325   }
 326 
 327   if (VerifyAfterGC) {
 328     Universe::verify();
 329   }
 330 
 331   {
 332     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
 333     heap->post_full_gc_dump(_gc_timer);
 334   }
 335 }
 336 
 337 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 338 private:
 339   ShenandoahMarkingContext* const _ctx;
 340 
 341 public:
 342   ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 343 
 344   void heap_region_do(ShenandoahHeapRegion *r) {
 345     if (r->affiliation() != FREE) {
 346       _ctx->capture_top_at_mark_start(r);
 347       r->clear_live_data();
 348     }
 349   }
 350 
 351   bool is_thread_safe() { return true; }
 352 };
 353 
 354 void ShenandoahFullGC::phase1_mark_heap() {
 355   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 356   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 357 
 358   ShenandoahHeap* heap = ShenandoahHeap::heap();
 359 
 360   ShenandoahPrepareForMarkClosure cl;
 361   heap->parallel_heap_region_iterate(&cl);
 362 
 363   heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());
 364 
 365   ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
 366   // enable ("weak") refs discovery
 367   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 368 
 369   ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
 370   mark.mark();
 371   heap->parallel_cleaning(true /* full_gc */);
 372 }
 373 
 374 class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
 375 private:
 376   PreservedMarksSet*        const _preserved_marks;
 377   ShenandoahHeap*           const _heap;
 378   ShenandoahHeapRegionSet** const _worker_slices;
 379   size_t                    const _num_workers;
 380 
 381 public:
 382   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices,
 383                                      size_t num_workers);
 384 
 385   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 386     // Empty region: get it into the slice to defragment the slice itself.
 387     // We could have skipped this without violating correctness, but we really
 388     // want to compact all live regions to the start of the heap, which sometimes
 389     // means moving them into the fully empty regions.
 390     if (r->is_empty()) return true;
 391 
 392     // We can move this region, and it is not a humongous region. Humongous
 393     // regions are special-cased: their moves are handled separately.
 394     return r->is_stw_move_allowed() && !r->is_humongous();
 395   }
 396 
 397   void work(uint worker_id);
 398 };
 399 
 400 class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure {
 401 private:
 402   ShenandoahPrepareForCompactionTask* _compactor;
 403   PreservedMarks*          const _preserved_marks;
 404   ShenandoahHeap*          const _heap;
 405   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 406   int _empty_regions_pos;
 407   ShenandoahHeapRegion*          _old_to_region;
 408   ShenandoahHeapRegion*          _young_to_region;
 409   ShenandoahHeapRegion*          _from_region;
 410   ShenandoahRegionAffiliation    _from_affiliation;
 411   HeapWord*                      _old_compact_point;
 412   HeapWord*                      _young_compact_point;
 413   uint                           _worker_id;
 414 
 415 public:
 416   ShenandoahPrepareForGenerationalCompactionObjectClosure(ShenandoahPrepareForCompactionTask* compactor,
 417                                                           PreservedMarks* preserved_marks,
 418                                                           GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 419                                                           ShenandoahHeapRegion* old_to_region,
 420                                                           ShenandoahHeapRegion* young_to_region, uint worker_id) :
 421       _compactor(compactor),
 422       _preserved_marks(preserved_marks),
 423       _heap(ShenandoahHeap::heap()),
 424       _empty_regions(empty_regions),
 425       _empty_regions_pos(0),
 426       _old_to_region(old_to_region),
 427       _young_to_region(young_to_region),
 428       _from_region(NULL),
 429       _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr),
 430       _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr),
 431       _worker_id(worker_id) {}
 432 
 433   void set_from_region(ShenandoahHeapRegion* from_region) {
 434     _from_region = from_region;
 435     _from_affiliation = from_region->affiliation();
 436     if (_from_region->has_live()) {
 437       if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
 438         if (_old_to_region == nullptr) {
 439           _old_to_region = from_region;
 440           _old_compact_point = from_region->bottom();
 441         }
 442       } else {
 443         assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
 444         if (_young_to_region == nullptr) {
 445           _young_to_region = from_region;
 446           _young_compact_point = from_region->bottom();
 447         }
 448       }
 449     } // else, we won't iterate over this _from_region, so we don't need to set up a to-region to hold copies
 450   }
 451 
 452   void finish() {
 453     finish_old_region();
 454     finish_young_region();
 455   }
 456 
 457   void finish_old_region() {
 458     if (_old_to_region != nullptr) {
 459       log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
 460                     _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
 461       _old_to_region->set_new_top(_old_compact_point);
 462       _old_to_region = nullptr;
 463     }
 464   }
 465 
 466   void finish_young_region() {
 467     if (_young_to_region != nullptr) {
 468       log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
 469                     _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
 470       _young_to_region->set_new_top(_young_compact_point);
 471       _young_to_region = nullptr;
 472     }
 473   }
 474 
 475   bool is_compact_same_region() {
 476     return (_from_region == _old_to_region) || (_from_region == _young_to_region);
 477   }
 478 
 479   int empty_regions_pos() {
 480     return _empty_regions_pos;
 481   }
 482 
 483   void do_object(oop p) {
 484     assert(_from_region != NULL, "must set before work");
 485     assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
 486            "Object must reside in _from_region");
 487     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 488     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 489 
 490     size_t obj_size = p->size();
 491     if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
 492       assert(_old_to_region != nullptr, "_old_to_region should not be NULL when compacting OLD _from_region");
 493       if (_old_compact_point + obj_size > _old_to_region->end()) {
 494         ShenandoahHeapRegion* new_to_region;
 495 
 496         log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
 497                       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _old_to_region->index(),
 498                       p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));
 499 
 500         // Object does not fit.  Get a new _old_to_region.
 501         finish_old_region();
 502         if (_empty_regions_pos < _empty_regions.length()) {
 503           new_to_region = _empty_regions.at(_empty_regions_pos);
 504           _empty_regions_pos++;
 505           new_to_region->set_affiliation(OLD_GENERATION);
 506         } else {
 507           // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
 508           // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
 509           // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
 510           new_to_region = _from_region;
 511         }
 512 
 513         assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
 514         assert(new_to_region != NULL, "must not be NULL");
 515         _old_to_region = new_to_region;
 516         _old_compact_point = _old_to_region->bottom();
 517       }
 518 
 519       // Object fits into current region, record new location:
 520       assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
 521       shenandoah_assert_not_forwarded(NULL, p);
 522       _preserved_marks->push_if_necessary(p, p->mark());
 523       p->forward_to(cast_to_oop(_old_compact_point));
 524       _old_compact_point += obj_size;
 525     } else {
 526       assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION,
 527              "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
 528 
 529       assert(_young_to_region != nullptr, "_young_to_region should not be NULL when compacting YOUNG _from_region");
 530       if (_young_compact_point + obj_size > _young_to_region->end()) {
 531         ShenandoahHeapRegion* new_to_region;
 532 
 533 
 534         log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
 535                       ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,  _worker_id, _young_to_region->index(),
 536                       p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));
 537 
 538         // Object does not fit.  Get a new _young_to_region.
 539         finish_young_region();
 540         if (_empty_regions_pos < _empty_regions.length()) {
 541           new_to_region = _empty_regions.at(_empty_regions_pos);
 542           _empty_regions_pos++;
 543           new_to_region->set_affiliation(YOUNG_GENERATION);
 544         } else {
 545           // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
 546           // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
 547           // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
 548           new_to_region = _from_region;
 549         }
 550 
 551         assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
 552         assert(new_to_region != NULL, "must not be NULL");
 553         _young_to_region = new_to_region;
 554         _young_compact_point = _young_to_region->bottom();
 555       }
 556 
 557       // Object fits into current region, record new location:
 558       assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
 559       shenandoah_assert_not_forwarded(NULL, p);
 560       _preserved_marks->push_if_necessary(p, p->mark());
 561       p->forward_to(cast_to_oop(_young_compact_point));
 562       _young_compact_point += obj_size;
 563     }
 564   }
 565 };
 566 
 567 
 568 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 569 private:
 570   PreservedMarks*          const _preserved_marks;
 571   ShenandoahHeap*          const _heap;
 572   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 573   int _empty_regions_pos;
 574   ShenandoahHeapRegion*          _to_region;
 575   ShenandoahHeapRegion*          _from_region;
 576   HeapWord* _compact_point;
 577 
 578 public:
 579   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 580                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 581                                               ShenandoahHeapRegion* to_region) :
 582     _preserved_marks(preserved_marks),
 583     _heap(ShenandoahHeap::heap()),
 584     _empty_regions(empty_regions),
 585     _empty_regions_pos(0),
 586     _to_region(to_region),
 587     _from_region(NULL),
 588     _compact_point(to_region->bottom()) {}
 589 
 590   void set_from_region(ShenandoahHeapRegion* from_region) {
 591     _from_region = from_region;
 592   }
 593 
 594   void finish_region() {
 595     assert(_to_region != NULL, "should not happen");
 596     assert(!_heap->mode()->is_generational(), "Generational GC should use different Closure");
 597     _to_region->set_new_top(_compact_point);
 598   }
 599 
 600   bool is_compact_same_region() {
 601     return _from_region == _to_region;
 602   }
 603 
 604   int empty_regions_pos() {
 605     return _empty_regions_pos;
 606   }
 607 
 608   void do_object(oop p) {
 609     assert(_from_region != NULL, "must set before work");
 610     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 611     assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 612 
 613     size_t obj_size = p->size();
 614     if (_compact_point + obj_size > _to_region->end()) {
 615       finish_region();
 616 

 622       } else {
 623         // Out of empty region? Compact within the same region.
 624         new_to_region = _from_region;
 625       }
 626 
 627       assert(new_to_region != _to_region, "must not reuse same to-region");
 628       assert(new_to_region != NULL, "must not be NULL");
 629       _to_region = new_to_region;
 630       _compact_point = _to_region->bottom();
 631     }
 632 
 633     // Object fits into current region, record new location:
 634     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 635     shenandoah_assert_not_forwarded(NULL, p);
 636     _preserved_marks->push_if_necessary(p, p->mark());
 637     p->forward_to(cast_to_oop(_compact_point));
 638     _compact_point += obj_size;
 639   }
 640 };
 641 
 642 
 643 ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
 644                                                                        ShenandoahHeapRegionSet **worker_slices,
 645                                                                        size_t num_workers) :
 646     AbstractGangTask("Shenandoah Prepare For Compaction"),
 647     _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()),
 648     _worker_slices(worker_slices), _num_workers(num_workers) { }
 649 
 650 
 651 void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
 652   ShenandoahParallelWorkerSession worker_session(worker_id);
 653   ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 654   ShenandoahHeapRegionSetIterator it(slice);
 655   ShenandoahHeapRegion* from_region = it.next();
 656   // No work?
 657   if (from_region == NULL) {
 658     return;
 659   }
 660 
 661   // Sliding compaction. Walk all regions in the slice, and compact them.
 662   // Remember empty regions and reuse them as needed.
 663   ResourceMark rm;
 664 
 665   GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
 666 
 667   if (_heap->mode()->is_generational()) {
 668     ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr;
 669     ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr;
 670     ShenandoahPrepareForGenerationalCompactionObjectClosure cl(this, _preserved_marks->get(worker_id), empty_regions,
 671                                                                old_to_region, young_to_region, worker_id);
 672     while (from_region != NULL) {
 673       assert(is_candidate_region(from_region), "Sanity");
 674       log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
 675                     worker_id, affiliation_name(from_region->affiliation()),
 676                     from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
 677       cl.set_from_region(from_region);
 678       if (from_region->has_live()) {
 679         _heap->marked_object_iterate(from_region, &cl);
 680       }
 681 
 682       // Compacted the region to somewhere else? From-region is empty then.
 683       if (!cl.is_compact_same_region()) {
 684         empty_regions.append(from_region);
 685       }
 686       from_region = it.next();
 687     }
 688     cl.finish();
 689 
 690     // Mark all remaining regions as empty
 691     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 692       ShenandoahHeapRegion* r = empty_regions.at(pos);
 693       r->set_new_top(r->bottom());
 694     }
 695   } else {
 696     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

 697     while (from_region != NULL) {
 698       assert(is_candidate_region(from_region), "Sanity");

 699       cl.set_from_region(from_region);
 700       if (from_region->has_live()) {
 701         _heap->marked_object_iterate(from_region, &cl);
 702       }
 703 
 704       // Compacted the region to somewhere else? From-region is empty then.
 705       if (!cl.is_compact_same_region()) {
 706         empty_regions.append(from_region);
 707       }
 708       from_region = it.next();
 709     }
 710     cl.finish_region();
 711 
 712     // Mark all remaining regions as empty
 713     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 714       ShenandoahHeapRegion* r = empty_regions.at(pos);
 715       r->set_new_top(r->bottom());
 716     }
 717   }
 718 }
 719 
 720 void ShenandoahFullGC::calculate_target_humongous_objects() {
 721   ShenandoahHeap* heap = ShenandoahHeap::heap();
 722 
 723   // Compute the new addresses for humongous objects. We need to do this after the addresses
 724   // for regular objects are calculated, and we know which regions in the heap suffix are
 725   // available for humongous moves.
 726   //
 727   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 728   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 729   // a humongous start there.
 730   //
 731   // The complication is potential non-movable regions during the scan. If such a region is
 732   // detected, sliding restarts towards that non-movable region.
 733 
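       // Illustration with hypothetical indices: if the current window is [120; 124) and
       // region 119 is a movable humongous start that needs 3 regions, then
       // start = 124 - 3 = 121 >= 120 and 121 != 119, so the object is forwarded to the
       // bottom of region 121.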
 734   size_t to_begin = heap->num_regions();
 735   size_t to_end = heap->num_regions();
 736 
 737   log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
 738   for (size_t c = heap->num_regions(); c > 0; c--) {
 739     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 740     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 741       // To-region candidate: record this, and continue scan
 742       to_begin = r->index();
 743       continue;
 744     }
 745 
 746     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 747       // From-region candidate: movable humongous region
 748       oop old_obj = cast_to_oop(r->bottom());
 749       size_t words_size = old_obj->size();
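           // required_regions() rounds the byte size up to whole heap regions: the number of
           // contiguous regions this humongous object occupies.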
 750       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 751 
 752       size_t start = to_end - num_regions;
 753 
 754       if (start >= to_begin && start != r->index()) {
 755         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 756         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 757         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));

 784     }
 785     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 786 
 787     // Record current region occupancy: this communicates to the rest of the Full GC
 788     // code that empty regions are free.
 789     r->set_new_top(r->top());
 790   }
 791 };
 792 
 793 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
 794 private:
 795   ShenandoahHeap* const _heap;
 796   ShenandoahMarkingContext* const _ctx;
 797 
 798 public:
 799   ShenandoahTrashImmediateGarbageClosure() :
 800     _heap(ShenandoahHeap::heap()),
 801     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 802 
 803   void heap_region_do(ShenandoahHeapRegion* r) {
 804     if (r->affiliation() != FREE) {
 805       if (r->is_humongous_start()) {
 806         oop humongous_obj = cast_to_oop(r->bottom());
 807         if (!_ctx->is_marked(humongous_obj)) {
 808           assert(!r->has_live(),
 809                  "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live",
 810                  affiliation_name(r->affiliation()),  r->index());
 811           log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because not marked", r->index());
 812           _heap->trash_humongous_region_at(r);
 813         } else {
 814           assert(r->has_live(),
 815                  "Humongous Start %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()),  r->index());
 816         }
 817       } else if (r->is_humongous_continuation()) {
 818         // If we hit a continuation, the non-live humongous starts should have been trashed already
 819         assert(r->humongous_start_region()->has_live(),
 820                "Humongous Continuation %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()),  r->index());
 821       } else if (r->is_regular()) {
 822         if (!r->has_live()) {
 823           log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because has no live", r->index());
 824           r->make_trash_immediate();
 825         }
 826       }
 827     }
 828     // else, ignore this FREE region.
 829     // TODO: change iterators so they do not process FREE regions.
 830   }
 831 };
 832 
 833 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
 834   ShenandoahHeap* heap = ShenandoahHeap::heap();
 835 
 836   uint n_workers = heap->workers()->active_workers();
 837   size_t n_regions = heap->num_regions();
 838 
 839   // What we want to accomplish: have the dense prefix of data, while still balancing
 840   // out the parallel work.
 841   //
 842   // Assuming the amount of work is driven by the live data that needs moving, we can slice
 843   // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
 844   // thread takes all regions in its prefix subset, and then it takes some regions from
 845   // the tail.
 846   //
 847   // Tail region selection becomes interesting.
 848   //
 849   // First, we want to distribute the regions fairly between the workers, and those regions

 976   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
 977   ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
 978 
 979   ShenandoahHeap* heap = ShenandoahHeap::heap();
 980 
 981   // We are about to figure out which regions can be compacted; make sure the pinning
 982   // status has been updated in the GC prologue.
 983   heap->assert_pinned_region_status();
 984 
 985   {
 986     // Trash the immediately collectible regions before computing addresses
 987     ShenandoahTrashImmediateGarbageClosure tigcl;
 988     heap->heap_region_iterate(&tigcl);
 989 
 990     // Make sure regions are in good state: committed, active, clean.
 991     // This is needed because we are potentially sliding the data through them.
 992     ShenandoahEnsureHeapActiveClosure ecl;
 993     heap->heap_region_iterate(&ecl);
 994   }
 995 
 996   if (heap->mode()->is_generational()) {
 997     heap->young_generation()->clear_used();
 998     heap->old_generation()->clear_used();
 999   }
1000 
1001   // Compute the new addresses for regular objects
1002   {
1003     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
1004 
1005     distribute_slices(worker_slices);
1006 
1007     size_t num_workers = heap->max_workers();
1008 
1009     ResourceMark rm;
1010     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers);
1011     heap->workers()->run_task(&task);
1012   }
1013 
1014   // Compute the new addresses for humongous objects
1015   {
1016     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
1017     calculate_target_humongous_objects();
1018   }
1019 }
1020 
1021 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
1022 private:
1023   ShenandoahHeap* const _heap;
1024   ShenandoahMarkingContext* const _ctx;
1025 
1026   template <class T>
1027   inline void do_oop_work(T* p) {
1028     T o = RawAccess<>::oop_load(p);
1029     if (!CompressedOops::is_null(o)) {
1030       oop obj = CompressedOops::decode_not_null(o);

1200     }
1201 
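         // By this point the copy phase is expected to have reset each region's top to its
         // compacted watermark, so used() here counts only live data.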
1202     size_t live = r->used();
1203 
1204     // Empty regions that have been allocated into are made regular
1205     if (r->is_empty() && live > 0) {
1206       r->make_regular_bypass();
1207     }
1208 
1209     // Reclaim regular regions that became empty
1210     if (r->is_regular() && live == 0) {
1211       r->make_trash();
1212     }
1213 
1214     // Recycle all trash regions
1215     if (r->is_trash()) {
1216       live = 0;
1217       r->recycle();
1218     }
1219 
1220     // Update final usage for generations
1221     if (_heap->mode()->is_generational() && live != 0) {
1222       if (r->is_young()) {
1223         _heap->young_generation()->increase_used(live);
1224       } else if (r->is_old()) {
1225         _heap->old_generation()->increase_used(live);
1226       }
1227     }
1228 
1229     r->set_live_data(live);
1230     r->reset_alloc_metadata();
1231     _live += live;
1232   }
1233 
1234   size_t get_live() {
1235     return _live;
1236   }
1237 };
1238 
1239 void ShenandoahFullGC::compact_humongous_objects() {
1240   // Compact humongous regions, based on their fwdptr objects.
1241   //
1242   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
1243   // humongous regions are already compacted, and do not require further moves, which alleviates
1244   // sliding costs. We may consider doing this in parallel in the future.
1245 
1246   ShenandoahHeap* heap = ShenandoahHeap::heap();
1247 
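       // Walk regions from the top of the heap downwards, the same order in which target
       // locations were assigned during address calculation.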
1248   for (size_t c = heap->num_regions(); c > 0; c--) {
1249     ShenandoahHeapRegion* r = heap->get_region(c - 1);
1250     if (r->is_humongous_start()) {
1251       oop old_obj = cast_to_oop(r->bottom());
1252       if (!old_obj->is_forwarded()) {
1253         // No need to move the object, it stays at the same slot
1254         continue;
1255       }
1256       size_t words_size = old_obj->size();
1257       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1258 
1259       size_t old_start = r->index();
1260       size_t old_end   = old_start + num_regions - 1;
1261       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
1262       size_t new_end   = new_start + num_regions - 1;
1263       assert(old_start != new_start, "must be real move");
1264       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
1265 
1266       log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT,
1267                     old_start, new_start);
1268 
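           // A conjoint copy tolerates overlapping source and destination ranges, which can
           // happen when the old and new placements of a large object intersect.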
1269       Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
1270                                    heap->get_region(new_start)->bottom(),
1271                                    words_size);
1272 
1273       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
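           // The copied bytes still carry the forwarding-encoded mark word installed during
           // address calculation; reinstall a clean mark here. Non-trivial marks were saved
           // via the preserved marks mechanism and are restored later.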
1274       new_obj->init_mark();
1275 
1276       {
1277         ShenandoahRegionAffiliation original_affiliation = r->affiliation();
1278         for (size_t c = old_start; c <= old_end; c++) {
1279           ShenandoahHeapRegion* r = heap->get_region(c);
1280           r->make_regular_bypass();
1281           r->set_top(r->bottom());
1282         }
1283 
1284         for (size_t c = new_start; c <= new_end; c++) {
1285           ShenandoahHeapRegion* r = heap->get_region(c);
1286           if (c == new_start) {
1287             r->make_humongous_start_bypass(original_affiliation);
1288           } else {
1289             r->make_humongous_cont_bypass(original_affiliation);
1290           }
1291 
1292           // Trailing region may be non-full, record the remainder there
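               // (words_size & region_size_words_mask() is words_size modulo the region size
               // in words; it is zero only when the object exactly fills its last region.)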
1293           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1294           if ((c == new_end) && (remainder != 0)) {
1295             r->set_top(r->bottom() + remainder);
1296           } else {
1297             r->set_top(r->end());
1298           }
1299 
1300           r->reset_alloc_metadata();
1301         }
1302       }
1303     }
1304   }
1305 }
1306 
1307 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1308 // we need to remain able to walk pinned regions.
1309 // Since pinned regions do not move and don't get compacted, we will get holes with

1348   }
1349 
1350   // Compact humongous objects after regular object moves
1351   {
1352     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1353     compact_humongous_objects();
1354   }
1355 
1356   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1357   // and must ensure the bitmap is in sync.
1358   {
1359     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1360     ShenandoahMCResetCompleteBitmapTask task;
1361     heap->workers()->run_task(&task);
1362   }
1363 
1364   // Bring regions in proper states after the collection, and set heap properties.
1365   {
1366     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1367 
1368     if (heap->mode()->is_generational()) {
1369       heap->young_generation()->clear_used();
1370       heap->old_generation()->clear_used();
1371     }
1372 
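         // Usage for each generation is re-accumulated region by region by the
         // post-compact closure below.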
1373     ShenandoahPostCompactClosure post_compact;
1374     heap->heap_region_iterate(&post_compact);
1375     heap->set_used(post_compact.get_live());
1376     if (heap->mode()->is_generational()) {
1377       log_info(gc)("FullGC done: GLOBAL usage: " SIZE_FORMAT ", young usage: " SIZE_FORMAT ", old usage: " SIZE_FORMAT,
1378                     post_compact.get_live(), heap->young_generation()->used(), heap->old_generation()->used());
1379     }
1380 
1381     heap->collection_set()->clear();
1382     heap->free_set()->rebuild();
1383   }
1384 
1385   heap->clear_cancelled_gc(true /* clear oom handler */);
1386 }