src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/continuationGCSupport.hpp"
  29 #include "gc/shared/gcTraceTime.inline.hpp"
  30 #include "gc/shared/preservedMarks.inline.hpp"

  31 #include "gc/shared/tlab_globals.hpp"
  32 #include "gc/shared/workerThread.hpp"
  33 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahFullGC.hpp"
  39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc/shenandoah/shenandoahMetrics.hpp"
  47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  49 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  50 #include "gc/shenandoah/shenandoahSTWMark.hpp"

 211   // Coming out of Full GC, we would not have any forwarded objects.
  212   // This also prevents fwdptr-based resolves from kicking in while adjusting pointers in phase 3.
 213   heap->set_has_forwarded_objects(false);
 214 
 215   heap->set_full_gc_move_in_progress(true);
 216 
  217   // Set up workers for the rest
 218   OrderAccess::fence();
 219 
 220   // Initialize worker slices
 221   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
 222   for (uint i = 0; i < heap->max_workers(); i++) {
 223     worker_slices[i] = new ShenandoahHeapRegionSet();
 224   }
 225 
 226   {
  227     // The rest of the code performs region moves, where region status is undefined
  228     // until all phases below have completed.
 229     ShenandoahHeapLocker lock(heap->lock());
 230 


 231     phase2_calculate_target_addresses(worker_slices);
 232 
 233     OrderAccess::fence();
 234 
 235     phase3_update_references();
 236 
 237     phase4_compact_objects(worker_slices);
 238   }
 239 
 240   {
 241     // Epilogue
 242     _preserved_marks->restore(heap->workers());
 243     _preserved_marks->reclaim();

 244   }
 245 
 246   // Resize metaspace
 247   MetaspaceGC::compute_new_size();
 248 
 249   // Free worker slices
 250   for (uint i = 0; i < heap->max_workers(); i++) {
 251     delete worker_slices[i];
 252   }
 253   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 254 
 255   heap->set_full_gc_move_in_progress(false);
 256   heap->set_full_gc_in_progress(false);
 257 
 258   if (ShenandoahVerify) {
 259     heap->verifier()->verify_after_fullgc();
 260   }
 261 
 262   if (VerifyAfterGC) {
 263     Universe::verify();

 285 void ShenandoahFullGC::phase1_mark_heap() {
 286   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 287   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 288 
 289   ShenandoahHeap* heap = ShenandoahHeap::heap();
 290 
 291   ShenandoahPrepareForMarkClosure cl;
 292   heap->heap_region_iterate(&cl);
 293 
 294   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
 295 
 296   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 297   // enable ("weak") refs discovery
 298   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 299 
 300   ShenandoahSTWMark mark(true /*full_gc*/);
 301   mark.mark();
 302   heap->parallel_cleaning(true /* full_gc */);
 303 }
 304 

 305 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 306 private:
 307   PreservedMarks*          const _preserved_marks;
 308   ShenandoahHeap*          const _heap;
 309   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 310   int _empty_regions_pos;
 311   ShenandoahHeapRegion*          _to_region;
 312   ShenandoahHeapRegion*          _from_region;
 313   HeapWord* _compact_point;
 314 
 315 public:
 316   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 317                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 318                                               ShenandoahHeapRegion* to_region) :
 319     _preserved_marks(preserved_marks),
 320     _heap(ShenandoahHeap::heap()),
 321     _empty_regions(empty_regions),
 322     _empty_regions_pos(0),
 323     _to_region(to_region),
 324     _from_region(nullptr),

 353       // Object doesn't fit. Pick next empty region and start compacting there.
 354       ShenandoahHeapRegion* new_to_region;
 355       if (_empty_regions_pos < _empty_regions.length()) {
 356         new_to_region = _empty_regions.at(_empty_regions_pos);
 357         _empty_regions_pos++;
 358       } else {
  359         // Out of empty regions? Compact within the same region.
 360         new_to_region = _from_region;
 361       }
 362 
 363       assert(new_to_region != _to_region, "must not reuse same to-region");
 364       assert(new_to_region != nullptr, "must not be null");
 365       _to_region = new_to_region;
 366       _compact_point = _to_region->bottom();
 367     }
 368 
 369     // Object fits into current region, record new location:
 370     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 371     shenandoah_assert_not_forwarded(nullptr, p);
 372     _preserved_marks->push_if_necessary(p, p->mark());
 373     p->forward_to(cast_to_oop(_compact_point));
 374     _compact_point += obj_size;
 375   }
 376 };
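
The closure above is the core of per-slice sliding compaction: each live object is forwarded to a monotonically advancing compact point, and when the current to-region fills up, the compact point moves to the next remembered empty region (or, failing that, back into the from-region itself). Below is a minimal standalone sketch of that bookkeeping, using word offsets instead of heap regions; ToySlice and its members are illustrative inventions, not Shenandoah code.

#include <cassert>
#include <cstddef>
#include <vector>

struct ToySlice {
  size_t region_words;                 // capacity of each region, in words
  std::vector<size_t> region_starts;   // bottom() of each candidate to-region
  size_t to_region = 0;                // index of the current to-region
  size_t compact_point = 0;            // next free word in the current to-region

  // Returns the forwarded address (as a word offset) for one live object.
  size_t forward(size_t obj_words) {
    assert(obj_words <= region_words);
    if (compact_point + obj_words > region_words) {
      // Object doesn't fit: advance to the next to-region, like the code above.
      to_region++;
      compact_point = 0;
    }
    size_t fwd = region_starts[to_region] + compact_point;
    compact_point += obj_words;
    return fwd;
  }
};

int main() {
  ToySlice s{/*region_words=*/8, /*region_starts=*/{0, 8, 16}};
  size_t a = s.forward(5);   // fits at offset 0
  size_t b = s.forward(5);   // doesn't fit in region 0, slides to offset 8
  size_t c = s.forward(3);   // fits right behind it, offset 13
  assert(a == 0 && b == 8 && c == 13);
}

Because the compact point only moves forward and never past a region's end, every forwardee computed this way fits by construction, which is what the "must fit" assert in the real closure checks.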
 377 
 378 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 379 private:
 380   PreservedMarksSet*        const _preserved_marks;
 381   ShenandoahHeap*           const _heap;
 382   ShenandoahHeapRegionSet** const _worker_slices;
 383 
 384 public:
 385   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
 386     WorkerTask("Shenandoah Prepare For Compaction"),
 387     _preserved_marks(preserved_marks),
 388     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 389   }
 390 
 391   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 392     // Empty region: get it into the slice to defragment the slice itself.
 393     // We could have skipped this without violating correctness, but we really
 394     // want to compact all live regions to the start of the heap, which sometimes
 395     // means moving them into the fully empty regions.
 396     if (r->is_empty()) return true;
 397 
  398     // Can move the region, and this is not a humongous region. Humongous
  399     // regions are excluded here, because their moves are handled separately.
 400     return r->is_stw_move_allowed() && !r->is_humongous();
 401   }
 402 
 403   void work(uint worker_id) {










 404     ShenandoahParallelWorkerSession worker_session(worker_id);
 405     ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 406     ShenandoahHeapRegionSetIterator it(slice);
 407     ShenandoahHeapRegion* from_region = it.next();
 408     // No work?
 409     if (from_region == nullptr) {
 410        return;
 411     }
 412 
 413     // Sliding compaction. Walk all regions in the slice, and compact them.
 414     // Remember empty regions and reuse them as needed.
 415     ResourceMark rm;
 416 
 417     GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
 418 
 419     ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
 420 
 421     while (from_region != nullptr) {
 422       assert(is_candidate_region(from_region), "Sanity");
 423 
 424       cl.set_from_region(from_region);
 425       if (from_region->has_live()) {
 426         _heap->marked_object_iterate(from_region, &cl);
 427       }
 428 
 429       // Compacted the region to somewhere else? From-region is empty then.
 430       if (!cl.is_compact_same_region()) {
 431         empty_regions.append(from_region);
 432       }
 433       from_region = it.next();
 434     }
 435     cl.finish_region();
 436 
 437     // Mark all remaining regions as empty
 438     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 439       ShenandoahHeapRegion* r = empty_regions.at(pos);
 440       r->set_new_top(r->bottom());
 441     }
 442   }
 443 };
 444 
 445 void ShenandoahFullGC::calculate_target_humongous_objects() {

 446   ShenandoahHeap* heap = ShenandoahHeap::heap();
 447 
  448   // Compute the new addresses for humongous objects. We need to do this after addresses
  449   // for regular objects are calculated, when we know which regions in the heap suffix are
  450   // available for humongous moves.
  451   //
  452   // Scan the heap backwards, because we are compacting humongous regions towards the end.
  453   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  454   // a humongous start there.
  455   //
  456   // The complication is potential non-movable regions during the scan. If such a region is
  457   // detected, then sliding restarts at that non-movable region.
 458 
 459   size_t to_begin = heap->num_regions();
 460   size_t to_end = heap->num_regions();
 461 
 462   for (size_t c = heap->num_regions(); c > 0; c--) {
 463     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 464     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 465       // To-region candidate: record this, and continue scan
 466       to_begin = r->index();
 467       continue;
 468     }
 469 
 470     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 471       // From-region candidate: movable humongous region
 472       oop old_obj = cast_to_oop(r->bottom());
 473       size_t words_size = old_obj->size();
 474       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 475 
 476       size_t start = to_end - num_regions;
 477 
 478       if (start >= to_begin && start != r->index()) {
 479         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 480         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 481         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
 482         to_end = start;
 483         continue;
 484       }
 485     }
 486 
 487     // Failed to fit. Scan starting from current region.
 488     to_begin = r->index();
 489     to_end = r->index();
 490   }
 491 }
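
To make the window arithmetic above concrete, here is a hypothetical standalone model of the backward scan. Region states are reduced to the cases the loop distinguishes, and all names are illustrative; the real code additionally preserves marks and installs the forwarding pointer.

#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

struct ToyRegion {
  bool free     = false;  // new_top == bottom after regular compaction
  bool hcont    = false;  // humongous continuation
  size_t hstart = 0;      // > 0: humongous start spanning this many regions
  bool movable  = true;   // is_stw_move_allowed()
};

// Plans old-start-index -> new-start-index for every humongous move.
std::map<size_t, size_t> plan_humongous_moves(const std::vector<ToyRegion>& rs) {
  std::map<size_t, size_t> moves;
  size_t to_begin = rs.size(), to_end = rs.size();
  for (size_t c = rs.size(); c > 0; c--) {
    const ToyRegion& r = rs[c - 1];
    size_t i = c - 1;
    if (r.hcont || r.free) { to_begin = i; continue; }  // to-region candidate
    if (r.hstart > 0 && r.movable) {
      size_t start = to_end - r.hstart;
      if (start >= to_begin && start != i) {            // fits, non-trivial move
        moves[i] = start;
        to_end = start;
        continue;
      }
    }
    to_begin = to_end = i;  // non-movable or failed to fit: restart window here
  }
  return moves;
}

int main() {
  std::vector<ToyRegion> rs(6);
  rs[0].free = rs[1].free = rs[4].free = rs[5].free = true;
  rs[2].hstart = 2;   // two-region humongous object in regions 2-3
  rs[3].hcont  = true;
  std::map<size_t, size_t> moves = plan_humongous_moves(rs);
  assert(moves.size() == 1 && moves.at(2) == 4);  // slides from 2-3 up to 4-5
}

That slide from regions 2-3 to 4-5 is exactly the towards-the-end compaction the comment describes.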
 492 








 493 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 494 private:
 495   ShenandoahHeap* const _heap;
 496 
 497 public:
 498   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 499   void heap_region_do(ShenandoahHeapRegion* r) {
 500     if (r->is_trash()) {
 501       r->recycle();
 502     }
 503     if (r->is_cset()) {
 504       r->make_regular_bypass();
 505     }
 506     if (r->is_empty_uncommitted()) {
 507       r->make_committed_bypass();
 508     }
 509     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 510 
  511     // Record current region occupancy: this communicates to the rest of the Full GC
  512     // code that empty regions are free.

 710     heap->heap_region_iterate(&ecl);
 711   }
 712 
 713   // Compute the new addresses for regular objects
 714   {
 715     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 716 
 717     distribute_slices(worker_slices);
 718 
 719     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
 720     heap->workers()->run_task(&task);
 721   }
 722 
 723   // Compute the new addresses for humongous objects
 724   {
 725     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 726     calculate_target_humongous_objects();
 727   }
 728 }
 729 

 730 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 731 private:
 732   ShenandoahHeap* const _heap;
 733   ShenandoahMarkingContext* const _ctx;
 734 
 735   template <class T>
 736   inline void do_oop_work(T* p) {
 737     T o = RawAccess<>::oop_load(p);
 738     if (!CompressedOops::is_null(o)) {
 739       oop obj = CompressedOops::decode_not_null(o);
 740       assert(_ctx->is_marked(obj), "must be marked");
 741       if (obj->is_forwarded()) {
 742         oop forw = obj->forwardee();
 743         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 744       }
 745     }
 746   }
 747 
 748 public:
 749   ShenandoahAdjustPointersClosure() :
 750     _heap(ShenandoahHeap::heap()),
 751     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 752 
 753   void do_oop(oop* p)       { do_oop_work(p); }
 754   void do_oop(narrowOop* p) { do_oop_work(p); }
 755   void do_method(Method* m) {}
 756   void do_nmethod(nmethod* nm) {}
 757 };
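
The closure above runs once phase 2 has decided where every object will go, but before anything has moved: each reference is rewritten through RawAccess to point at the forwardee. A sketch of the same operation on a toy object graph (illustrative types, not HotSpot code):

#include <cstddef>
#include <vector>

struct ToyObj {
  std::vector<ToyObj*> fields;
  ToyObj* forwardee = nullptr;  // set in phase 2, consumed here and in phase 4
};

// Phase 3 on one object: redirect fields to forwardees; bodies move in phase 4.
void adjust_pointers(ToyObj& obj) {
  for (ToyObj*& f : obj.fields) {
    if (f != nullptr && f->forwardee != nullptr) {
      f = f->forwardee;  // points at the future location until the copy happens
    }
  }
}

Running this over all live objects and roots before the copy is what makes the later sliding copy safe: once phase 4 moves the bodies, every reference already points at the new locations.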
 758 

 759 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
 760 private:
 761   ShenandoahHeap* const _heap;
 762   ShenandoahAdjustPointersClosure _cl;
 763 
 764 public:
 765   ShenandoahAdjustPointersObjectClosure() :
 766     _heap(ShenandoahHeap::heap()) {
 767   }
 768   void do_object(oop p) {
 769     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 770     p->oop_iterate(&_cl);
 771   }
 772 };
 773 
 774 class ShenandoahAdjustPointersTask : public WorkerTask {
 775 private:
 776   ShenandoahHeap*          const _heap;
 777   ShenandoahRegionIterator       _regions;
 778 
 779 public:
 780   ShenandoahAdjustPointersTask() :
 781     WorkerTask("Shenandoah Adjust Pointers"),
 782     _heap(ShenandoahHeap::heap()) {
 783   }
 784 
 785   void work(uint worker_id) {


 786     ShenandoahParallelWorkerSession worker_session(worker_id);
 787     ShenandoahAdjustPointersObjectClosure obj_cl;
 788     ShenandoahHeapRegion* r = _regions.next();
 789     while (r != nullptr) {
 790       if (!r->is_humongous_continuation() && r->has_live()) {
 791         _heap->marked_object_iterate(r, &obj_cl);
 792       }
 793       r = _regions.next();
 794     }
 795   }









 796 };
 797 
 798 class ShenandoahAdjustRootPointersTask : public WorkerTask {
 799 private:
 800   ShenandoahRootAdjuster* _rp;
 801   PreservedMarksSet* _preserved_marks;

 802 public:
 803   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
 804     WorkerTask("Shenandoah Adjust Root Pointers"),
 805     _rp(rp),
 806     _preserved_marks(preserved_marks) {}
 807 
 808   void work(uint worker_id) {


 809     ShenandoahParallelWorkerSession worker_session(worker_id);
 810     ShenandoahAdjustPointersClosure cl;
 811     _rp->roots_do(worker_id, &cl);
 812     _preserved_marks->get(worker_id)->adjust_during_full_gc();
 813   }









 814 };
 815 
 816 void ShenandoahFullGC::phase3_update_references() {
 817   GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
 818   ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
 819 
 820   ShenandoahHeap* heap = ShenandoahHeap::heap();
 821 
 822   WorkerThreads* workers = heap->workers();
 823   uint nworkers = workers->active_workers();
 824   {
 825 #if COMPILER2_OR_JVMCI
 826     DerivedPointerTable::clear();
 827 #endif
 828     ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
 829     ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
 830     workers->run_task(&task);
 831 #if COMPILER2_OR_JVMCI
 832     DerivedPointerTable::update_pointers();
 833 #endif
 834   }
 835 
 836   ShenandoahAdjustPointersTask adjust_pointers_task;
 837   workers->run_task(&adjust_pointers_task);
 838 }
 839 

 840 class ShenandoahCompactObjectsClosure : public ObjectClosure {
 841 private:
 842   ShenandoahHeap* const _heap;
 843   uint            const _worker_id;
 844 
 845 public:
 846   ShenandoahCompactObjectsClosure(uint worker_id) :
 847     _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
 848 
 849   void do_object(oop p) {
 850     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 851     size_t size = p->size();
 852     if (p->is_forwarded()) {
 853       HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
 854       HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
 855       Copy::aligned_conjoint_words(compact_from, compact_to, size);
 856       oop new_obj = cast_to_oop(compact_to);
 857 
 858       ContinuationGCSupport::relativize_stack_chunk(new_obj);
 859       new_obj->init_mark();
 860     }
 861   }
 862 };
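
One detail worth noting in the copy above: Copy::aligned_conjoint_words is the overlap-tolerant (memmove-style) variant, which matters because a sliding move within the same region can have overlapping source and destination ranges. A standalone illustration of the same concern with plain memmove:

#include <cassert>
#include <cstring>

int main() {
  // An 8-word region with a live "object" at words [2..5) sliding to [0..3):
  // source and destination overlap at word 2, so memcpy would be undefined.
  long region[8] = {0, 0, 41, 42, 43, 0, 0, 0};
  memmove(&region[0], &region[2], 3 * sizeof(long));  // overlap-safe slide
  assert(region[0] == 41 && region[1] == 42 && region[2] == 43);
}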
 863 
 864 class ShenandoahCompactObjectsTask : public WorkerTask {
 865 private:
 866   ShenandoahHeap* const _heap;
 867   ShenandoahHeapRegionSet** const _worker_slices;
 868 
 869 public:
 870   ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
 871     WorkerTask("Shenandoah Compact Objects"),
 872     _heap(ShenandoahHeap::heap()),
 873     _worker_slices(worker_slices) {
 874   }
 875 
 876   void work(uint worker_id) {


 877     ShenandoahParallelWorkerSession worker_session(worker_id);
 878     ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
 879 
 880     ShenandoahCompactObjectsClosure cl(worker_id);
 881     ShenandoahHeapRegion* r = slice.next();
 882     while (r != nullptr) {
 883       assert(!r->is_humongous(), "must not get humongous regions here");
 884       if (r->has_live()) {
 885         _heap->marked_object_iterate(r, &cl);
 886       }
 887       r->set_top(r->new_top());
 888       r = slice.next();
 889     }
 890   }









 891 };
 892 
 893 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
 894 private:
 895   ShenandoahHeap* const _heap;
 896   size_t _live;
 897 
 898 public:
 899   ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
 900     _heap->free_set()->clear();
 901   }
 902 
 903   void heap_region_do(ShenandoahHeapRegion* r) {
 904     assert (!r->is_cset(), "cset regions should have been demoted already");
 905 
 906     // Need to reset the complete-top-at-mark-start pointer here because
 907     // the complete marking bitmap is no longer valid. This ensures
 908     // size-based iteration in marked_object_iterate().
 909     // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
 910     // pinned regions.

 926     if (r->is_regular() && live == 0) {
 927       r->make_trash();
 928     }
 929 
 930     // Recycle all trash regions
 931     if (r->is_trash()) {
 932       live = 0;
 933       r->recycle();
 934     }
 935 
 936     r->set_live_data(live);
 937     r->reset_alloc_metadata();
 938     _live += live;
 939   }
 940 
 941   size_t get_live() {
 942     return _live;
 943   }
 944 };
 945 
 946 void ShenandoahFullGC::compact_humongous_objects() {

 947   // Compact humongous regions, based on their fwdptr objects.
 948   //
 949   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  950   // humongous regions are already compacted and do not require further moves, which reduces
  951   // sliding costs. We may consider doing this in parallel in the future.
 952 
 953   ShenandoahHeap* heap = ShenandoahHeap::heap();
 954 
 955   for (size_t c = heap->num_regions(); c > 0; c--) {
 956     ShenandoahHeapRegion* r = heap->get_region(c - 1);
 957     if (r->is_humongous_start()) {
 958       oop old_obj = cast_to_oop(r->bottom());
 959       if (!old_obj->is_forwarded()) {
 960         // No need to move the object, it stays at the same slot
 961         continue;
 962       }
 963       size_t words_size = old_obj->size();
 964       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 965 
 966       size_t old_start = r->index();
 967       size_t old_end   = old_start + num_regions - 1;
 968       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
 969       size_t new_end   = new_start + num_regions - 1;
 970       assert(old_start != new_start, "must be real move");
 971       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
 972 
 973       Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
 974       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
 975 
 976       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
 977       new_obj->init_mark();
 978 
 979       {
 980         for (size_t c = old_start; c <= old_end; c++) {
 981           ShenandoahHeapRegion* r = heap->get_region(c);
 982           r->make_regular_bypass();
 983           r->set_top(r->bottom());
 984         }
 985 
 986         for (size_t c = new_start; c <= new_end; c++) {
 987           ShenandoahHeapRegion* r = heap->get_region(c);
 988           if (c == new_start) {
 989             r->make_humongous_start_bypass();
 990           } else {
 991             r->make_humongous_cont_bypass();
 992           }
 993 
 994           // Trailing region may be non-full, record the remainder there
 995           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 996           if ((c == new_end) && (remainder != 0)) {
 997             r->set_top(r->bottom() + remainder);
 998           } else {
 999             r->set_top(r->end());
1000           }
1001 
1002           r->reset_alloc_metadata();
1003         }
1004       }
1005     }
1006   }
1007 }
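
The trailing-region arithmetic above relies on the region size in words being a power of two, so region_size_words_mask() is the region size in words minus one and the bitwise AND extracts the remainder. The same arithmetic in isolation, under an assumed 64K-word region size (the real value comes from ShenandoahHeapRegion, and required_regions() takes bytes, not words):

#include <cassert>
#include <cstddef>

int main() {
  const size_t region_words = size_t(1) << 16;          // assumed region size
  const size_t words_size   = 3 * region_words + 777;   // humongous object size
  size_t num_regions = (words_size + region_words - 1) / region_words;  // 4
  size_t remainder   = words_size & (region_words - 1);                 // 777
  assert(num_regions == 4 && remainder == 777);
  // The last of the 4 regions gets top = bottom + 777; the others are full.
}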
1008 








 1009 // This is slightly different from ShHeap::reset_next_mark_bitmap:
 1010 // we need to remain able to walk pinned regions.
 1011 // Since pinned regions do not move and don't get compacted, we will get holes with
 1012 // unreachable objects in them (which may have pointers to unloaded Klasses, and thus
 1013 // cannot be iterated over using oop->size()). The only way to safely iterate over those is using
 1014 // a valid marking bitmap and a valid TAMS pointer. This class only resets marking
 1015 // bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
1016 class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
1017 private:
1018   ShenandoahRegionIterator _regions;
1019 
1020 public:
1021   ShenandoahMCResetCompleteBitmapTask() :
1022     WorkerTask("Shenandoah Reset Bitmap") {
1023   }
1024 
1025   void work(uint worker_id) {
1026     ShenandoahParallelWorkerSession worker_session(worker_id);
1027     ShenandoahHeapRegion* region = _regions.next();
1028     ShenandoahHeap* heap = ShenandoahHeap::heap();

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/continuationGCSupport.hpp"
  29 #include "gc/shared/gcTraceTime.inline.hpp"
  30 #include "gc/shared/preservedMarks.inline.hpp"
  31 #include "gc/shared/slidingForwarding.inline.hpp"
  32 #include "gc/shared/tlab_globals.hpp"
  33 #include "gc/shared/workerThread.hpp"
  34 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  37 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  38 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  39 #include "gc/shenandoah/shenandoahFullGC.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  42 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  45 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  46 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  47 #include "gc/shenandoah/shenandoahMetrics.hpp"
  48 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  49 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  50 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  51 #include "gc/shenandoah/shenandoahSTWMark.hpp"

 212   // Coming out of Full GC, we would not have any forwarded objects.
  213   // This also prevents fwdptr-based resolves from kicking in while adjusting pointers in phase 3.
 214   heap->set_has_forwarded_objects(false);
 215 
 216   heap->set_full_gc_move_in_progress(true);
 217 
  218   // Set up workers for the rest
 219   OrderAccess::fence();
 220 
 221   // Initialize worker slices
 222   ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
 223   for (uint i = 0; i < heap->max_workers(); i++) {
 224     worker_slices[i] = new ShenandoahHeapRegionSet();
 225   }
 226 
 227   {
  228     // The rest of the code performs region moves, where region status is undefined
  229     // until all phases below have completed.
 230     ShenandoahHeapLocker lock(heap->lock());
 231 
 232     SlidingForwarding::begin();
 233 
 234     phase2_calculate_target_addresses(worker_slices);
 235 
 236     OrderAccess::fence();
 237 
 238     phase3_update_references();
 239 
 240     phase4_compact_objects(worker_slices);
 241   }
 242 
 243   {
 244     // Epilogue
 245     _preserved_marks->restore(heap->workers());
 246     _preserved_marks->reclaim();
 247     SlidingForwarding::end();
 248   }
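
New in this version: SlidingForwarding::begin() and end() bracket the whole move sequence, and only between those calls are forward_to(), forwardee(), and is_forwarded() legal. The class itself comes from the new gc/shared/slidingForwarding.inline.hpp include and is not shown in this diff; the toy below mimics only the call shape and lifecycle visible here, not the real encoding.

#include <cassert>
#include <unordered_map>

// Toy stand-in for the forwarding lifecycle; not the real SlidingForwarding.
class ToyForwarding {
  static std::unordered_map<const void*, void*>* _table;
public:
  static void begin() { _table = new std::unordered_map<const void*, void*>(); }
  static void end()   { delete _table; _table = nullptr; }
  static void forward_to(const void* from, void* to) { (*_table)[from] = to; }
  static bool is_forwarded(const void* from) { return _table->count(from) != 0; }
  static void* forwardee(const void* from)   { return _table->at(from); }
};
std::unordered_map<const void*, void*>* ToyForwarding::_table = nullptr;

int main() {
  int a = 0, b = 0;
  ToyForwarding::begin();             // phase 2 records the planned moves...
  ToyForwarding::forward_to(&a, &b);
  assert(ToyForwarding::is_forwarded(&a));
  assert(ToyForwarding::forwardee(&a) == &b);
  ToyForwarding::end();               // ...and the epilogue releases the state
}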
 249 
 250   // Resize metaspace
 251   MetaspaceGC::compute_new_size();
 252 
 253   // Free worker slices
 254   for (uint i = 0; i < heap->max_workers(); i++) {
 255     delete worker_slices[i];
 256   }
 257   FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
 258 
 259   heap->set_full_gc_move_in_progress(false);
 260   heap->set_full_gc_in_progress(false);
 261 
 262   if (ShenandoahVerify) {
 263     heap->verifier()->verify_after_fullgc();
 264   }
 265 
 266   if (VerifyAfterGC) {
 267     Universe::verify();

 289 void ShenandoahFullGC::phase1_mark_heap() {
 290   GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
 291   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 292 
 293   ShenandoahHeap* heap = ShenandoahHeap::heap();
 294 
 295   ShenandoahPrepareForMarkClosure cl;
 296   heap->heap_region_iterate(&cl);
 297 
 298   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
 299 
 300   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 301   // enable ("weak") refs discovery
 302   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 303 
 304   ShenandoahSTWMark mark(true /*full_gc*/);
 305   mark.mark();
 306   heap->parallel_cleaning(true /* full_gc */);
 307 }
 308 
 309 template <bool ALT_FWD>
 310 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 311 private:
 312   PreservedMarks*          const _preserved_marks;
 313   ShenandoahHeap*          const _heap;
 314   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 315   int _empty_regions_pos;
 316   ShenandoahHeapRegion*          _to_region;
 317   ShenandoahHeapRegion*          _from_region;
 318   HeapWord* _compact_point;
 319 
 320 public:
 321   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 322                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 323                                               ShenandoahHeapRegion* to_region) :
 324     _preserved_marks(preserved_marks),
 325     _heap(ShenandoahHeap::heap()),
 326     _empty_regions(empty_regions),
 327     _empty_regions_pos(0),
 328     _to_region(to_region),
 329     _from_region(nullptr),

 358       // Object doesn't fit. Pick next empty region and start compacting there.
 359       ShenandoahHeapRegion* new_to_region;
 360       if (_empty_regions_pos < _empty_regions.length()) {
 361         new_to_region = _empty_regions.at(_empty_regions_pos);
 362         _empty_regions_pos++;
 363       } else {
  364         // Out of empty regions? Compact within the same region.
 365         new_to_region = _from_region;
 366       }
 367 
 368       assert(new_to_region != _to_region, "must not reuse same to-region");
 369       assert(new_to_region != nullptr, "must not be null");
 370       _to_region = new_to_region;
 371       _compact_point = _to_region->bottom();
 372     }
 373 
 374     // Object fits into current region, record new location:
 375     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 376     shenandoah_assert_not_forwarded(nullptr, p);
 377     _preserved_marks->push_if_necessary(p, p->mark());
 378     SlidingForwarding::forward_to<ALT_FWD>(p, cast_to_oop(_compact_point));
 379     _compact_point += obj_size;
 380   }
 381 };
 382 
 383 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 384 private:
 385   PreservedMarksSet*        const _preserved_marks;
 386   ShenandoahHeap*           const _heap;
 387   ShenandoahHeapRegionSet** const _worker_slices;
 388 
 389 public:
 390   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
 391     WorkerTask("Shenandoah Prepare For Compaction"),
 392     _preserved_marks(preserved_marks),
 393     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 394   }
 395 
 396   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 397     // Empty region: get it into the slice to defragment the slice itself.
 398     // We could have skipped this without violating correctness, but we really
 399     // want to compact all live regions to the start of the heap, which sometimes
 400     // means moving them into the fully empty regions.
 401     if (r->is_empty()) return true;
 402 
  403     // Can move the region, and this is not a humongous region. Humongous
  404     // regions are excluded here, because their moves are handled separately.
 405     return r->is_stw_move_allowed() && !r->is_humongous();
 406   }
 407 
 408   void work(uint worker_id) {
 409     if (UseAltGCForwarding) {
 410       work_impl<true>(worker_id);
 411     } else {
 412       work_impl<false>(worker_id);
 413     }
 414   }
 415 
 416 private:
 417   template <bool ALT_FWD>
 418   void work_impl(uint worker_id) {
 419     ShenandoahParallelWorkerSession worker_session(worker_id);
 420     ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
 421     ShenandoahHeapRegionSetIterator it(slice);
 422     ShenandoahHeapRegion* from_region = it.next();
 423     // No work?
 424     if (from_region == nullptr) {
 425        return;
 426     }
 427 
 428     // Sliding compaction. Walk all regions in the slice, and compact them.
 429     // Remember empty regions and reuse them as needed.
 430     ResourceMark rm;
 431 
 432     GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
 433 
 434     ShenandoahPrepareForCompactionObjectClosure<ALT_FWD> cl(_preserved_marks->get(worker_id), empty_regions, from_region);
 435 
 436     while (from_region != nullptr) {
 437       assert(is_candidate_region(from_region), "Sanity");
 438 
 439       cl.set_from_region(from_region);
 440       if (from_region->has_live()) {
 441         _heap->marked_object_iterate(from_region, &cl);
 442       }
 443 
 444       // Compacted the region to somewhere else? From-region is empty then.
 445       if (!cl.is_compact_same_region()) {
 446         empty_regions.append(from_region);
 447       }
 448       from_region = it.next();
 449     }
 450     cl.finish_region();
 451 
 452     // Mark all remaining regions as empty
 453     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 454       ShenandoahHeapRegion* r = empty_regions.at(pos);
 455       r->set_new_top(r->bottom());
 456     }
 457   }
 458 };
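
All the work() methods in this version share one new pattern: UseAltGCForwarding is tested once per task and hoisted into the ALT_FWD template parameter, so the hot per-object loops compile into two specialized copies instead of re-testing a runtime flag per object. A standalone sketch of the pattern, with use_alt standing in for the flag:

#include <cassert>

template <bool ALT>
static long work_impl(long n) {
  long sum = 0;
  for (long i = 0; i < n; i++) {
    // ALT is a compile-time constant: each instantiation keeps only one path.
    sum += ALT ? 2 * i : i;
  }
  return sum;
}

static long work(bool use_alt, long n) {
  // One branch per task invocation, not one per object.
  return use_alt ? work_impl<true>(n) : work_impl<false>(n);
}

int main() {
  assert(work(false, 4) == 6);    // 0 + 1 + 2 + 3
  assert(work(true, 4)  == 12);   // 0 + 2 + 4 + 6
}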
 459 
 460 template <bool ALT_FWD>
 461 void ShenandoahFullGC::calculate_target_humongous_objects_impl() {
 462   ShenandoahHeap* heap = ShenandoahHeap::heap();
 463 
  464   // Compute the new addresses for humongous objects. We need to do this after addresses
  465   // for regular objects are calculated, when we know which regions in the heap suffix are
  466   // available for humongous moves.
  467   //
  468   // Scan the heap backwards, because we are compacting humongous regions towards the end.
  469   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  470   // a humongous start there.
  471   //
  472   // The complication is potential non-movable regions during the scan. If such a region is
  473   // detected, then sliding restarts at that non-movable region.
 474 
 475   size_t to_begin = heap->num_regions();
 476   size_t to_end = heap->num_regions();
 477 
 478   for (size_t c = heap->num_regions(); c > 0; c--) {
 479     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 480     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 481       // To-region candidate: record this, and continue scan
 482       to_begin = r->index();
 483       continue;
 484     }
 485 
 486     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 487       // From-region candidate: movable humongous region
 488       oop old_obj = cast_to_oop(r->bottom());
 489       size_t words_size = old_obj->size();
 490       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 491 
 492       size_t start = to_end - num_regions;
 493 
 494       if (start >= to_begin && start != r->index()) {
  495         // Fits into the current window, and the move is non-trivial. Record the move, then continue the scan.
 496         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 497         SlidingForwarding::forward_to<ALT_FWD>(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
 498         to_end = start;
 499         continue;
 500       }
 501     }
 502 
 503     // Failed to fit. Scan starting from current region.
 504     to_begin = r->index();
 505     to_end = r->index();
 506   }
 507 }
 508 
 509 void ShenandoahFullGC::calculate_target_humongous_objects() {
 510   if (UseAltGCForwarding) {
 511     calculate_target_humongous_objects_impl<true>();
 512   } else {
 513     calculate_target_humongous_objects_impl<false>();
 514   }
 515 }
 516 
 517 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 518 private:
 519   ShenandoahHeap* const _heap;
 520 
 521 public:
 522   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 523   void heap_region_do(ShenandoahHeapRegion* r) {
 524     if (r->is_trash()) {
 525       r->recycle();
 526     }
 527     if (r->is_cset()) {
 528       r->make_regular_bypass();
 529     }
 530     if (r->is_empty_uncommitted()) {
 531       r->make_committed_bypass();
 532     }
 533     assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
 534 
  535     // Record current region occupancy: this communicates to the rest of the Full GC
  536     // code that empty regions are free.

 734     heap->heap_region_iterate(&ecl);
 735   }
 736 
 737   // Compute the new addresses for regular objects
 738   {
 739     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 740 
 741     distribute_slices(worker_slices);
 742 
 743     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
 744     heap->workers()->run_task(&task);
 745   }
 746 
 747   // Compute the new addresses for humongous objects
 748   {
 749     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 750     calculate_target_humongous_objects();
 751   }
 752 }
 753 
 754 template <bool ALT_FWD>
 755 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 756 private:
 757   ShenandoahHeap* const _heap;
 758   ShenandoahMarkingContext* const _ctx;
 759 
 760   template <class T>
 761   inline void do_oop_work(T* p) {
 762     T o = RawAccess<>::oop_load(p);
 763     if (!CompressedOops::is_null(o)) {
 764       oop obj = CompressedOops::decode_not_null(o);
 765       assert(_ctx->is_marked(obj), "must be marked");
 766       if (SlidingForwarding::is_forwarded(obj)) {
 767         oop forw = SlidingForwarding::forwardee<ALT_FWD>(obj);
 768         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 769       }
 770     }
 771   }
 772 
 773 public:
 774   ShenandoahAdjustPointersClosure() :
 775     _heap(ShenandoahHeap::heap()),
 776     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 777 
 778   void do_oop(oop* p)       { do_oop_work(p); }
 779   void do_oop(narrowOop* p) { do_oop_work(p); }
 780   void do_method(Method* m) {}
 781   void do_nmethod(nmethod* nm) {}
 782 };
 783 
 784 template <bool ALT_FWD>
 785 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
 786 private:
 787   ShenandoahHeap* const _heap;
 788   ShenandoahAdjustPointersClosure<ALT_FWD> _cl;
 789 
 790 public:
 791   ShenandoahAdjustPointersObjectClosure() :
 792     _heap(ShenandoahHeap::heap()) {
 793   }
 794   void do_object(oop p) {
 795     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 796     p->oop_iterate(&_cl);
 797   }
 798 };
 799 
 800 class ShenandoahAdjustPointersTask : public WorkerTask {
 801 private:
 802   ShenandoahHeap*          const _heap;
 803   ShenandoahRegionIterator       _regions;
 804 
 805 public:
 806   ShenandoahAdjustPointersTask() :
 807     WorkerTask("Shenandoah Adjust Pointers"),
 808     _heap(ShenandoahHeap::heap()) {
 809   }
 810 
 811 private:
 812   template <bool ALT_FWD>
 813   void work_impl(uint worker_id) {
 814     ShenandoahParallelWorkerSession worker_session(worker_id);
 815     ShenandoahAdjustPointersObjectClosure<ALT_FWD> obj_cl;
 816     ShenandoahHeapRegion* r = _regions.next();
 817     while (r != nullptr) {
 818       if (!r->is_humongous_continuation() && r->has_live()) {
 819         _heap->marked_object_iterate(r, &obj_cl);
 820       }
 821       r = _regions.next();
 822     }
 823   }
 824 
 825 public:
 826   void work(uint worker_id) {
 827     if (UseAltGCForwarding) {
 828       work_impl<true>(worker_id);
 829     } else {
 830       work_impl<false>(worker_id);
 831     }
 832   }
 833 };
 834 
 835 class ShenandoahAdjustRootPointersTask : public WorkerTask {
 836 private:
 837   ShenandoahRootAdjuster* _rp;
 838   PreservedMarksSet* _preserved_marks;
 839 
 840 public:
 841   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
 842     WorkerTask("Shenandoah Adjust Root Pointers"),
 843     _rp(rp),
 844     _preserved_marks(preserved_marks) {}
 845 
 846 private:
 847   template <bool ALT_FWD>
 848   void work_impl(uint worker_id) {
 849     ShenandoahParallelWorkerSession worker_session(worker_id);
 850     ShenandoahAdjustPointersClosure<ALT_FWD> cl;
 851     _rp->roots_do(worker_id, &cl);
 852     _preserved_marks->get(worker_id)->adjust_during_full_gc();
 853   }
 854 
 855 public:
 856   void work(uint worker_id) {
 857     if (UseAltGCForwarding) {
 858       work_impl<true>(worker_id);
 859     } else {
 860       work_impl<false>(worker_id);
 861     }
 862   }
 863 };
 864 
 865 void ShenandoahFullGC::phase3_update_references() {
 866   GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
 867   ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
 868 
 869   ShenandoahHeap* heap = ShenandoahHeap::heap();
 870 
 871   WorkerThreads* workers = heap->workers();
 872   uint nworkers = workers->active_workers();
 873   {
 874 #if COMPILER2_OR_JVMCI
 875     DerivedPointerTable::clear();
 876 #endif
 877     ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
 878     ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
 879     workers->run_task(&task);
 880 #if COMPILER2_OR_JVMCI
 881     DerivedPointerTable::update_pointers();
 882 #endif
 883   }
 884 
 885   ShenandoahAdjustPointersTask adjust_pointers_task;
 886   workers->run_task(&adjust_pointers_task);
 887 }
 888 
 889 template <bool ALT_FWD>
 890 class ShenandoahCompactObjectsClosure : public ObjectClosure {
 891 private:
 892   ShenandoahHeap* const _heap;
 893   uint            const _worker_id;
 894 
 895 public:
 896   ShenandoahCompactObjectsClosure(uint worker_id) :
 897     _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
 898 
 899   void do_object(oop p) {
 900     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 901     size_t size = p->size();
 902     if (SlidingForwarding::is_forwarded(p)) {
 903       HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
 904       HeapWord* compact_to = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(p));
 905       Copy::aligned_conjoint_words(compact_from, compact_to, size);
 906       oop new_obj = cast_to_oop(compact_to);
 907 
 908       ContinuationGCSupport::relativize_stack_chunk(new_obj);
 909       new_obj->init_mark();
 910     }
 911   }
 912 };
 913 
 914 class ShenandoahCompactObjectsTask : public WorkerTask {
 915 private:
 916   ShenandoahHeap* const _heap;
 917   ShenandoahHeapRegionSet** const _worker_slices;
 918 
 919 public:
 920   ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
 921     WorkerTask("Shenandoah Compact Objects"),
 922     _heap(ShenandoahHeap::heap()),
 923     _worker_slices(worker_slices) {
 924   }
 925 
 926 private:
 927   template <bool ALT_FWD>
 928   void work_impl(uint worker_id) {
 929     ShenandoahParallelWorkerSession worker_session(worker_id);
 930     ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
 931 
 932     ShenandoahCompactObjectsClosure<ALT_FWD> cl(worker_id);
 933     ShenandoahHeapRegion* r = slice.next();
 934     while (r != nullptr) {
 935       assert(!r->is_humongous(), "must not get humongous regions here");
 936       if (r->has_live()) {
 937         _heap->marked_object_iterate(r, &cl);
 938       }
 939       r->set_top(r->new_top());
 940       r = slice.next();
 941     }
 942   }
 943 
 944 public:
 945   void work(uint worker_id) {
 946     if (UseAltGCForwarding) {
 947       work_impl<true>(worker_id);
 948     } else {
 949       work_impl<false>(worker_id);
 950     }
 951   }
 952 };
 953 
 954 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
 955 private:
 956   ShenandoahHeap* const _heap;
 957   size_t _live;
 958 
 959 public:
 960   ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
 961     _heap->free_set()->clear();
 962   }
 963 
 964   void heap_region_do(ShenandoahHeapRegion* r) {
 965     assert (!r->is_cset(), "cset regions should have been demoted already");
 966 
 967     // Need to reset the complete-top-at-mark-start pointer here because
 968     // the complete marking bitmap is no longer valid. This ensures
 969     // size-based iteration in marked_object_iterate().
 970     // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
 971     // pinned regions.

 987     if (r->is_regular() && live == 0) {
 988       r->make_trash();
 989     }
 990 
 991     // Recycle all trash regions
 992     if (r->is_trash()) {
 993       live = 0;
 994       r->recycle();
 995     }
 996 
 997     r->set_live_data(live);
 998     r->reset_alloc_metadata();
 999     _live += live;
1000   }
1001 
1002   size_t get_live() {
1003     return _live;
1004   }
1005 };
1006 
1007 template <bool ALT_FWD>
1008 void ShenandoahFullGC::compact_humongous_objects_impl() {
1009   // Compact humongous regions, based on their fwdptr objects.
1010   //
1011   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
 1012 // humongous regions are already compacted and do not require further moves, which reduces
 1013 // sliding costs. We may consider doing this in parallel in the future.
1014 
1015   ShenandoahHeap* heap = ShenandoahHeap::heap();
1016 
1017   for (size_t c = heap->num_regions(); c > 0; c--) {
1018     ShenandoahHeapRegion* r = heap->get_region(c - 1);
1019     if (r->is_humongous_start()) {
1020       oop old_obj = cast_to_oop(r->bottom());
1021       if (SlidingForwarding::is_not_forwarded(old_obj)) {
1022         // No need to move the object, it stays at the same slot
1023         continue;
1024       }
1025       size_t words_size = old_obj->size();
1026       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1027 
1028       size_t old_start = r->index();
1029       size_t old_end   = old_start + num_regions - 1;
1030       size_t new_start = heap->heap_region_index_containing(SlidingForwarding::forwardee<ALT_FWD>(old_obj));
1031       size_t new_end   = new_start + num_regions - 1;
1032       assert(old_start != new_start, "must be real move");
1033       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
1034 
1035       Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
1036       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
1037 
1038       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1039       new_obj->init_mark();
1040 
1041       {
1042         for (size_t c = old_start; c <= old_end; c++) {
1043           ShenandoahHeapRegion* r = heap->get_region(c);
1044           r->make_regular_bypass();
1045           r->set_top(r->bottom());
1046         }
1047 
1048         for (size_t c = new_start; c <= new_end; c++) {
1049           ShenandoahHeapRegion* r = heap->get_region(c);
1050           if (c == new_start) {
1051             r->make_humongous_start_bypass();
1052           } else {
1053             r->make_humongous_cont_bypass();
1054           }
1055 
1056           // Trailing region may be non-full, record the remainder there
1057           size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1058           if ((c == new_end) && (remainder != 0)) {
1059             r->set_top(r->bottom() + remainder);
1060           } else {
1061             r->set_top(r->end());
1062           }
1063 
1064           r->reset_alloc_metadata();
1065         }
1066       }
1067     }
1068   }
1069 }
1070 
1071 void ShenandoahFullGC::compact_humongous_objects() {
1072   if (UseAltGCForwarding) {
1073     compact_humongous_objects_impl<true>();
1074   } else {
1075     compact_humongous_objects_impl<false>();
1076   }
1077 }
1078 
 1079 // This is slightly different from ShHeap::reset_next_mark_bitmap:
 1080 // we need to remain able to walk pinned regions.
 1081 // Since pinned regions do not move and don't get compacted, we will get holes with
 1082 // unreachable objects in them (which may have pointers to unloaded Klasses, and thus
 1083 // cannot be iterated over using oop->size()). The only way to safely iterate over those is using
 1084 // a valid marking bitmap and a valid TAMS pointer. This class only resets marking
 1085 // bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
1086 class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
1087 private:
1088   ShenandoahRegionIterator _regions;
1089 
1090 public:
1091   ShenandoahMCResetCompleteBitmapTask() :
1092     WorkerTask("Shenandoah Reset Bitmap") {
1093   }
1094 
1095   void work(uint worker_id) {
1096     ShenandoahParallelWorkerSession worker_session(worker_id);
1097     ShenandoahHeapRegion* region = _regions.next();
1098     ShenandoahHeap* heap = ShenandoahHeap::heap();