
src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp


  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/continuationGCSupport.hpp"
  29 #include "gc/shared/gcTraceTime.inline.hpp"
  30 #include "gc/shared/preservedMarks.inline.hpp"

  31 #include "gc/shared/tlab_globals.hpp"
  32 #include "gc/shared/workerThread.hpp"
  33 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahFullGC.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  45 #include "gc/shenandoah/shenandoahMetrics.hpp"
  46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  47 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  48 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  49 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"

 166 
 167     // c. Update roots if this full GC is due to evac-oom, since roots may carry from-space pointers.
 168     if (has_forwarded_objects) {
 169       update_roots(true /*full_gc*/);
 170     }
 171 
 172     // d. Reset the bitmaps for new marking
 173     heap->reset_mark_bitmap();
 174     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 175     assert(!heap->marking_context()->is_complete(), "sanity");
 176 
 177     // e. Abandon reference discovery and clear all discovered references.
 178     ShenandoahReferenceProcessor* rp = heap->ref_processor();
 179     rp->abandon_partial_discovery();
 180 
 181     // f. Sync pinned region status from the CP marks
 182     heap->sync_pinned_region_status();
 183 
 184     // The rest of the prologue:
 185     _preserved_marks->init(heap->workers()->active_workers());

 186 
 187     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 188   }
 189 
 190   if (UseTLAB) {
 191     heap->gclabs_retire(ResizeTLAB);
 192     heap->tlabs_retire(ResizeTLAB);
 193   }
 194 
 195   OrderAccess::fence();
 196 
 197   phase1_mark_heap();
 198 
 199   // Once marking is done (it may have fixed up forwarded objects), we can drop the flag.
 200   // Coming out of Full GC, we would not have any forwarded objects.
 201   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 202   heap->set_has_forwarded_objects(false);
 203 
 204   heap->set_full_gc_move_in_progress(true);
 205 

 276   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 277 
 278   ShenandoahHeap* heap = ShenandoahHeap::heap();
 279 
 280   ShenandoahPrepareForMarkClosure cl;
 281   heap->heap_region_iterate(&cl);
 282 
 283   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
 284 
 285   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 286   // enable ("weak") refs discovery
 287   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 288 
 289   ShenandoahSTWMark mark(true /*full_gc*/);
 290   mark.mark();
 291   heap->parallel_cleaning(true /* full_gc */);
 292 }
 293 
 294 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 295 private:
 296   PreservedMarks*          const _preserved_marks;
 297   ShenandoahHeap*          const _heap;

 298   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 299   int _empty_regions_pos;
 300   ShenandoahHeapRegion*          _to_region;
 301   ShenandoahHeapRegion*          _from_region;
 302   HeapWord* _compact_point;
 303 
 304 public:
 305   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 306                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 307                                               ShenandoahHeapRegion* to_region) :
 308     _preserved_marks(preserved_marks),

 309     _heap(ShenandoahHeap::heap()),
 310     _empty_regions(empty_regions),
 311     _empty_regions_pos(0),
 312     _to_region(to_region),
 313     _from_region(NULL),
 314     _compact_point(to_region->bottom()) {}
 315 
 316   void set_from_region(ShenandoahHeapRegion* from_region) {
 317     _from_region = from_region;
 318   }
 319 
 320   void finish_region() {
 321     assert(_to_region != NULL, "should not happen");
 322     _to_region->set_new_top(_compact_point);
 323   }
 324 
 325   bool is_compact_same_region() {
 326     return _from_region == _to_region;
 327   }
 328 

 342       // Object doesn't fit. Pick next empty region and start compacting there.
 343       ShenandoahHeapRegion* new_to_region;
 344       if (_empty_regions_pos < _empty_regions.length()) {
 345         new_to_region = _empty_regions.at(_empty_regions_pos);
 346         _empty_regions_pos++;
 347       } else {
 348         // Out of empty regions? Compact within the same region.
 349         new_to_region = _from_region;
 350       }
 351 
 352       assert(new_to_region != _to_region, "must not reuse same to-region");
 353       assert(new_to_region != NULL, "must not be NULL");
 354       _to_region = new_to_region;
 355       _compact_point = _to_region->bottom();
 356     }
 357 
 358     // Object fits into current region, record new location:
 359     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 360     shenandoah_assert_not_forwarded(NULL, p);
 361     _preserved_marks->push_if_necessary(p, p->mark());
 362     p->forward_to(cast_to_oop(_compact_point));
 363     _compact_point += obj_size;
 364   }
 365 };
 366 
 367 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 368 private:
 369   PreservedMarksSet*        const _preserved_marks;
 370   ShenandoahHeap*           const _heap;
 371   ShenandoahHeapRegionSet** const _worker_slices;
 372 
 373 public:
 374   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
 375     WorkerTask("Shenandoah Prepare For Compaction"),
 376     _preserved_marks(preserved_marks),
 377     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 378   }
 379 
 380   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 381     // Empty region: get it into the slice to defragment the slice itself.
 382     // We could have skipped this without violating correctness, but we really

 416       }
 417 
 418       // Compacted the region to somewhere else? From-region is empty then.
 419       if (!cl.is_compact_same_region()) {
 420         empty_regions.append(from_region);
 421       }
 422       from_region = it.next();
 423     }
 424     cl.finish_region();
 425 
 426     // Mark all remaining regions as empty
 427     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 428       ShenandoahHeapRegion* r = empty_regions.at(pos);
 429       r->set_new_top(r->bottom());
 430     }
 431   }
 432 };
 433 
 434 void ShenandoahFullGC::calculate_target_humongous_objects() {
 435   ShenandoahHeap* heap = ShenandoahHeap::heap();

 436 
 437   // Compute the new addresses for humongous objects. We need to do this after addresses
 438   // for regular objects are calculated, once we know which regions in the heap suffix are
 439   // available for humongous moves.
 440   //
 441   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 442   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 443   // a humongous start region there.
 444   //
 445   // The complication is potential non-movable regions during the scan. If such a region is
 446   // detected, sliding restarts towards that non-movable region.
 447 
 448   size_t to_begin = heap->num_regions();
 449   size_t to_end = heap->num_regions();
 450 
 451   for (size_t c = heap->num_regions(); c > 0; c--) {
 452     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 453     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 454       // To-region candidate: record this, and continue scan
 455       to_begin = r->index();
 456       continue;
 457     }
 458 
 459     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 460       // From-region candidate: movable humongous region
 461       oop old_obj = cast_to_oop(r->bottom());
 462       size_t words_size = old_obj->size();
 463       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 464 
 465       size_t start = to_end - num_regions;
 466 
 467       if (start >= to_begin && start != r->index()) {
 468         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 469         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 470         old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
 471         to_end = start;
 472         continue;
 473       }
 474     }
 475 
 476     // Failed to fit. Scan starting from current region.
 477     to_begin = r->index();
 478     to_end = r->index();
 479   }
 480 }
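
To make the window mechanics concrete, here is a hedged walk-through with made-up
region indices (an 8-region heap; the layout is an illustration, not data from this
change). Initially to_begin = to_end = num_regions = 8, and the loop scans c = 7 down to 0:

    //   r7 empty (new_top == bottom)        -> to_begin = 7
    //   r6 empty                            -> to_begin = 6, window is [6; 8)
    //   r5 humongous start, movable, object needs 2 regions:
    //        start = to_end - num_regions = 8 - 2 = 6
    //        start >= to_begin && start != 5 -> forward r5's object to r6's bottom, to_end = 6
    //   r4 regular, has live data           -> window collapses: to_begin = to_end = 4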
 481 
 482 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 483 private:
 484   ShenandoahHeap* const _heap;
 485 
 486 public:
 487   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 488   void heap_region_do(ShenandoahHeapRegion* r) {
 489     if (r->is_trash()) {
 490       r->recycle();

 701 
 702   // Compute the new addresses for regular objects
 703   {
 704     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 705 
 706     distribute_slices(worker_slices);
 707 
 708     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
 709     heap->workers()->run_task(&task);
 710   }
 711 
 712   // Compute the new addresses for humongous objects
 713   {
 714     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 715     calculate_target_humongous_objects();
 716   }
 717 }
 718 
 719 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 720 private:
 721   ShenandoahHeap* const _heap;

 722   ShenandoahMarkingContext* const _ctx;
 723 
 724   template <class T>
 725   inline void do_oop_work(T* p) {
 726     T o = RawAccess<>::oop_load(p);
 727     if (!CompressedOops::is_null(o)) {
 728       oop obj = CompressedOops::decode_not_null(o);
 729       assert(_ctx->is_marked(obj), "must be marked");
 730       if (obj->is_forwarded()) {
 731         oop forw = obj->forwardee();
 732         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 733       }
 734     }
 735   }
 736 
 737 public:
 738   ShenandoahAdjustPointersClosure() :
 739     _heap(ShenandoahHeap::heap()),

 740     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 741 
 742   void do_oop(oop* p)       { do_oop_work(p); }
 743   void do_oop(narrowOop* p) { do_oop_work(p); }
 744   void do_method(Method* m) {}
 745   void do_nmethod(nmethod* nm) {}
 746 };
 747 
 748 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
 749 private:
 750   ShenandoahHeap* const _heap;
 751   ShenandoahAdjustPointersClosure _cl;
 752 
 753 public:
 754   ShenandoahAdjustPointersObjectClosure() :
 755     _heap(ShenandoahHeap::heap()) {
 756   }
 757   void do_object(oop p) {
 758     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 759     p->oop_iterate(&_cl);

 781       }
 782       r = _regions.next();
 783     }
 784   }
 785 };
 786 
 787 class ShenandoahAdjustRootPointersTask : public WorkerTask {
 788 private:
 789   ShenandoahRootAdjuster* _rp;
 790   PreservedMarksSet* _preserved_marks;
 791 public:
 792   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
 793     WorkerTask("Shenandoah Adjust Root Pointers"),
 794     _rp(rp),
 795     _preserved_marks(preserved_marks) {}
 796 
 797   void work(uint worker_id) {
 798     ShenandoahParallelWorkerSession worker_session(worker_id);
 799     ShenandoahAdjustPointersClosure cl;
 800     _rp->roots_do(worker_id, &cl);
 801     _preserved_marks->get(worker_id)->adjust_during_full_gc();

 802   }
 803 };
 804 
 805 void ShenandoahFullGC::phase3_update_references() {
 806   GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
 807   ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
 808 
 809   ShenandoahHeap* heap = ShenandoahHeap::heap();
 810 
 811   WorkerThreads* workers = heap->workers();
 812   uint nworkers = workers->active_workers();
 813   {
 814 #if COMPILER2_OR_JVMCI
 815     DerivedPointerTable::clear();
 816 #endif
 817     ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
 818     ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
 819     workers->run_task(&task);
 820 #if COMPILER2_OR_JVMCI
 821     DerivedPointerTable::update_pointers();
 822 #endif
 823   }
 824 
 825   ShenandoahAdjustPointersTask adjust_pointers_task;
 826   workers->run_task(&adjust_pointers_task);
 827 }
 828 
 829 class ShenandoahCompactObjectsClosure : public ObjectClosure {
 830 private:
 831   ShenandoahHeap* const _heap;
 832   uint            const _worker_id;

 833 
 834 public:
 835   ShenandoahCompactObjectsClosure(uint worker_id) :
 836     _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
 837 
 838   void do_object(oop p) {
 839     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 840     size_t size = p->size();
 841     if (p->is_forwarded()) {
 842       HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
 843       HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
 844       Copy::aligned_conjoint_words(compact_from, compact_to, size);
 845       oop new_obj = cast_to_oop(compact_to);
 846 
 847       ContinuationGCSupport::relativize_stack_chunk(new_obj);
 848       new_obj->init_mark();
 849     }
 850   }
 851 };
 852 
 853 class ShenandoahCompactObjectsTask : public WorkerTask {
 854 private:
 855   ShenandoahHeap* const _heap;
 856   ShenandoahHeapRegionSet** const _worker_slices;
 857 
 858 public:
 859   ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
 860     WorkerTask("Shenandoah Compact Objects"),
 861     _heap(ShenandoahHeap::heap()),
 862     _worker_slices(worker_slices) {
 863   }

 920     }
 921 
 922     r->set_live_data(live);
 923     r->reset_alloc_metadata();
 924     _live += live;
 925   }
 926 
 927   size_t get_live() {
 928     return _live;
 929   }
 930 };
 931 
 932 void ShenandoahFullGC::compact_humongous_objects() {
 933   // Compact humongous regions based on the fwdptrs of their start objects.
 934   //
 935   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
 936   // humongous regions are already compacted, and do not require further moves, which alleviates
 937   // sliding costs. We may consider doing this in parallel in the future.
 938 
 939   ShenandoahHeap* heap = ShenandoahHeap::heap();

 940 
 941   for (size_t c = heap->num_regions(); c > 0; c--) {
 942     ShenandoahHeapRegion* r = heap->get_region(c - 1);
 943     if (r->is_humongous_start()) {
 944       oop old_obj = cast_to_oop(r->bottom());
 945       if (!old_obj->is_forwarded()) {
 946         // No need to move the object; it stays at the same slot
 947         continue;
 948       }
 949       size_t words_size = old_obj->size();
 950       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 951 
 952       size_t old_start = r->index();
 953       size_t old_end   = old_start + num_regions - 1;
 954       size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
 955       size_t new_end   = new_start + num_regions - 1;
 956       assert(old_start != new_start, "must be real move");
 957       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
 958 
 959       Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
 960       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
 961 
 962       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
 963       new_obj->init_mark();
 964 
 965       {
 966         for (size_t c = old_start; c <= old_end; c++) {
 967           ShenandoahHeapRegion* r = heap->get_region(c);
 968           r->make_regular_bypass();
 969           r->set_top(r->bottom());
 970         }
 971 
 972         for (size_t c = new_start; c <= new_end; c++) {
 973           ShenandoahHeapRegion* r = heap->get_region(c);
 974           if (c == new_start) {

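The listing above is the baseline file; the patched version follows below. The substance
of the change is that Full GC stops recording forwardings in each object's mark word
(p->forward_to(...), obj->forwardee()) and instead records them in a SlidingForwarding
table reachable via the heap. A minimal sketch of the round-trip, using only the calls
that appear in this diff (the variable names are illustrative):

    SlidingForwarding* const forwarding = ShenandoahHeap::heap()->forwarding();
    forwarding->clear();                                 // prologue: reset the table for this cycle
    forwarding->forward_to(obj, cast_to_oop(new_addr));  // phase 2: record the planned move
    oop dest = forwarding->forwardee(obj);               // phases 3/4: resolve the destination

Note that the obj->is_forwarded() guards are unchanged; only the destination address
moves out of the mark word and into the table.
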
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "compiler/oopMap.hpp"
  28 #include "gc/shared/continuationGCSupport.hpp"
  29 #include "gc/shared/gcTraceTime.inline.hpp"
  30 #include "gc/shared/preservedMarks.inline.hpp"
  31 #include "gc/shared/slidingForwarding.inline.hpp"
  32 #include "gc/shared/tlab_globals.hpp"
  33 #include "gc/shared/workerThread.hpp"
  34 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  35 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahFullGC.hpp"
  39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc/shenandoah/shenandoahMetrics.hpp"
  47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  49 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  50 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  51 #include "gc/shenandoah/shenandoahUtils.hpp"

 167 
 168     // c. Update roots if this full GC is due to evac-oom, since roots may carry from-space pointers.
 169     if (has_forwarded_objects) {
 170       update_roots(true /*full_gc*/);
 171     }
 172 
 173     // d. Reset the bitmaps for new marking
 174     heap->reset_mark_bitmap();
 175     assert(heap->marking_context()->is_bitmap_clear(), "sanity");
 176     assert(!heap->marking_context()->is_complete(), "sanity");
 177 
 178     // e. Abandon reference discovery and clear all discovered references.
 179     ShenandoahReferenceProcessor* rp = heap->ref_processor();
 180     rp->abandon_partial_discovery();
 181 
 182     // f. Sync pinned region status from the CP marks
 183     heap->sync_pinned_region_status();
 184 
 185     // The rest of the prologue:
 186     _preserved_marks->init(heap->workers()->active_workers());
 187     heap->forwarding()->clear();
 188 
 189     assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
 190   }
 191 
 192   if (UseTLAB) {
 193     heap->gclabs_retire(ResizeTLAB);
 194     heap->tlabs_retire(ResizeTLAB);
 195   }
 196 
 197   OrderAccess::fence();
 198 
 199   phase1_mark_heap();
 200 
 201   // Once marking is done (it may have fixed up forwarded objects), we can drop the flag.
 202   // Coming out of Full GC, we would not have any forwarded objects.
 203   // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
 204   heap->set_has_forwarded_objects(false);
 205 
 206   heap->set_full_gc_move_in_progress(true);
 207 

 278   ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
 279 
 280   ShenandoahHeap* heap = ShenandoahHeap::heap();
 281 
 282   ShenandoahPrepareForMarkClosure cl;
 283   heap->heap_region_iterate(&cl);
 284 
 285   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
 286 
 287   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 288   // enable ("weak") refs discovery
 289   rp->set_soft_reference_policy(true); // forcefully purge all soft references
 290 
 291   ShenandoahSTWMark mark(true /*full_gc*/);
 292   mark.mark();
 293   heap->parallel_cleaning(true /* full_gc */);
 294 }
 295 
 296 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
 297 private:
 298   PreservedMarks*    const _preserved_marks;
 299   SlidingForwarding* const _forwarding;
 300   ShenandoahHeap*    const _heap;
 301   GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
 302   int _empty_regions_pos;
 303   ShenandoahHeapRegion*          _to_region;
 304   ShenandoahHeapRegion*          _from_region;
 305   HeapWord* _compact_point;
 306 
 307 public:
 308   ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
 309                                               GrowableArray<ShenandoahHeapRegion*>& empty_regions,
 310                                               ShenandoahHeapRegion* to_region) :
 311     _preserved_marks(preserved_marks),
 312     _forwarding(ShenandoahHeap::heap()->forwarding()),
 313     _heap(ShenandoahHeap::heap()),
 314     _empty_regions(empty_regions),
 315     _empty_regions_pos(0),
 316     _to_region(to_region),
 317     _from_region(NULL),
 318     _compact_point(to_region->bottom()) {}
 319 
 320   void set_from_region(ShenandoahHeapRegion* from_region) {
 321     _from_region = from_region;
 322   }
 323 
 324   void finish_region() {
 325     assert(_to_region != NULL, "should not happen");
 326     _to_region->set_new_top(_compact_point);
 327   }
 328 
 329   bool is_compact_same_region() {
 330     return _from_region == _to_region;
 331   }
 332 

 346       // Object doesn't fit. Pick next empty region and start compacting there.
 347       ShenandoahHeapRegion* new_to_region;
 348       if (_empty_regions_pos < _empty_regions.length()) {
 349         new_to_region = _empty_regions.at(_empty_regions_pos);
 350         _empty_regions_pos++;
 351       } else {
 352         // Out of empty regions? Compact within the same region.
 353         new_to_region = _from_region;
 354       }
 355 
 356       assert(new_to_region != _to_region, "must not reuse same to-region");
 357       assert(new_to_region != NULL, "must not be NULL");
 358       _to_region = new_to_region;
 359       _compact_point = _to_region->bottom();
 360     }
 361 
 362     // Object fits into current region, record new location:
 363     assert(_compact_point + obj_size <= _to_region->end(), "must fit");
 364     shenandoah_assert_not_forwarded(NULL, p);
 365     _preserved_marks->push_if_necessary(p, p->mark());
 366     _forwarding->forward_to(p, cast_to_oop(_compact_point));
 367     _compact_point += obj_size;
 368   }
 369 };
 370 
 371 class ShenandoahPrepareForCompactionTask : public WorkerTask {
 372 private:
 373   PreservedMarksSet*        const _preserved_marks;
 374   ShenandoahHeap*           const _heap;
 375   ShenandoahHeapRegionSet** const _worker_slices;
 376 
 377 public:
 378   ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
 379     WorkerTask("Shenandoah Prepare For Compaction"),
 380     _preserved_marks(preserved_marks),
 381     _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
 382   }
 383 
 384   static bool is_candidate_region(ShenandoahHeapRegion* r) {
 385     // Empty region: get it into the slice to defragment the slice itself.
 386     // We could have skipped this without violating correctness, but we really

 420       }
 421 
 422       // Compacted the region to somewhere else? From-region is empty then.
 423       if (!cl.is_compact_same_region()) {
 424         empty_regions.append(from_region);
 425       }
 426       from_region = it.next();
 427     }
 428     cl.finish_region();
 429 
 430     // Mark all remaining regions as empty
 431     for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
 432       ShenandoahHeapRegion* r = empty_regions.at(pos);
 433       r->set_new_top(r->bottom());
 434     }
 435   }
 436 };
 437 
 438 void ShenandoahFullGC::calculate_target_humongous_objects() {
 439   ShenandoahHeap* heap = ShenandoahHeap::heap();
 440   SlidingForwarding* forwarding = heap->forwarding();
 441 
 442   // Compute the new addresses for humongous objects. We need to do this after addresses
 443   // for regular objects are calculated, once we know which regions in the heap suffix are
 444   // available for humongous moves.
 445   //
 446   // Scan the heap backwards, because we are compacting humongous regions towards the end.
 447   // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
 448   // a humongous start region there.
 449   //
 450   // The complication is potential non-movable regions during the scan. If such a region is
 451   // detected, sliding restarts towards that non-movable region.
 452 
 453   size_t to_begin = heap->num_regions();
 454   size_t to_end = heap->num_regions();
 455 
 456   for (size_t c = heap->num_regions(); c > 0; c--) {
 457     ShenandoahHeapRegion *r = heap->get_region(c - 1);
 458     if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
 459       // To-region candidate: record this, and continue scan
 460       to_begin = r->index();
 461       continue;
 462     }
 463 
 464     if (r->is_humongous_start() && r->is_stw_move_allowed()) {
 465       // From-region candidate: movable humongous region
 466       oop old_obj = cast_to_oop(r->bottom());
 467       size_t words_size = old_obj->size();
 468       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 469 
 470       size_t start = to_end - num_regions;
 471 
 472       if (start >= to_begin && start != r->index()) {
 473         // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
 474         _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
 475         forwarding->forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
 476         to_end = start;
 477         continue;
 478       }
 479     }
 480 
 481     // Failed to fit. Scan starting from current region.
 482     to_begin = r->index();
 483     to_end = r->index();
 484   }
 485 }
 486 
 487 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
 488 private:
 489   ShenandoahHeap* const _heap;
 490 
 491 public:
 492   ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
 493   void heap_region_do(ShenandoahHeapRegion* r) {
 494     if (r->is_trash()) {
 495       r->recycle();

 706 
 707   // Compute the new addresses for regular objects
 708   {
 709     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
 710 
 711     distribute_slices(worker_slices);
 712 
 713     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
 714     heap->workers()->run_task(&task);
 715   }
 716 
 717   // Compute the new addresses for humongous objects
 718   {
 719     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
 720     calculate_target_humongous_objects();
 721   }
 722 }
 723 
 724 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
 725 private:
 726   ShenandoahHeap*           const _heap;
 727   const SlidingForwarding*  const _forwarding;
 728   ShenandoahMarkingContext* const _ctx;
 729 
 730   template <class T>
 731   inline void do_oop_work(T* p) {
 732     T o = RawAccess<>::oop_load(p);
 733     if (!CompressedOops::is_null(o)) {
 734       oop obj = CompressedOops::decode_not_null(o);
 735       assert(_ctx->is_marked(obj), "must be marked");
 736       if (obj->is_forwarded()) {
 737         oop forw = _forwarding->forwardee(obj);
 738         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 739       }
 740     }
 741   }
 742 
 743 public:
 744   ShenandoahAdjustPointersClosure() :
 745     _heap(ShenandoahHeap::heap()),
 746     _forwarding(_heap->forwarding()),
 747     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 748 
 749   void do_oop(oop* p)       { do_oop_work(p); }
 750   void do_oop(narrowOop* p) { do_oop_work(p); }
 751   void do_method(Method* m) {}
 752   void do_nmethod(nmethod* nm) {}
 753 };
 754 
 755 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
 756 private:
 757   ShenandoahHeap* const _heap;
 758   ShenandoahAdjustPointersClosure _cl;
 759 
 760 public:
 761   ShenandoahAdjustPointersObjectClosure() :
 762     _heap(ShenandoahHeap::heap()) {
 763   }
 764   void do_object(oop p) {
 765     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 766     p->oop_iterate(&_cl);

 788       }
 789       r = _regions.next();
 790     }
 791   }
 792 };
 793 
 794 class ShenandoahAdjustRootPointersTask : public WorkerTask {
 795 private:
 796   ShenandoahRootAdjuster* _rp;
 797   PreservedMarksSet* _preserved_marks;
 798 public:
 799   ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
 800     WorkerTask("Shenandoah Adjust Root Pointers"),
 801     _rp(rp),
 802     _preserved_marks(preserved_marks) {}
 803 
 804   void work(uint worker_id) {
 805     ShenandoahParallelWorkerSession worker_session(worker_id);
 806     ShenandoahAdjustPointersClosure cl;
 807     _rp->roots_do(worker_id, &cl);
 808     const SlidingForwarding* const forwarding = ShenandoahHeap::heap()->forwarding();
 809     _preserved_marks->get(worker_id)->adjust_during_full_gc(forwarding);
 810   }
 811 };
 812 
 813 void ShenandoahFullGC::phase3_update_references() {
 814   GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
 815   ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
 816 
 817   ShenandoahHeap* heap = ShenandoahHeap::heap();
 818 
 819   WorkerThreads* workers = heap->workers();
 820   uint nworkers = workers->active_workers();
 821   {
 822 #if COMPILER2_OR_JVMCI
 823     DerivedPointerTable::clear();
 824 #endif
 825     ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
 826     ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
 827     workers->run_task(&task);
 828 #if COMPILER2_OR_JVMCI
 829     DerivedPointerTable::update_pointers();
 830 #endif
 831   }
 832 
 833   ShenandoahAdjustPointersTask adjust_pointers_task;
 834   workers->run_task(&adjust_pointers_task);
 835 }
 836 
 837 class ShenandoahCompactObjectsClosure : public ObjectClosure {
 838 private:
 839   ShenandoahHeap*          const _heap;
 840   const SlidingForwarding* const _forwarding;
 841   uint                     const _worker_id;
 842 
 843 public:
 844   ShenandoahCompactObjectsClosure(uint worker_id) :
 845     _heap(ShenandoahHeap::heap()), _forwarding(_heap->forwarding()), _worker_id(worker_id) {}
 846 
 847   void do_object(oop p) {
 848     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
 849     size_t size = p->size();
 850     if (p->is_forwarded()) {
 851       HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
 852       HeapWord* compact_to = cast_from_oop<HeapWord*>(_forwarding->forwardee(p));
 853       Copy::aligned_conjoint_words(compact_from, compact_to, size);
 854       oop new_obj = cast_to_oop(compact_to);
 855 
 856       ContinuationGCSupport::relativize_stack_chunk(new_obj);
 857       new_obj->init_mark();
 858     }
 859   }
 860 };
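
This closure is driven by the ShenandoahCompactObjectsTask defined next; its work()
body is elided from this hunk. As a hedged sketch of roughly what that loop does
(the iteration details here are assumptions, not part of this patch):

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegionSetIterator slice(worker_slices[worker_id]);
    for (ShenandoahHeapRegion* r = slice.next(); r != NULL; r = slice.next()) {
      if (r->has_live()) {
        // Visit marked objects; each forwarded one gets copied to the
        // destination recorded in the SlidingForwarding table.
        heap->marked_object_iterate(r, &cl);
      }
    }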
 861 
 862 class ShenandoahCompactObjectsTask : public WorkerTask {
 863 private:
 864   ShenandoahHeap* const _heap;
 865   ShenandoahHeapRegionSet** const _worker_slices;
 866 
 867 public:
 868   ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
 869     WorkerTask("Shenandoah Compact Objects"),
 870     _heap(ShenandoahHeap::heap()),
 871     _worker_slices(worker_slices) {
 872   }

 929     }
 930 
 931     r->set_live_data(live);
 932     r->reset_alloc_metadata();
 933     _live += live;
 934   }
 935 
 936   size_t get_live() {
 937     return _live;
 938   }
 939 };
 940 
 941 void ShenandoahFullGC::compact_humongous_objects() {
 942   // Compact humongous regions based on the fwdptrs of their start objects.
 943   //
 944   // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
 945   // humongous regions are already compacted, and do not require further moves, which alleviates
 946   // sliding costs. We may consider doing this in parallel in the future.
 947 
 948   ShenandoahHeap* heap = ShenandoahHeap::heap();
 949   const SlidingForwarding* const forwarding = heap->forwarding();
 950 
 951   for (size_t c = heap->num_regions(); c > 0; c--) {
 952     ShenandoahHeapRegion* r = heap->get_region(c - 1);
 953     if (r->is_humongous_start()) {
 954       oop old_obj = cast_to_oop(r->bottom());
 955       if (!old_obj->is_forwarded()) {
 956         // No need to move the object; it stays at the same slot
 957         continue;
 958       }
 959       size_t words_size = old_obj->size();
 960       size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 961 
 962       size_t old_start = r->index();
 963       size_t old_end   = old_start + num_regions - 1;
 964       size_t new_start = heap->heap_region_index_containing(forwarding->forwardee(old_obj));
 965       size_t new_end   = new_start + num_regions - 1;
 966       assert(old_start != new_start, "must be real move");
 967       assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
 968 
 969       Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
 970       ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
 971 
 972       oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
 973       new_obj->init_mark();
 974 
 975       {
 976         for (size_t c = old_start; c <= old_end; c++) {
 977           ShenandoahHeapRegion* r = heap->get_region(c);
 978           r->make_regular_bypass();
 979           r->set_top(r->bottom());
 980         }
 981 
 982         for (size_t c = new_start; c <= new_end; c++) {
 983           ShenandoahHeapRegion* r = heap->get_region(c);
 984           if (c == new_start) {