
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp (old version)

  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/universe.hpp"
  28 
  29 #include "gc/shared/gcArguments.hpp"
  30 #include "gc/shared/gcTimer.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/locationPrinter.inline.hpp"
  33 #include "gc/shared/memAllocator.hpp"
  34 #include "gc/shared/oopStorageSet.hpp"
  35 #include "gc/shared/plab.hpp"
  36 
  37 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  38 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  39 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  40 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  41 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  42 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
  43 #include "gc/shenandoah/shenandoahControlThread.hpp"

  44 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  45 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  46 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  47 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  48 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  49 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  50 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  51 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  52 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  53 #include "gc/shenandoah/shenandoahMetrics.hpp"
  54 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  55 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  56 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  57 #include "gc/shenandoah/shenandoahPadding.hpp"
  58 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  59 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  60 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  61 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  62 #include "gc/shenandoah/shenandoahUtils.hpp"
  63 #include "gc/shenandoah/shenandoahVerifier.hpp"


 242 
 243   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 244             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 245             _bitmap_bytes_per_slice, bitmap_page_size);
 246 
 247   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 248   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 249   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 250   _bitmap_region_special = bitmap.special();
 251 
 252   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 253                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 254   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 255   if (!_bitmap_region_special) {
 256     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 257                               "Cannot commit bitmap memory");
 258   }
 259 
 260   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 261 


 262   if (ShenandoahVerify) {
 263     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 264     if (!verify_bitmap.special()) {
 265       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 266                                 "Cannot commit verification bitmap memory");
 267     }
 268     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 269     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 270     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 271     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 272   }
 273 
 274   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 275   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 276   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 277   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 278   _aux_bitmap_region_special = aux_bitmap.special();
 279   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 280 
 281   //


 478   _scm(new ShenandoahConcurrentMark()),
 479   _full_gc(new ShenandoahMarkCompact()),
 480   _pacer(NULL),
 481   _verifier(NULL),
 482   _phase_timings(NULL),
 483   _monitoring_support(NULL),
 484   _memory_pool(NULL),
 485   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 486   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 487   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 488   _soft_ref_policy(),
 489   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 490   _ref_processor(NULL),
 491   _marking_context(NULL),
 492   _bitmap_size(0),
 493   _bitmap_regions_per_slice(0),
 494   _bitmap_bytes_per_slice(0),
 495   _bitmap_region_special(false),
 496   _aux_bitmap_region_special(false),
 497   _liveness_cache(NULL),
 498   _collection_set(NULL)

 499 {
 500   _heap = this;
 501 
 502   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 503 
 504   _max_workers = MAX2(_max_workers, 1U);
 505   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 506                             /* are_GC_task_threads */ true,
 507                             /* are_ConcurrentGC_threads */ true);
 508   if (_workers == NULL) {
 509     vm_exit_during_initialization("Failed necessary allocation.");
 510   } else {
 511     _workers->initialize_workers();
 512   }
 513 
 514   if (ParallelGCThreads > 1) {
 515     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 516                                                 ParallelGCThreads,
 517                       /* are_GC_task_threads */ false,
 518                  /* are_ConcurrentGC_threads */ false);


 952 };
 953 
 954 class ShenandoahEvacuationTask : public AbstractGangTask {
 955 private:
 956   ShenandoahHeap* const _sh;
 957   ShenandoahCollectionSet* const _cs;
 958   bool _concurrent;
 959 public:
 960   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 961                            ShenandoahCollectionSet* cs,
 962                            bool concurrent) :
 963     AbstractGangTask("Parallel Evacuation Task"),
 964     _sh(sh),
 965     _cs(cs),
 966     _concurrent(concurrent)
 967   {}
 968 
 969   void work(uint worker_id) {
 970     if (_concurrent) {
 971       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 972       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 973       ShenandoahEvacOOMScope oom_evac_scope;
 974       do_work();
 975     } else {
 976       ShenandoahParallelWorkerSession worker_session(worker_id);
 977       ShenandoahEvacOOMScope oom_evac_scope;
 978       do_work();
 979     }
 980   }
 981 
 982 private:
 983   void do_work() {
 984     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 985     ShenandoahHeapRegion* r;
 986     while ((r =_cs->claim_next()) != NULL) {
 987       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
 988       _sh->marked_object_iterate(r, &cl);
 989 
 990       if (ShenandoahPacing) {
 991         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 992       }
 993 
 994       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 995         break;
 996       }
 997     }


1062     cl.do_thread(t);
1063   }
1064   workers()->threads_do(&cl);
1065 }
1066 
1067 void ShenandoahHeap::resize_tlabs() {
1068   CollectedHeap::resize_all_tlabs();
1069 }
1070 
1071 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1072 private:
1073   ShenandoahRootEvacuator* _rp;
1074 
1075 public:
1076   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1077     AbstractGangTask("Shenandoah evacuate and update roots"),
1078     _rp(rp) {}
1079 
1080   void work(uint worker_id) {
1081     ShenandoahParallelWorkerSession worker_session(worker_id);
1082     ShenandoahEvacOOMScope oom_evac_scope;
1083     ShenandoahEvacuateUpdateRootsClosure<> cl;
1084     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1085     _rp->roots_do(worker_id, &cl);
1086   }
1087 };
1088 
1089 void ShenandoahHeap::evacuate_and_update_roots() {
1090 #if COMPILER2_OR_JVMCI
1091   DerivedPointerTable::clear();
1092 #endif
1093   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1094   {
1095     // Include concurrent roots if current cycle can not process those roots concurrently
1096     ShenandoahRootEvacuator rp(workers()->active_workers(),
1097                                ShenandoahPhaseTimings::init_evac,
1098                                !ShenandoahConcurrentRoots::should_do_concurrent_roots(),
1099                                !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
1100     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1101     workers()->run_task(&roots_task);
1102   }


1647   free_set()->recycle_trash();
1648 }
1649 
1650 void ShenandoahHeap::op_cleanup_complete() {
1651   free_set()->recycle_trash();
1652 }
1653 
1654 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1655 private:
1656   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1657   ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
1658 
1659 public:
1660   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1661     AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
1662     _vm_roots(phase),
1663     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()) {}
1664 
1665   void work(uint worker_id) {
1666     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1667     ShenandoahEvacOOMScope oom;
1668     {
1669       // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1670       // may race against OopStorage::release() calls.
1671       ShenandoahEvacUpdateOopStorageRootsClosure cl;
1672       _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
1673     }
1674 
1675     {
1676       ShenandoahEvacuateUpdateRootsClosure<> cl;
1677       CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1678       _cld_roots.cld_do(&clds, worker_id);
1679     }
1680   }
1681 };
1682 
1683 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
1684 private:
1685   ShenandoahHeap* const _heap;
1686   ShenandoahMarkingContext* const _mark_context;
1687   bool  _evac_in_progress;


1783     StringTable::reset_dead_counter();
1784     ResolvedMethodTable::reset_dead_counter();
1785     if (_concurrent_class_unloading) {
1786       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1787       _nmethod_itr.nmethods_do_begin();
1788     }
1789   }
1790 
1791   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
1792     StringTable::finish_dead_counter();
1793     ResolvedMethodTable::finish_dead_counter();
1794     if (_concurrent_class_unloading) {
1795       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1796       _nmethod_itr.nmethods_do_end();
1797     }
1798   }
1799 
1800   void work(uint worker_id) {
1801     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1802     {
1803       ShenandoahEvacOOMScope oom;
1804       // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1805       // may race against OopStorage::release() calls.
1806       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1807       _jni_roots.oops_do(&cl, worker_id);
1808       _vm_roots.oops_do(&cl, worker_id);
1809 
1810       cl.reset_dead_counter();
1811       _string_table_roots.oops_do(&cl, worker_id);
1812       StringTable::inc_dead_counter(cl.dead_counter());
1813 
1814       cl.reset_dead_counter();
1815       _resolved_method_table_roots.oops_do(&cl, worker_id);
1816       ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
1817 
1818       // String dedup weak roots
1819       ShenandoahForwardedIsAliveClosure is_alive;
1820       ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
1821       _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
1822     }
1823 


2455 
2456 template<class T>
2457 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2458 private:
2459   T cl;
2460   ShenandoahHeap* _heap;
2461   ShenandoahRegionIterator* _regions;
2462   bool _concurrent;
2463 public:
2464   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2465     AbstractGangTask("Concurrent Update References Task"),
2466     cl(T()),
2467     _heap(ShenandoahHeap::heap()),
2468     _regions(regions),
2469     _concurrent(concurrent) {
2470   }
2471 
2472   void work(uint worker_id) {
2473     if (_concurrent) {
2474       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2475       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2476       do_work();
2477     } else {
2478       ShenandoahParallelWorkerSession worker_session(worker_id);
2479       do_work();
2480     }
2481   }
2482 
2483 private:
2484   void do_work() {
2485     ShenandoahHeapRegion* r = _regions->next();
2486     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2487     while (r != NULL) {
2488       HeapWord* update_watermark = r->get_update_watermark();
2489       assert (update_watermark >= r->bottom(), "sanity");
2490       if (r->is_active() && !r->is_cset()) {
2491         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2492       }
2493       if (ShenandoahPacing) {
2494         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2495       }


src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp (new version)

  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/universe.hpp"
  28 
  29 #include "gc/shared/gcArguments.hpp"
  30 #include "gc/shared/gcTimer.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/locationPrinter.inline.hpp"
  33 #include "gc/shared/memAllocator.hpp"
  34 #include "gc/shared/oopStorageSet.hpp"
  35 #include "gc/shared/plab.hpp"
  36 
  37 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  38 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  39 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  40 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  41 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  42 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
  43 #include "gc/shenandoah/shenandoahControlThread.hpp"
  44 #include "gc/shenandoah/shenandoahEvacLockingBitmap.hpp"
  45 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  46 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  47 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  48 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  49 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  50 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  51 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  52 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  53 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  54 #include "gc/shenandoah/shenandoahMetrics.hpp"
  55 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  56 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  57 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  58 #include "gc/shenandoah/shenandoahPadding.hpp"
  59 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  60 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  61 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  62 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  63 #include "gc/shenandoah/shenandoahUtils.hpp"
  64 #include "gc/shenandoah/shenandoahVerifier.hpp"


 243 
 244   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 245             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 246             _bitmap_bytes_per_slice, bitmap_page_size);
 247 
 248   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 249   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 250   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 251   _bitmap_region_special = bitmap.special();
 252 
 253   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 254                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 255   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 256   if (!_bitmap_region_special) {
 257     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 258                               "Cannot commit bitmap memory");
 259   }
 260 
 261   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 262 
 263   _evac_locking_bitmap = new ShenandoahEvacLockingBitmap(_heap_region);
 264 
 265   if (ShenandoahVerify) {
 266     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 267     if (!verify_bitmap.special()) {
 268       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 269                                 "Cannot commit verification bitmap memory");
 270     }
 271     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 272     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 273     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 274     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 275   }
 276 
 277   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 278   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 279   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 280   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 281   _aux_bitmap_region_special = aux_bitmap.special();
 282   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 283 
 284   //
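
Worked example for the bitmap_init_commit computation at lines 253-255 above (values are assumptions for illustration, not taken from this change): with _bitmap_regions_per_slice = 8, _bitmap_bytes_per_slice = 256K and num_committed_regions = 10, align_up(10, 8) = 16, so bitmap_init_commit = 256K * 16 / 8 = 512K. In other words, the initial commit is rounded up to whole bitmap slices, then capped at _bitmap_size by the MIN2 on line 255.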


 481   _scm(new ShenandoahConcurrentMark()),
 482   _full_gc(new ShenandoahMarkCompact()),
 483   _pacer(NULL),
 484   _verifier(NULL),
 485   _phase_timings(NULL),
 486   _monitoring_support(NULL),
 487   _memory_pool(NULL),
 488   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 489   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 490   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 491   _soft_ref_policy(),
 492   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 493   _ref_processor(NULL),
 494   _marking_context(NULL),
 495   _bitmap_size(0),
 496   _bitmap_regions_per_slice(0),
 497   _bitmap_bytes_per_slice(0),
 498   _bitmap_region_special(false),
 499   _aux_bitmap_region_special(false),
 500   _liveness_cache(NULL),
 501   _collection_set(NULL),
 502   _evac_locking_bitmap(NULL)
 503 {
 504   _heap = this;
 505 
 506   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 507 
 508   _max_workers = MAX2(_max_workers, 1U);
 509   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 510                             /* are_GC_task_threads */ true,
 511                             /* are_ConcurrentGC_threads */ true);
 512   if (_workers == NULL) {
 513     vm_exit_during_initialization("Failed necessary allocation.");
 514   } else {
 515     _workers->initialize_workers();
 516   }
 517 
 518   if (ParallelGCThreads > 1) {
 519     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 520                                                 ParallelGCThreads,
 521                       /* are_GC_task_threads */ false,
 522                  /* are_ConcurrentGC_threads */ false);


 956 };
 957 
 958 class ShenandoahEvacuationTask : public AbstractGangTask {
 959 private:
 960   ShenandoahHeap* const _sh;
 961   ShenandoahCollectionSet* const _cs;
 962   bool _concurrent;
 963 public:
 964   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 965                            ShenandoahCollectionSet* cs,
 966                            bool concurrent) :
 967     AbstractGangTask("Parallel Evacuation Task"),
 968     _sh(sh),
 969     _cs(cs),
 970     _concurrent(concurrent)
 971   {}
 972 
 973   void work(uint worker_id) {
 974     if (_concurrent) {
 975       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 976       SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);

 977       do_work();
 978     } else {
 979       ShenandoahParallelWorkerSession worker_session(worker_id);

 980       do_work();
 981     }
 982   }
 983 
 984 private:
 985   void do_work() {
 986     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 987     ShenandoahHeapRegion* r;
 988     while ((r =_cs->claim_next()) != NULL) {
 989       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
 990       _sh->marked_object_iterate(r, &cl);
 991 
 992       if (ShenandoahPacing) {
 993         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 994       }
 995 
 996       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 997         break;
 998       }
 999     }
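
Illustration (not part of this webrev): a task like ShenandoahEvacuationTask above is typically constructed over the current collection set and handed to the GC worker gang, using the same workers()->run_task() dispatch shown for the roots task later in this file. The driver name op_conc_evac() is an assumption for this sketch.

  // Hypothetical driver, for illustration only: runs the parallel evacuation
  // task on the GC worker gang during the concurrent evacuation phase.
  void ShenandoahHeap::op_conc_evac() {
    ShenandoahEvacuationTask task(this, _collection_set, true /* concurrent */);
    workers()->run_task(&task);
  }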


1064     cl.do_thread(t);
1065   }
1066   workers()->threads_do(&cl);
1067 }
1068 
1069 void ShenandoahHeap::resize_tlabs() {
1070   CollectedHeap::resize_all_tlabs();
1071 }
1072 
1073 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1074 private:
1075   ShenandoahRootEvacuator* _rp;
1076 
1077 public:
1078   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1079     AbstractGangTask("Shenandoah evacuate and update roots"),
1080     _rp(rp) {}
1081 
1082   void work(uint worker_id) {
1083     ShenandoahParallelWorkerSession worker_session(worker_id);

1084     ShenandoahEvacuateUpdateRootsClosure<> cl;
1085     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1086     _rp->roots_do(worker_id, &cl);
1087   }
1088 };
1089 
1090 void ShenandoahHeap::evacuate_and_update_roots() {
1091 #if COMPILER2_OR_JVMCI
1092   DerivedPointerTable::clear();
1093 #endif
1094   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1095   {
1096     // Include concurrent roots if current cycle can not process those roots concurrently
1097     ShenandoahRootEvacuator rp(workers()->active_workers(),
1098                                ShenandoahPhaseTimings::init_evac,
1099                                !ShenandoahConcurrentRoots::should_do_concurrent_roots(),
1100                                !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
1101     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1102     workers()->run_task(&roots_task);
1103   }


1648   free_set()->recycle_trash();
1649 }
1650 
1651 void ShenandoahHeap::op_cleanup_complete() {
1652   free_set()->recycle_trash();
1653 }
1654 
1655 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1656 private:
1657   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1658   ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
1659 
1660 public:
1661   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1662     AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
1663     _vm_roots(phase),
1664     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()) {}
1665 
1666   void work(uint worker_id) {
1667     ShenandoahConcurrentWorkerSession worker_session(worker_id);

1668     {
1669       // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1670       // may race against OopStorage::release() calls.
1671       ShenandoahEvacUpdateOopStorageRootsClosure cl;
1672       _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
1673     }
1674 
1675     {
1676       ShenandoahEvacuateUpdateRootsClosure<> cl;
1677       CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1678       _cld_roots.cld_do(&clds, worker_id);
1679     }
1680   }
1681 };
1682 
1683 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
1684 private:
1685   ShenandoahHeap* const _heap;
1686   ShenandoahMarkingContext* const _mark_context;
1687   bool  _evac_in_progress;


1783     StringTable::reset_dead_counter();
1784     ResolvedMethodTable::reset_dead_counter();
1785     if (_concurrent_class_unloading) {
1786       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1787       _nmethod_itr.nmethods_do_begin();
1788     }
1789   }
1790 
1791   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
1792     StringTable::finish_dead_counter();
1793     ResolvedMethodTable::finish_dead_counter();
1794     if (_concurrent_class_unloading) {
1795       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1796       _nmethod_itr.nmethods_do_end();
1797     }
1798   }
1799 
1800   void work(uint worker_id) {
1801     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1802     {

1803       // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1804       // may race against OopStorage::release() calls.
1805       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1806       _jni_roots.oops_do(&cl, worker_id);
1807       _vm_roots.oops_do(&cl, worker_id);
1808 
1809       cl.reset_dead_counter();
1810       _string_table_roots.oops_do(&cl, worker_id);
1811       StringTable::inc_dead_counter(cl.dead_counter());
1812 
1813       cl.reset_dead_counter();
1814       _resolved_method_table_roots.oops_do(&cl, worker_id);
1815       ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
1816 
1817       // String dedup weak roots
1818       ShenandoahForwardedIsAliveClosure is_alive;
1819       ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
1820       _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
1821     }
1822 


2454 
2455 template<class T>
2456 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2457 private:
2458   T cl;
2459   ShenandoahHeap* _heap;
2460   ShenandoahRegionIterator* _regions;
2461   bool _concurrent;
2462 public:
2463   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2464     AbstractGangTask("Concurrent Update References Task"),
2465     cl(T()),
2466     _heap(ShenandoahHeap::heap()),
2467     _regions(regions),
2468     _concurrent(concurrent) {
2469   }
2470 
2471   void work(uint worker_id) {
2472     if (_concurrent) {
2473       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2474       SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2475       do_work();
2476     } else {
2477       ShenandoahParallelWorkerSession worker_session(worker_id);
2478       do_work();
2479     }
2480   }
2481 
2482 private:
2483   void do_work() {
2484     ShenandoahHeapRegion* r = _regions->next();
2485     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2486     while (r != NULL) {
2487       HeapWord* update_watermark = r->get_update_watermark();
2488       assert (update_watermark >= r->bottom(), "sanity");
2489       if (r->is_active() && !r->is_cset()) {
2490         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2491       }
2492       if (ShenandoahPacing) {
2493         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2494       }
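
Illustration (not part of this webrev): the region-driven update-refs task above is typically instantiated with an update-refs oop closure and a region iterator, then run on the worker gang. The driver name, the _update_refs_iterator member and the ShenandoahUpdateHeapRefsClosure type are assumptions for this sketch.

  // Hypothetical dispatch, for illustration only: instantiate the task template
  // with an update-refs closure and fan it out across the GC worker gang.
  void ShenandoahHeap::update_heap_references(bool concurrent) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
    workers()->run_task(&task);
  }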

