src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp

*** 31,41 ****
  #include "gc/shared/gcTimer.hpp"
  #include "gc/shared/referenceProcessor.hpp"
  #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
- #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
--- 31,40 ----
*** 79,92 ****
  {
  }
  
  template<UpdateRefsMode UPDATE_REFS>
  class ShenandoahInitMarkRootsTask : public AbstractGangTask {
  private:
!   ShenandoahAllRootScanner* _rp;
    bool _process_refs;
  public:
!   ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp, bool process_refs) :
      AbstractGangTask("Shenandoah init mark roots task"),
      _rp(rp),
      _process_refs(process_refs) {
    }
--- 78,91 ----
  {
  }
  
  template<UpdateRefsMode UPDATE_REFS>
  class ShenandoahInitMarkRootsTask : public AbstractGangTask {
  private:
!   ShenandoahRootProcessor* _rp;
    bool _process_refs;
  public:
!   ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
      AbstractGangTask("Shenandoah init mark roots task"),
      _rp(rp),
      _process_refs(process_refs) {
    }
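For context: both sides of this hunk rely on HotSpot's AbstractGangTask idiom, where one task object is handed to the WorkGang and every worker invokes work() with its own worker_id so it can claim a distinct slice of the roots. A minimal standalone sketch of that idiom, using plain C++ threads rather than the real WorkGang API:

// Standalone sketch of the gang-task pattern (not the HotSpot API):
// a task object is run by a gang of workers, each receiving its own
// worker_id to pick a distinct partition of the work.
#include <cstdio>
#include <thread>
#include <vector>

struct GangTask {
  virtual ~GangTask() {}
  virtual void work(unsigned worker_id) = 0;  // called once per worker
};

struct InitMarkRootsTaskSketch : GangTask {
  void work(unsigned worker_id) override {
    // The real task scans the root-set partition assigned to worker_id.
    std::printf("worker %u scanning its root partition\n", worker_id);
  }
};

// Plays the role of WorkGang::run_task(): spawn N workers, hand each its
// id, and join them all before returning.
static void run_task(GangTask& task, unsigned nworkers) {
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < nworkers; i++) {
    gang.emplace_back([&task, i] { task.work(i); });
  }
  for (std::thread& t : gang) t.join();
}

int main() {
  InitMarkRootsTaskSketch task;
  run_task(task, 4);
  return 0;
}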
*** 113,147 ****
        // class unloading.
        // b. With unload_classes = false, we have to nominally retain all the references from code
        //    cache, because there could be the case of embedded class/oop in the generated code,
        //    which we will never visit during mark. Without code cache invalidation, as in (a),
        //    we risk executing that code cache blob, and crashing.
        if (heap->unload_classes()) {
!         _rp->strong_roots_do(worker_id, oops);
        } else {
!         _rp->roots_do(worker_id, oops);
        }
      }
  };
  
  class ShenandoahUpdateRootsTask : public AbstractGangTask {
  private:
!   ShenandoahRootUpdater* _root_updater;
  public:
!   ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater) :
      AbstractGangTask("Shenandoah update roots task"),
!     _root_updater(root_updater) {
    }
  
    void work(uint worker_id) {
      assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahUpdateRefsClosure cl;
!     AlwaysTrueClosure always_true;
!     _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
    }
  };
  
  class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
  private:
--- 112,184 ----
        // class unloading.
        // b. With unload_classes = false, we have to nominally retain all the references from code
        //    cache, because there could be the case of embedded class/oop in the generated code,
        //    which we will never visit during mark. Without code cache invalidation, as in (a),
        //    we risk executing that code cache blob, and crashing.
+       // c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
+       //    and instead do that in concurrent phase under the relevant lock. This saves init mark
+       //    pause time.
+ 
+       CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
+       MarkingCodeBlobClosure blobs_cl(oops, ! CodeBlobToOopClosure::FixRelocations);
+ 
+       ResourceMark m;
        if (heap->unload_classes()) {
!         _rp->process_strong_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
        } else {
!         if (ShenandoahConcurrentScanCodeRoots) {
!           CodeBlobClosure* code_blobs = NULL;
! #ifdef ASSERT
!           ShenandoahAssertToSpaceClosure assert_to_space_oops;
!           CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
!           // If conc code cache evac is disabled, code cache should have only to-space ptrs.
!           // Otherwise, it should have to-space ptrs only if mark does not update refs.
!           if (!heap->has_forwarded_objects()) {
!             code_blobs = &assert_to_space;
!           }
! #endif
!           _rp->process_all_roots(oops, &clds_cl, code_blobs, NULL, worker_id);
!         } else {
!           _rp->process_all_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
!         }
        }
      }
  };
  
  class ShenandoahUpdateRootsTask : public AbstractGangTask {
  private:
!   ShenandoahRootProcessor* _rp;
!   const bool _update_code_cache;
  public:
!   ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
      AbstractGangTask("Shenandoah update roots task"),
!     _rp(rp),
!     _update_code_cache(update_code_cache) {
    }
  
    void work(uint worker_id) {
      assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahUpdateRefsClosure cl;
!     CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
! 
!     CodeBlobClosure* code_blobs;
!     CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
! #ifdef ASSERT
!     ShenandoahAssertToSpaceClosure assert_to_space_oops;
!     CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
! #endif
!     if (_update_code_cache) {
!       code_blobs = &update_blobs;
!     } else {
!       code_blobs =
!         DEBUG_ONLY(&assert_to_space)
!         NOT_DEBUG(NULL);
!     }
!     _rp->update_all_roots<AlwaysTrueClosure>(&cl, &cldCl, code_blobs, NULL, worker_id);
    }
  };
  
  class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
  private:
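The new ShenandoahUpdateRootsTask picks its code-blob closure with DEBUG_ONLY(&assert_to_space) NOT_DEBUG(NULL). These macros come from HotSpot's utilities/macros.hpp: each expands to its argument in one build flavor and to nothing in the other, so the back-to-back pair acts as a compile-time either/or. A self-contained sketch of that selection:

// Sketch of the DEBUG_ONLY/NOT_DEBUG selection used above. With ASSERT
// defined (debug build), DEBUG_ONLY keeps its argument and NOT_DEBUG drops
// its argument; in product builds the roles flip.
#include <cstdio>

#ifdef ASSERT
#define DEBUG_ONLY(code) code
#define NOT_DEBUG(code)
#else
#define DEBUG_ONLY(code)
#define NOT_DEBUG(code) code
#endif

int main() {
  // Mirrors "code_blobs = DEBUG_ONLY(&assert_to_space) NOT_DEBUG(NULL);":
  // debug builds get the verification closure, product builds get NULL.
  const char* code_blobs =
    DEBUG_ONLY("assert-to-space closure")
    NOT_DEBUG(NULL);
  std::printf("selected: %s\n", code_blobs ? code_blobs : "NULL");
  return 0;
}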
*** 225,240 ****
        shenandoah_assert_rp_isalive_installed();
      } else {
        rp = NULL;
      }
  
!     if (heap->is_degenerated_gc_in_progress()) {
!       // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
!       // let's check here.
!       _cm->concurrent_scan_code_roots(worker_id, rp);
!     }
  
      _cm->mark_loop(worker_id, _terminator, rp,
                     false, // not cancellable
                     _dedup_string);
  
      assert(_cm->task_queues()->is_empty(), "Should be empty");
--- 262,274 ----
        shenandoah_assert_rp_isalive_installed();
      } else {
        rp = NULL;
      }
  
!     // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
!     // let's check here.
!     _cm->concurrent_scan_code_roots(worker_id, rp);
  
      _cm->mark_loop(worker_id, _terminator, rp,
                     false, // not cancellable
                     _dedup_string);
  
      assert(_cm->task_queues()->is_empty(), "Should be empty");
*** 252,262 ****
    WorkGang* workers = heap->workers();
    uint nworkers = workers->active_workers();
  
    assert(nworkers <= task_queues()->size(), "Just check");
  
!   ShenandoahAllRootScanner root_proc(nworkers, root_phase);
    TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
    task_queues()->reserve(nworkers);
  
    if (heap->has_forwarded_objects()) {
      ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
--- 286,296 ----
    WorkGang* workers = heap->workers();
    uint nworkers = workers->active_workers();
  
    assert(nworkers <= task_queues()->size(), "Just check");
  
!   ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
    TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
    task_queues()->reserve(nworkers);
  
    if (heap->has_forwarded_objects()) {
      ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
*** 296,342 ****
    DerivedPointerTable::clear();
  #endif
  
    uint nworkers = _heap->workers()->active_workers();
  
!   ShenandoahRootUpdater root_updater(nworkers, root_phase, update_code_cache);
!   ShenandoahUpdateRootsTask update_roots(&root_updater);
    _heap->workers()->run_task(&update_roots);
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
  #endif
  }
  
- class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
- private:
-   ShenandoahThreadRoots _thread_roots;
-   ShenandoahPhaseTimings::Phase _phase;
- public:
-   ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
-     AbstractGangTask("Shenandoah Update Thread Roots"),
-     _thread_roots(is_par),
-     _phase(phase) {
-     ShenandoahHeap::heap()->phase_timings()->record_workers_start(_phase);
-   }
- 
-   ~ShenandoahUpdateThreadRootsTask() {
-     ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
-   }
-   void work(uint worker_id) {
-     ShenandoahUpdateRefsClosure cl;
-     _thread_roots.oops_do(&cl, NULL, worker_id);
-   }
- };
- 
- void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
-   WorkGang* workers = _heap->workers();
-   bool is_par = workers->active_workers() > 1;
-   ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
-   workers->run_task(&task);
- }
- 
  void ShenandoahConcurrentMark::initialize(uint workers) {
    _heap = ShenandoahHeap::heap();
    uint num_queues = MAX2(workers, 1U);
--- 330,348 ----
    DerivedPointerTable::clear();
  #endif
  
    uint nworkers = _heap->workers()->active_workers();
  
!   ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
!   ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
    _heap->workers()->run_task(&update_roots);
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
  #endif
  }
  
  void ShenandoahConcurrentMark::initialize(uint workers) {
    _heap = ShenandoahHeap::heap();
    uint num_queues = MAX2(workers, 1U);
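On both sides of this hunk the root update is bracketed by DerivedPointerTable::clear() and DerivedPointerTable::update_pointers() under COMPILER2/JVMCI: JIT-compiled frames can hold derived pointers (base plus offset into an object) that must be recomputed once the base oops change. A conceptual sketch of that bracketing, not the HotSpot implementation:

// Conceptual model of a derived-pointer table: while roots are being
// updated, each derived pointer is recorded against its base with the
// offset between them; afterwards the derived values are rebuilt from
// the (possibly moved) bases.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry {
  uintptr_t* slot;       // location of the derived pointer
  uintptr_t* base_slot;  // location of the base pointer it derives from
  intptr_t   offset;     // derived - base, captured before movement
};

static std::vector<Entry> g_table;

static void record_derived(uintptr_t* slot, uintptr_t* base_slot) {
  g_table.push_back({slot, base_slot, (intptr_t)(*slot - *base_slot)});
}

static void update_pointers() {
  // Bases were updated by the root updater; re-derive and drop the table.
  for (const Entry& e : g_table) {
    *e.slot = *e.base_slot + e.offset;
  }
  g_table.clear();
}

int main() {
  uintptr_t base = 0x1000, derived = 0x1010;  // derived = base + 0x10
  record_derived(&derived, &base);
  base = 0x2000;                              // root updater moved the base
  update_pointers();
  std::printf("derived is now %#lx\n", (unsigned long)derived);  // 0x2010
  return 0;
}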
*** 351,361 ****
  void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
    if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
      ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
      if (!_heap->unload_classes()) {
!       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        // TODO: We can not honor StringDeduplication here, due to lock ranking
        // inversion. So, we may miss some deduplication candidates.
        if (_heap->has_forwarded_objects()) {
          ShenandoahMarkResolveRefsClosure cl(q, rp);
          CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
--- 357,367 ----
  void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
    if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
      ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
      if (!_heap->unload_classes()) {
!       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        // TODO: We can not honor StringDeduplication here, due to lock ranking
        // inversion. So, we may miss some deduplication candidates.
        if (_heap->has_forwarded_objects()) {
          ShenandoahMarkResolveRefsClosure cl(q, rp);
          CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
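concurrent_scan_code_roots() is called from every marking worker (see the hunk at lines 225,240 above), but claim_codecache() gates the scan so only one worker actually walks the code cache. Its body is not on this page; a standalone sketch of that one-shot claiming pattern, assuming an atomic flag:

// Sketch of a one-shot claim: exactly one of the competing workers wins,
// so the code cache is scanned once even though every worker asks.
#include <atomic>
#include <cstdio>

static std::atomic<bool> g_codecache_claimed{false};

// Returns true for exactly one caller; every later caller sees false.
static bool claim_codecache() {
  bool expected = false;
  return g_codecache_claimed.compare_exchange_strong(expected, true);
}

static void concurrent_scan_code_roots(unsigned worker_id) {
  if (claim_codecache()) {
    std::printf("worker %u claimed the code cache scan\n", worker_id);
    // ... take CodeCache_lock and walk the code blobs here ...
  }
}

int main() {
  for (unsigned i = 0; i < 4; i++) concurrent_scan_code_roots(i);
  return 0;
}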
*** 652,668 ****
  // need updating otherwise.
  // Weak processor API requires us to visit the oops, even if we are not doing
  // anything to them.
  void ShenandoahConcurrentMark::weak_roots_work() {
    WorkGang* workers = _heap->workers();
!   OopClosure* keep_alive = &do_nothing_cl;
! #ifdef ASSERT
!   ShenandoahWeakAssertNotForwardedClosure verify_cl;
!   keep_alive = &verify_cl;
! #endif
!   ShenandoahIsAliveClosure is_alive;
!   WeakProcessor::weak_oops_do(workers, &is_alive, keep_alive, 1);
  }
  
  void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
    ReferenceProcessor* rp = _heap->ref_processor();
--- 658,676 ----
  // need updating otherwise.
  // Weak processor API requires us to visit the oops, even if we are not doing
  // anything to them.
  void ShenandoahConcurrentMark::weak_roots_work() {
    WorkGang* workers = _heap->workers();
!   ShenandoahIsAliveSelector is_alive;
! 
!   if (_heap->has_forwarded_objects()) {
!     ShenandoahWeakUpdateClosure cl;
!     WeakProcessor::weak_oops_do(workers, is_alive.is_alive_closure(), &cl, 1);
!   } else {
!     ShenandoahWeakAssertNotForwardedClosure cl;
!     WeakProcessor::weak_oops_do(workers, is_alive.is_alive_closure(), &cl, 1);
!   }
  }
  
  void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
    ReferenceProcessor* rp = _heap->ref_processor();
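The new weak_roots_work() selects its closures from heap state: when forwarded objects may still exist, weak roots are updated to their to-space copies; otherwise the pass merely asserts nothing is forwarded, since the WeakProcessor API requires visiting every oop either way. An illustrative sketch of that select-by-heap-state shape, with hypothetical names:

// Sketch of selecting the weak-root closure by heap state. The closure
// classes here are stand-ins, not HotSpot's.
#include <cstdio>

struct OopClosure {
  virtual ~OopClosure() {}
  virtual void do_oop(void** p) = 0;
};

// Used when marking ran with ref updates pending: weak roots may hold
// stale pointers that must be forwarded to the to-space copy.
struct WeakUpdateClosureSketch : OopClosure {
  void do_oop(void** p) override { (void)p; std::puts("update root to to-space copy"); }
};

// Used when no forwarded objects can exist: only assert that invariant.
struct WeakAssertNotForwardedSketch : OopClosure {
  void do_oop(void** p) override { (void)p; std::puts("assert root is not forwarded"); }
};

static void weak_roots_work(bool has_forwarded_objects) {
  void* dummy_root = nullptr;
  if (has_forwarded_objects) {
    WeakUpdateClosureSketch cl;
    cl.do_oop(&dummy_root);   // real code hands cl to WeakProcessor
  } else {
    WeakAssertNotForwardedSketch cl;
    cl.do_oop(&dummy_root);
  }
}

int main() {
  weak_roots_work(true);
  weak_roots_work(false);
  return 0;
}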