src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp (old version)

1057     _rp(rp) {}
1058 
1059   void work(uint worker_id) {
1060     ShenandoahParallelWorkerSession worker_session(worker_id);
1061     ShenandoahEvacOOMScope oom_evac_scope;
1062     ShenandoahEvacuateUpdateRootsClosure cl;
1063     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1064     _rp->roots_do(worker_id, &cl);
1065   }
1066 };
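
A minimal standalone sketch of the gang-task pattern this class uses, assuming nothing Shenandoah-specific: run_task() fans the task out to the worker gang, and each worker handles the root partition selected by its worker_id. GangTask, EvacuateUpdateRootsSketch, and run_task below are illustrative stand-ins, not HotSpot types.

    #include <cstdio>
    #include <thread>
    #include <vector>

    struct GangTask {
      virtual ~GangTask() {}
      virtual void work(unsigned worker_id) = 0;
    };

    struct EvacuateUpdateRootsSketch : GangTask {
      void work(unsigned worker_id) {
        // Stands in for: per-worker session + evac-OOM scope, then
        // _rp->roots_do(worker_id, &cl) over this worker's root partition.
        std::printf("worker %u scans its root partition\n", worker_id);
      }
    };

    static void run_task(GangTask* task, unsigned nworkers) {
      std::vector<std::thread> gang;
      for (unsigned i = 0; i < nworkers; i++) {
        gang.emplace_back([task, i] { task->work(i); });
      }
      for (std::thread& t : gang) t.join();
    }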
1067 
1068 void ShenandoahHeap::evacuate_and_update_roots() {
1069 #if COMPILER2_OR_JVMCI
1070   DerivedPointerTable::clear();
1071 #endif
1072   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1073   {
1074     // Include concurrent roots if the current cycle cannot process those roots concurrently
1075     ShenandoahRootEvacuator rp(workers()->active_workers(),
1076                                ShenandoahPhaseTimings::init_evac,
1077                                !ShenandoahConcurrentRoots::should_do_concurrent_roots());

1078     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1079     workers()->run_task(&roots_task);
1080   }
1081 
1082 #if COMPILER2_OR_JVMCI
1083   DerivedPointerTable::update_pointers();
1084 #endif
1085 }
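
The DerivedPointerTable::clear() / update_pointers() pair above brackets a safepoint phase that may move oops referenced from compiled frames. A hedged sketch of the same bracket as an RAII guard; DerivedPointerTableBracket is hypothetical, while the two static calls are the real HotSpot API:

    #if COMPILER2_OR_JVMCI
    // Hypothetical guard; constructor/destructor mirror the explicit calls
    // in evacuate_and_update_roots() above.
    class DerivedPointerTableBracket : public StackObj {
    public:
      DerivedPointerTableBracket()  { DerivedPointerTable::clear(); }
      ~DerivedPointerTableBracket() { DerivedPointerTable::update_pointers(); }
    };
    #endif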
1086 
1087 // Returns size in bytes
1088 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1089   if (ShenandoahElasticTLAB) {
1090     // With Elastic TLABs, return the max allowed size, and let the allocation path
1091     // figure out the safe size for the current allocation.
1092     return ShenandoahHeapRegion::max_tlab_size_bytes();
1093   } else {
1094     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1095   }
1096 }
1097 
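
A standalone sketch of the division of labor the comment above describes: under Elastic TLABs this function reports the maximum, and the allocation path later shrinks the request to what the current region can give. clamp_tlab_request is hypothetical, for illustration only.

    #include <cstddef>

    static size_t clamp_tlab_request(size_t requested, size_t region_free, size_t max_tlab) {
      size_t size = requested < max_tlab ? requested : max_tlab;  // cap at max TLAB size
      return size < region_free ? size : region_free;             // the "elastic" part
    }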


1534 
1535       heuristics()->choose_collection_set(_collection_set);
1536 
1537       _free_set->rebuild();
1538     }
1539 
1540     // If collection set has candidates, start evacuation.
1541     // Otherwise, bypass the rest of the cycle.
1542     if (!collection_set()->is_empty()) {
1543       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1544 
1545       if (ShenandoahVerify) {
1546         verifier()->verify_before_evacuation();
1547       }
1548 
1549       set_evacuation_in_progress(true);
1550       // From here on, we need to update references.
1551       set_has_forwarded_objects(true);
1552 
1553       if (!is_degenerated_gc_in_progress()) {


1554         evacuate_and_update_roots();
1555       }
1556 
1557       if (ShenandoahPacing) {
1558         pacer()->setup_for_evac();
1559       }
1560 
1561       if (ShenandoahVerify) {

1562         if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1563           ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
1564           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
1565           verifier()->verify_roots_no_forwarded_except(types);
1566         } else {
1567           verifier()->verify_roots_no_forwarded();
1568         }
1569         verifier()->verify_during_evacuation();
1570       }
1571     } else {
1572       if (ShenandoahVerify) {
1573         verifier()->verify_after_concmark();
1574       }
1575 
1576       if (VerifyAfterGC) {
1577         Universe::verify();
1578       }
1579     }
1580 
1581   } else {
1582     // If this cycle was updating references, we need to keep the has_forwarded_objects
1583     // flag on, for subsequent phases to deal with it.
1584     concurrent_mark()->cancel();
1585     set_concurrent_mark_in_progress(false);
1586 
1587     if (process_references()) {
1588       // Abandon reference processing right away: pre-cleaning must have failed.


1644 
1645   void work(uint worker_id) {
1646     ShenandoahEvacOOMScope oom;
1647     {
1648       // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
1649       // may race against OopStorage::release() calls.
1650       ShenandoahEvacUpdateOopStorageRootsClosure cl;
1651       _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl);
1652       _weak_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl);
1653     }
1654 
1655     {
1656       ShenandoahEvacuateUpdateRootsClosure cl;
1657       CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1658       _cld_roots.cld_do(&clds);
1659     }
1660   }
1661 };
1662 
1663 void ShenandoahHeap::op_roots() {
1664   if (is_evacuation_in_progress() &&
1665       ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1666     ShenandoahConcurrentRootsEvacUpdateTask task;
1667     workers()->run_task(&task);
1668   }
1669 }
1670 
1671 void ShenandoahHeap::op_reset() {
1672   reset_mark_bitmap();
1673 }
1674 
1675 void ShenandoahHeap::op_preclean() {
1676   concurrent_mark()->preclean_weak_refs();
1677 }
1678 
1679 void ShenandoahHeap::op_init_traversal() {
1680   traversal_gc()->init_traversal_collection();
1681 }
1682 
1683 void ShenandoahHeap::op_traversal() {
1684   traversal_gc()->concurrent_traversal_collection();
1685 }
1686 
1687 void ShenandoahHeap::op_final_traversal() {
1688   traversal_gc()->final_traversal_collection();


1906 
1907 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1908   if (has_forwarded_objects()) {
1909     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1910   } else {
1911     set_gc_state_mask(MARKING, in_progress);
1912   }
1913   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1914 }
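
set_gc_state_mask() treats the gc state as a small bitmask, which is why MARKING | UPDATEREFS can arm both barrier modes in one call. A standalone sketch of the idiom; the bit positions below are illustrative, not Shenandoah's actual encoding.

    typedef unsigned char gc_state_t;

    static const gc_state_t MARKING    = 1 << 0;  // illustrative bit positions
    static const gc_state_t EVACUATION = 1 << 1;
    static const gc_state_t UPDATEREFS = 1 << 2;

    static void set_state_mask(gc_state_t& state, gc_state_t mask, bool value) {
      if (value) {
        state = (gc_state_t)(state | mask);   // arm every bit in the mask
      } else {
        state = (gc_state_t)(state & ~mask);  // disarm them together
      }
    }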
1915 
1916 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1917    set_gc_state_mask(TRAVERSAL, in_progress);
1918    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1919 }
1920 
1921 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1922   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1923   set_gc_state_mask(EVACUATION, in_progress);
1924 }
1925 
1926 void ShenandoahHeap::ref_processing_init() {
1927   assert(_max_workers > 0, "Sanity");
1928 
1929   _ref_processor =
1930     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1931                            ParallelRefProcEnabled,  // MT processing
1932                            _max_workers,            // Degree of MT processing
1933                            true,                    // MT discovery
1934                            _max_workers,            // Degree of MT discovery
1935                            false,                   // Reference discovery is not atomic
1936                            NULL,                    // No closure, should be installed before use
1937                            true);                   // Scale worker threads
1938 
1939   shenandoah_assert_rp_isalive_not_installed();
1940 }
1941 
1942 GCTracer* ShenandoahHeap::tracer() {
1943   return shenandoah_policy()->tracer();
1944 }
1945 


2014     ShenandoahGCPhase phase(full_gc ?
2015                             ShenandoahPhaseTimings::full_gc_purge_par :
2016                             ShenandoahPhaseTimings::purge_par);
2017     ShenandoahIsAliveSelector is_alive;
2018     uint num_workers = _workers->active_workers();
2019     ShenandoahClassUnloadingTask unlink_task(is_alive.is_alive_closure(), num_workers, purged_class);
2020     _workers->run_task(&unlink_task);
2021   }
2022 
2023   {
2024     ShenandoahGCPhase phase(full_gc ?
2025                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2026                             ShenandoahPhaseTimings::purge_cldg);
2027     ClassLoaderDataGraph::purge();
2028   }
2029   // Resize and verify metaspace
2030   MetaspaceGC::compute_new_size();
2031   MetaspaceUtils::verify_metrics();
2032 }
2033 
2034 // Process leftover weak oops: update them if needed, or assert they do not
2035 // need updating otherwise.
2036 // Weak processor API requires us to visit the oops, even if we are not doing
2037 // anything to them.
2038 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2039   ShenandoahGCPhase root_phase(full_gc ?
2040                                ShenandoahPhaseTimings::full_gc_purge :
2041                                ShenandoahPhaseTimings::purge);
2042   uint num_workers = _workers->active_workers();
2043   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2044                                                ShenandoahPhaseTimings::full_gc_purge_par :
2045                                                ShenandoahPhaseTimings::purge_par;
2046   // Cleanup weak roots
2047   ShenandoahGCPhase phase(timing_phase);
2048   if (has_forwarded_objects()) {
2049     if (is_traversal_mode()) {
2050       ShenandoahForwardedIsAliveClosure is_alive;
2051       ShenandoahTraversalUpdateRefsClosure keep_alive;
2052       ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalUpdateRefsClosure>
2053         cleaning_task(&is_alive, &keep_alive, num_workers);
2054       _workers->run_task(&cleaning_task);
2055     } else {
2056       ShenandoahForwardedIsAliveClosure is_alive;
2057       ShenandoahUpdateRefsClosure keep_alive;
2058       ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2059         cleaning_task(&is_alive, &keep_alive, num_workers);
2060       _workers->run_task(&cleaning_task);
2061     }
2062   } else {
2063     ShenandoahIsAliveClosure is_alive;
2064 #ifdef ASSERT
2065     ShenandoahAssertNotForwardedClosure verify_cl;
2066     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2067       cleaning_task(&is_alive, &verify_cl, num_workers);
2068 #else
2069     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2070       cleaning_task(&is_alive, &do_nothing_cl, num_workers);
2071 #endif
2072     _workers->run_task(&cleaning_task);
2073   }
2074 }
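
All of the cleaning tasks above follow the same weak-root contract: an is_alive closure decides liveness, a keep_alive closure is applied to live referents (updating them if forwarded), and dead slots are cleared. A standalone sketch of that contract over a plain slot array; oop_t and clean_weak_slots are illustrative, not HotSpot types.

    #include <cstddef>

    typedef void* oop_t;  // stand-in for HotSpot's oop

    template <typename IsAlive, typename KeepAlive>
    static void clean_weak_slots(oop_t* slots, size_t n,
                                 IsAlive& is_alive, KeepAlive& keep_alive) {
      for (size_t i = 0; i < n; i++) {
        if (slots[i] == NULL) continue;
        if (is_alive(slots[i])) {
          keep_alive(slots[i]);  // e.g. update the slot to the to-space copy
        } else {
          slots[i] = NULL;       // "null" dead oops, per the comment above
        }
      }
    }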
2075 
2076 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2077   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2078   stw_process_weak_roots(full_gc);
2079   stw_unload_classes(full_gc);


2080 }
2081 
2082 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2083   if (is_traversal_mode()) {
2084     set_gc_state_mask(HAS_FORWARDED | UPDATEREFS, cond);
2085   } else {
2086     set_gc_state_mask(HAS_FORWARDED, cond);
2087   }
2088 
2089 }
2090 
2091 void ShenandoahHeap::set_process_references(bool pr) {
2092   _process_references.set_cond(pr);
2093 }
2094 
2095 void ShenandoahHeap::set_unload_classes(bool uc) {
2096   _unload_classes.set_cond(uc);
2097 }
2098 
2099 bool ShenandoahHeap::process_references() const {


2127 }
2128 
2129 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2130   _degenerated_gc_in_progress.set_cond(in_progress);
2131 }
2132 
2133 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2134   _full_gc_in_progress.set_cond(in_progress);
2135 }
2136 
2137 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2138   assert (is_full_gc_in_progress(), "should be");
2139   _full_gc_move_in_progress.set_cond(in_progress);
2140 }
2141 
2142 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2143   set_gc_state_mask(UPDATEREFS, in_progress);
2144 }
2145 
2146 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2147   ShenandoahCodeRoots::add_nmethod(nm);
2148 }
2149 
2150 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2151   ShenandoahCodeRoots::remove_nmethod(nm);
2152 }
2153 
2154 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2155   heap_region_containing(o)->record_pin();
2156   return o;
2157 }
2158 
2159 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2160   heap_region_containing(o)->record_unpin();
2161 }
2162 
2163 void ShenandoahHeap::sync_pinned_region_status() {
2164   ShenandoahHeapLocker locker(lock());
2165 
2166   for (size_t i = 0; i < num_regions(); i++) {
2167     ShenandoahHeapRegion *r = get_region(i);
2168     if (r->is_active()) {
2169       if (r->is_pinned()) {
2170         if (r->pin_count() == 0) {
2171           r->make_unpinned();


2178     }
2179   }
2180 
2181   assert_pinned_region_status();
2182 }
2183 
2184 #ifdef ASSERT
2185 void ShenandoahHeap::assert_pinned_region_status() {
2186   for (size_t i = 0; i < num_regions(); i++) {
2187     ShenandoahHeapRegion* r = get_region(i);
2188     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2189            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2190   }
2191 }
2192 #endif
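
The invariant asserted above follows from the pin/unpin protocol: pin_object() raises a per-region count, unpin_object() lowers it, and sync_pinned_region_status() may drop the pinned state only once the count reaches zero. A standalone sketch of that counter, simplified to one region and no heap lock:

    #include <cassert>
    #include <cstddef>

    struct PinnedRegionSketch {
      size_t pin_count;
      bool   pinned;

      PinnedRegionSketch() : pin_count(0), pinned(false) {}

      void record_pin()   { pin_count++; pinned = true; }
      void record_unpin() { assert(pin_count > 0 && "unbalanced unpin"); pin_count--; }

      // Counterpart of sync_pinned_region_status(): only a zero count may
      // clear the pinned state, matching the assert in the code above.
      void sync_status()  { if (pinned && pin_count == 0) pinned = false; }
    };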
2193 
2194 GCTimer* ShenandoahHeap::gc_timer() const {
2195   return _gc_timer;
2196 }
2197 
2198 #ifdef ASSERT
2199 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2200   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2201 
2202   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2203     if (UseDynamicNumberOfGCThreads ||
2204         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2205       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2206     } else {
2207       // Use ParallelGCThreads inside safepoints
2208       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2209     }
2210   } else {
2211     if (UseDynamicNumberOfGCThreads ||
2212         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2213       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2214     } else {
2215       // Use ConcGCThreads outside safepoints
2216       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2217     }


2301     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare);
2302 
2303     make_parsable(true);
2304     for (uint i = 0; i < num_regions(); i++) {
2305       ShenandoahHeapRegion* r = get_region(i);
2306       r->set_concurrent_iteration_safe_limit(r->top());
2307     }
2308 
2309     // Reset iterator.
2310     _update_refs_iterator.reset();
2311   }
2312 
2313   if (ShenandoahPacing) {
2314     pacer()->setup_for_updaterefs();
2315   }
2316 }
2317 
2318 void ShenandoahHeap::op_final_updaterefs() {
2319   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2320 


2321   // Check if there is left-over work, and finish it
2322   if (_update_refs_iterator.has_next()) {
2323     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
2324 
2325     // Finish updating references where we left off.
2326     clear_cancelled_gc();
2327     update_heap_references(false);
2328   }
2329 
2330   // Clear cancelled GC, if set. On cancellation path, the block before would handle
2331   // everything. On degenerated paths, cancelled gc would not be set anyway.
2332   if (cancelled_gc()) {
2333     clear_cancelled_gc();
2334   }
2335   assert(!cancelled_gc(), "Should have been done right before");
2336 
2337   if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2338     verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2339   }
2340 
2341   if (is_degenerated_gc_in_progress()) {
2342     concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2343   } else {
2344     concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2345   }
2346 
2347   // Has to be done before cset is clear
2348   if (ShenandoahVerify) {
2349     verifier()->verify_roots_in_to_space();
2350   }
2351 
2352   // Drop unnecessary "pinned" state from regions that do not have CP marks
2353   // anymore, as this allows trashing them below.
2354   {
2355     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_sync_pinned);
2356     sync_pinned_region_status();
2357   }
2358 


src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp (new version)

1057     _rp(rp) {}
1058 
1059   void work(uint worker_id) {
1060     ShenandoahParallelWorkerSession worker_session(worker_id);
1061     ShenandoahEvacOOMScope oom_evac_scope;
1062     ShenandoahEvacuateUpdateRootsClosure cl;
1063     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1064     _rp->roots_do(worker_id, &cl);
1065   }
1066 };
1067 
1068 void ShenandoahHeap::evacuate_and_update_roots() {
1069 #if COMPILER2_OR_JVMCI
1070   DerivedPointerTable::clear();
1071 #endif
1072   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1073   {
1074     // Include concurrent roots if the current cycle cannot process those roots concurrently
1075     ShenandoahRootEvacuator rp(workers()->active_workers(),
1076                                ShenandoahPhaseTimings::init_evac,
1077                                !ShenandoahConcurrentRoots::should_do_concurrent_roots(),
1078                                !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
1079     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1080     workers()->run_task(&roots_task);
1081   }
1082 
1083 #if COMPILER2_OR_JVMCI
1084   DerivedPointerTable::update_pointers();
1085 #endif
1086 }
1087 
1088 // Returns size in bytes
1089 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1090   if (ShenandoahElasticTLAB) {
1091     // With Elastic TLABs, return the max allowed size, and let the allocation path
1092     // figure out the safe size for the current allocation.
1093     return ShenandoahHeapRegion::max_tlab_size_bytes();
1094   } else {
1095     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1096   }
1097 }
1098 


1535 
1536       heuristics()->choose_collection_set(_collection_set);
1537 
1538       _free_set->rebuild();
1539     }
1540 
1541     // If collection set has candidates, start evacuation.
1542     // Otherwise, bypass the rest of the cycle.
1543     if (!collection_set()->is_empty()) {
1544       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1545 
1546       if (ShenandoahVerify) {
1547         verifier()->verify_before_evacuation();
1548       }
1549 
1550       set_evacuation_in_progress(true);
1551       // From here on, we need to update references.
1552       set_has_forwarded_objects(true);
1553 
1554       if (!is_degenerated_gc_in_progress()) {
1555         prepare_concurrent_roots();
1556         prepare_concurrent_unloading();
1557         evacuate_and_update_roots();
1558       }
1559 
1560       if (ShenandoahPacing) {
1561         pacer()->setup_for_evac();
1562       }
1563 
1564       if (ShenandoahVerify) {
1565         ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
1566         if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1567           types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
1568           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
1569         }
1570 
1571         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1572           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
1573         }
1574         verifier()->verify_roots_no_forwarded_except(types);
1575         verifier()->verify_during_evacuation();
1576       }
1577     } else {
1578       if (ShenandoahVerify) {
1579         verifier()->verify_after_concmark();
1580       }
1581 
1582       if (VerifyAfterGC) {
1583         Universe::verify();
1584       }
1585     }
1586 
1587   } else {
1588     // If this cycle was updating references, we need to keep the has_forwarded_objects
1589     // flag on, for subsequent phases to deal with it.
1590     concurrent_mark()->cancel();
1591     set_concurrent_mark_in_progress(false);
1592 
1593     if (process_references()) {
1594       // Abandon reference processing right away: pre-cleaning must have failed.


1650 
1651   void work(uint worker_id) {
1652     ShenandoahEvacOOMScope oom;
1653     {
1654       // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
1655       // may race against OopStorage::release() calls.
1656       ShenandoahEvacUpdateOopStorageRootsClosure cl;
1657       _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl);
1658       _weak_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl);
1659     }
1660 
1661     {
1662       ShenandoahEvacuateUpdateRootsClosure cl;
1663       CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1664       _cld_roots.cld_do(&clds);
1665     }
1666   }
1667 };
1668 
1669 void ShenandoahHeap::op_roots() {
1670   if (is_evacuation_in_progress()) {
1671     if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1672       _unloader.unload();
1673     }
1674 
1675     if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1676       ShenandoahConcurrentRootsEvacUpdateTask task;
1677       workers()->run_task(&task);
1678     }
1679   }
1680 
1681   set_concurrent_root_in_progress(false);
1682 }
1683 
1684 void ShenandoahHeap::op_reset() {
1685   reset_mark_bitmap();
1686 }
1687 
1688 void ShenandoahHeap::op_preclean() {
1689   concurrent_mark()->preclean_weak_refs();
1690 }
1691 
1692 void ShenandoahHeap::op_init_traversal() {
1693   traversal_gc()->init_traversal_collection();
1694 }
1695 
1696 void ShenandoahHeap::op_traversal() {
1697   traversal_gc()->concurrent_traversal_collection();
1698 }
1699 
1700 void ShenandoahHeap::op_final_traversal() {
1701   traversal_gc()->final_traversal_collection();


1919 
1920 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1921   if (has_forwarded_objects()) {
1922     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1923   } else {
1924     set_gc_state_mask(MARKING, in_progress);
1925   }
1926   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1927 }
1928 
1929 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1930    set_gc_state_mask(TRAVERSAL, in_progress);
1931    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1932 }
1933 
1934 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1935   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1936   set_gc_state_mask(EVACUATION, in_progress);
1937 }
1938 
1939 void ShenandoahHeap::set_concurrent_root_in_progress(bool in_progress) {
1940   assert(ShenandoahConcurrentRoots::can_do_concurrent_roots(), "Why set the flag?");
1941   if (in_progress) {
1942     _concurrent_root_in_progress.set();
1943   } else {
1944     _concurrent_root_in_progress.unset();
1945   }
1946 }
1947 
1948 void ShenandoahHeap::ref_processing_init() {
1949   assert(_max_workers > 0, "Sanity");
1950 
1951   _ref_processor =
1952     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1953                            ParallelRefProcEnabled,  // MT processing
1954                            _max_workers,            // Degree of MT processing
1955                            true,                    // MT discovery
1956                            _max_workers,            // Degree of MT discovery
1957                            false,                   // Reference discovery is not atomic
1958                            NULL,                    // No closure, should be installed before use
1959                            true);                   // Scale worker threads
1960 
1961   shenandoah_assert_rp_isalive_not_installed();
1962 }
1963 
1964 GCTracer* ShenandoahHeap::tracer() {
1965   return shenandoah_policy()->tracer();
1966 }
1967 


2036     ShenandoahGCPhase phase(full_gc ?
2037                             ShenandoahPhaseTimings::full_gc_purge_par :
2038                             ShenandoahPhaseTimings::purge_par);
2039     ShenandoahIsAliveSelector is_alive;
2040     uint num_workers = _workers->active_workers();
2041     ShenandoahClassUnloadingTask unlink_task(is_alive.is_alive_closure(), num_workers, purged_class);
2042     _workers->run_task(&unlink_task);
2043   }
2044 
2045   {
2046     ShenandoahGCPhase phase(full_gc ?
2047                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2048                             ShenandoahPhaseTimings::purge_cldg);
2049     ClassLoaderDataGraph::purge();
2050   }
2051   // Resize and verify metaspace
2052   MetaspaceGC::compute_new_size();
2053   MetaspaceUtils::verify_metrics();
2054 }
2055 
2056 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2057 // so they should not have forwarded oops.
2058 // However, we do need to "null" dead oops in the roots, if that cannot be done
2059 // in concurrent cycles.
2060 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2061   ShenandoahGCPhase root_phase(full_gc ?
2062                                ShenandoahPhaseTimings::full_gc_purge :
2063                                ShenandoahPhaseTimings::purge);
2064   uint num_workers = _workers->active_workers();
2065   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2066                                                ShenandoahPhaseTimings::full_gc_purge_par :
2067                                                ShenandoahPhaseTimings::purge_par;
2068   // Cleanup weak roots
2069   ShenandoahGCPhase phase(timing_phase);
2070   if (has_forwarded_objects()) {
2071     if (is_traversal_mode()) {
2072       ShenandoahForwardedIsAliveClosure is_alive;
2073       ShenandoahTraversalUpdateRefsClosure keep_alive;
2074       ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalUpdateRefsClosure>
2075         cleaning_task(&is_alive, &keep_alive, num_workers);
2076       _workers->run_task(&cleaning_task);
2077     } else {
2078       ShenandoahForwardedIsAliveClosure is_alive;
2079       ShenandoahUpdateRefsClosure keep_alive;
2080       ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2081         cleaning_task(&is_alive, &keep_alive, num_workers);
2082       _workers->run_task(&cleaning_task);
2083     }
2084   } else {
2085     ShenandoahIsAliveClosure is_alive;
2086 #ifdef ASSERT
2087     ShenandoahAssertNotForwardedClosure verify_cl;
2088     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2089       cleaning_task(&is_alive, &verify_cl, num_workers);
2090 #else
2091     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2092       cleaning_task(&is_alive, &do_nothing_cl, num_workers);
2093 #endif
2094     _workers->run_task(&cleaning_task);
2095   }
2096 }
2097 
2098 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2099   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2100   stw_process_weak_roots(full_gc);
2101   if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
2102     stw_unload_classes(full_gc);
2103   }
2104 }
2105 
2106 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2107   if (is_traversal_mode()) {
2108     set_gc_state_mask(HAS_FORWARDED | UPDATEREFS, cond);
2109   } else {
2110     set_gc_state_mask(HAS_FORWARDED, cond);
2111   }
2112 
2113 }
2114 
2115 void ShenandoahHeap::set_process_references(bool pr) {
2116   _process_references.set_cond(pr);
2117 }
2118 
2119 void ShenandoahHeap::set_unload_classes(bool uc) {
2120   _unload_classes.set_cond(uc);
2121 }
2122 
2123 bool ShenandoahHeap::process_references() const {


2151 }
2152 
2153 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2154   _degenerated_gc_in_progress.set_cond(in_progress);
2155 }
2156 
2157 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2158   _full_gc_in_progress.set_cond(in_progress);
2159 }
2160 
2161 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2162   assert (is_full_gc_in_progress(), "should be");
2163   _full_gc_move_in_progress.set_cond(in_progress);
2164 }
2165 
2166 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2167   set_gc_state_mask(UPDATEREFS, in_progress);
2168 }
2169 
2170 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2171   ShenandoahCodeRoots::register_nmethod(nm);
2172 }
2173 
2174 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2175   ShenandoahCodeRoots::unregister_nmethod(nm);
2176 }
2177 
2178 void ShenandoahHeap::flush_nmethod(nmethod* nm) {
2179   ShenandoahCodeRoots::flush_nmethod(nm);
2180 }
2181 
2182 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2183   heap_region_containing(o)->record_pin();
2184   return o;
2185 }
2186 
2187 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2188   heap_region_containing(o)->record_unpin();
2189 }
2190 
2191 void ShenandoahHeap::sync_pinned_region_status() {
2192   ShenandoahHeapLocker locker(lock());
2193 
2194   for (size_t i = 0; i < num_regions(); i++) {
2195     ShenandoahHeapRegion *r = get_region(i);
2196     if (r->is_active()) {
2197       if (r->is_pinned()) {
2198         if (r->pin_count() == 0) {
2199           r->make_unpinned();


2206     }
2207   }
2208 
2209   assert_pinned_region_status();
2210 }
2211 
2212 #ifdef ASSERT
2213 void ShenandoahHeap::assert_pinned_region_status() {
2214   for (size_t i = 0; i < num_regions(); i++) {
2215     ShenandoahHeapRegion* r = get_region(i);
2216     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2217            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2218   }
2219 }
2220 #endif
2221 
2222 GCTimer* ShenandoahHeap::gc_timer() const {
2223   return _gc_timer;
2224 }
2225 
2226 void ShenandoahHeap::prepare_concurrent_roots() {
2227   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2228   if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
2229     set_concurrent_root_in_progress(true);
2230   }
2231 }
2232 
2233 void ShenandoahHeap::prepare_concurrent_unloading() {
2234   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2235   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
2236     ShenandoahCodeRoots::prepare_concurrent_unloading();
2237     _unloader.prepare();
2238   }
2239 }
2240 
2241 void ShenandoahHeap::finish_concurrent_unloading() {
2242   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2243   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
2244     _unloader.finish();
2245   }
2246 }
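
Taken together, the three functions above form a prepare/run/finish handshake for concurrent class unloading. A hedged walkthrough of the ordering; the driver function is hypothetical (in the real code these calls are made from inside ShenandoahHeap's own VM operations and from op_roots()), and method visibility is assumed:

    void concurrent_unloading_walkthrough(ShenandoahHeap* heap) {
      // At the final-mark safepoint (op_final_mark):
      heap->prepare_concurrent_unloading();  // snapshot code roots, _unloader.prepare()

      // Concurrently, from op_roots():
      //   _unloader.unload();               // unlink dead classes and nmethods

      // At the final-update-refs safepoint (op_final_updaterefs):
      heap->finish_concurrent_unloading();   // _unloader.finish()
    }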
2247 
2248 #ifdef ASSERT
2249 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2250   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2251 
2252   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2253     if (UseDynamicNumberOfGCThreads ||
2254         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2255       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2256     } else {
2257       // Use ParallelGCThreads inside safepoints
2258       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2259     }
2260   } else {
2261     if (UseDynamicNumberOfGCThreads ||
2262         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2263       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2264     } else {
2265       // Use ConcGCThreads outside safepoints
2266       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2267     }


2351     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare);
2352 
2353     make_parsable(true);
2354     for (uint i = 0; i < num_regions(); i++) {
2355       ShenandoahHeapRegion* r = get_region(i);
2356       r->set_concurrent_iteration_safe_limit(r->top());
2357     }
2358 
2359     // Reset iterator.
2360     _update_refs_iterator.reset();
2361   }
2362 
2363   if (ShenandoahPacing) {
2364     pacer()->setup_for_updaterefs();
2365   }
2366 }
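
The safe limit captured above (top() at the safepoint) bounds what concurrent update-refs workers walk: objects allocated past it during the cycle are already in to-space and need no updates. A hedged sketch of a worker honoring that limit; the loop is illustrative, not the exact Shenandoah iteration code.

    void update_region_refs_sketch(ShenandoahHeapRegion* r, OopIterateClosure* cl) {
      HeapWord* p     = r->bottom();
      HeapWord* limit = r->concurrent_iteration_safe_limit();  // top() at init-update-refs
      while (p < limit) {
        oop obj = oop(p);
        obj->oop_iterate(cl);  // visit and update each reference field
        p += obj->size();
      }
    }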
2367 
2368 void ShenandoahHeap::op_final_updaterefs() {
2369   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2370 
2371   finish_concurrent_unloading();
2372 
2373   // Check if there is left-over work, and finish it
2374   if (_update_refs_iterator.has_next()) {
2375     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
2376 
2377     // Finish updating references where we left off.
2378     clear_cancelled_gc();
2379     update_heap_references(false);
2380   }
2381 
2382   // Clear cancelled GC, if set. On cancellation path, the block before would handle
2383   // everything. On degenerated paths, cancelled gc would not be set anyway.
2384   if (cancelled_gc()) {
2385     clear_cancelled_gc();
2386   }
2387   assert(!cancelled_gc(), "Should have been done right before");
2388 
2389   if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2390     verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2391   }
2392 
2393   if (is_degenerated_gc_in_progress()) {
2394     concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2395   } else {
2396     concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2397   }
2398 
2399   // Has to be done before cset is clear
2400   if (ShenandoahVerify) {
2401     verifier()->verify_roots_in_to_space();
2402   }
2403 
2404   // Drop unnecessary "pinned" state from regions that do not have CP marks
2405   // anymore, as this allows trashing them below.
2406   {
2407     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_sync_pinned);
2408     sync_pinned_region_status();
2409   }
2410 

