src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp (old version)

1050 
1051 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1052 private:
1053   ShenandoahRootEvacuator* _rp;
1054 
1055 public:
1056   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1057     AbstractGangTask("Shenandoah evacuate and update roots"),
1058     _rp(rp) {}
1059 
1060   void work(uint worker_id) {
1061     ShenandoahParallelWorkerSession worker_session(worker_id);
1062     ShenandoahEvacOOMScope oom_evac_scope;
1063     ShenandoahEvacuateUpdateRootsClosure cl;
1064     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1065     _rp->roots_do(worker_id, &cl);
1066   }
1067 };
1068 
1069 void ShenandoahHeap::evacuate_and_update_roots() {
1070 #if COMPILER2_OR_JVMCI
1071   DerivedPointerTable::clear();
1072 #endif
1073   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1074 
1075   {
1076     ShenandoahRootEvacuator rp(workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1077     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1078     workers()->run_task(&roots_task);
1079   }
1080 
1081 #if COMPILER2_OR_JVMCI
1082   DerivedPointerTable::update_pointers();
1083 #endif
1084 }
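
evacuate_and_update_roots() above follows HotSpot's standard gang-task pattern: construct an AbstractGangTask subclass, then hand it to the worker gang, which invokes work() once per active worker. A minimal sketch of that pattern, with hypothetical names (HelloGangTask and its body are invented; only the base-class API is from the code above):

// Minimal sketch of the AbstractGangTask pattern; HelloGangTask is
// hypothetical, only the base-class usage mirrors the code above.
class HelloGangTask : public AbstractGangTask {
public:
  HelloGangTask() : AbstractGangTask("Hello gang task") {}
  void work(uint worker_id) {
    // Runs once on each active worker, in parallel; worker_id ranges over
    // 0 .. active_workers()-1 and typically indexes per-worker data.
    log_info(gc)("Worker %u reporting in", worker_id);
  }
};

// Usage, mirroring run_task() above:
//   HelloGangTask task;
//   workers()->run_task(&task);  // blocks until all workers finish work()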
1085 
1086 // Returns size in bytes
1087 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1088   if (ShenandoahElasticTLAB) {
1089     // With Elastic TLABs, return the max allowed size, and let the allocation path
1091 1090     // figure out the safe size for the current allocation.
1091     return ShenandoahHeapRegion::max_tlab_size_bytes();
1092   } else {
1093     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1094   }
1095 }
1096 
1097 size_t ShenandoahHeap::max_tlab_size() const {
1098   // Returns size in words
1099   return ShenandoahHeapRegion::max_tlab_size_words();
1100 }
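
Note the units: unsafe_max_tlab_alloc() returns bytes, while max_tlab_size() returns words. A sketch of the conversion idiom, using HotSpot's LogHeapWordSize constant (the helper names here are illustrative, not from this file):

// Illustrative helpers, not part of shenandoahHeap.cpp; LogHeapWordSize
// comes from utilities/globalDefinitions.hpp (3 on 64-bit builds).
static size_t bytes_to_words(size_t bytes) {
  return bytes >> LogHeapWordSize;  // e.g. 1024 bytes -> 128 words on 64-bit
}
static size_t words_to_bytes(size_t words) {
  return words << LogHeapWordSize;
}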
1101 


1262  *
1263  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1264  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1265  * is allowed to report dead objects, but is not required to do so.
1266  */
1267 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1268   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1269   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1270     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1271     return;
1272   }
1273 
1274   // Reset bitmap
1275   _aux_bit_map.clear();
1276 
1277   Stack<oop,mtGC> oop_stack;
1278 
1279   // First, we process all GC roots. This populates the work stack with initial objects.
1280   ShenandoahAllRootScanner rp(1, ShenandoahPhaseTimings::_num_phases);
1281   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1282   rp.roots_do_unchecked(&oops);
1283 
1284   // Work through the oop stack to traverse heap.
1285   while (!oop_stack.is_empty()) {
1286     oop obj = oop_stack.pop();
1287     assert(oopDesc::is_oop(obj), "must be a valid oop");
1288     cl->do_object(obj);
1289     obj->oop_iterate(&oops);
1290   }
1291 
1292   assert(oop_stack.is_empty(), "should be empty");
1293 
1294   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1295     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1296   }
1297 }
1298 
1299 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1300   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1301   object_iterate(cl);
1302 }
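
For context, object_iterate() drives a caller-supplied ObjectClosure over every reachable object. A minimal sketch of such a closure (the class and usage below are hypothetical; only the ObjectClosure API itself appears in the code above):

// Hypothetical caller-side sketch; CountingClosure is not part of this file.
class CountingClosure : public ObjectClosure {
private:
  size_t _count;
public:
  CountingClosure() : _count(0) {}
  virtual void do_object(oop obj) { _count++; }
  size_t count() const { return _count; }
};

// Usage (must run at a safepoint, per the assert in object_iterate()):
//   CountingClosure cl;
//   ShenandoahHeap::heap()->object_iterate(&cl);
//   log_info(gc)("Visited " SIZE_FORMAT " objects", cl.count());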


1587 
1588 void ShenandoahHeap::op_traversal() {
1589   traversal_gc()->concurrent_traversal_collection();
1590 }
1591 
1592 void ShenandoahHeap::op_final_traversal() {
1593   traversal_gc()->final_traversal_collection();
1594 }
1595 
1596 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1597   ShenandoahMetricsSnapshot metrics;
1598   metrics.snap_before();
1599 
1600   full_gc()->do_it(cause);
1601   if (UseTLAB) {
1602     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1603     resize_all_tlabs();
1604   }
1605 
1606   metrics.snap_after();

1607 
1608   if (metrics.is_good_progress()) {
1609     _progress_last_gc.set();
1610   } else {
1611     // No good progress was made. Tell the allocation path that we have
1612     // failed to make progress, so it can finally fail.
1613     _progress_last_gc.unset();
1614   }
1615 }
1616 
1617 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1618   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1619   // GC failure via the cancelled_concgc() flag. So, if we detect a failure after
1620   // some phase, we have to upgrade the Degenerated GC to a Full GC.
1621 
1622   clear_cancelled_gc();
1623 
1624   ShenandoahMetricsSnapshot metrics;
1625   metrics.snap_before();
1626 
1627   switch (point) {
1628     case _degenerated_traversal:


1677     case _degenerated_mark:
1678       op_final_mark();
1679       if (cancelled_gc()) {
1680         op_degenerated_fail();
1681         return;
1682       }
1683 
1684       op_cleanup();
1685 
1686     case _degenerated_evac:
1687       // If the heuristics think we should do the cycle, this flag would be set,
1688       // and we can do evacuation. Otherwise, this is the shortcut cycle.
1689       if (is_evacuation_in_progress()) {
1690 
1691         // Degeneration under the oom-evac protocol might have left some objects in
1692         // the collection set un-evacuated. Restart evacuation from the beginning to
1693         // capture all objects. For objects that are already evacuated, this amounts
1694         // to a simple check, which is supposed to be fast. This is also safe to do
1695         // even without degeneration, as the CSet iterator is at the beginning in
1696         // preparation for evacuation anyway.
1697         //
1698         // Before doing that, we need to make sure there were never any cset-pinned
1699         // regions. This can happen if an allocation failure occurred while evacuating
1700         // an about-to-be-pinned object, the oom-evac protocol left the object in
1701         // the collection set, and then the pin reached the cset region. If we continued
1702         // the cycle here, we would trash the cset and the live objects in it. To avoid
1703         // that, we fail degeneration right away and slide into Full GC to recover.
1704 
1705         {
1706           collection_set()->clear_current_index();
1707 
1708           ShenandoahHeapRegion* r;
1709           while ((r = collection_set()->next()) != NULL) {
1710             if (r->is_pinned()) {
1711               cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1712               op_degenerated_fail();
1713               return;
1714             }
1715           }
1716 
1717           collection_set()->clear_current_index();
1718         }
1719 
1720         op_stw_evac();
1721         if (cancelled_gc()) {
1722           op_degenerated_fail();
1723           return;
1724         }
1725       }
1726 
1727       // If the heuristics think we should do the cycle, this flag would be set,
1728       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1729       if (has_forwarded_objects()) {
1730         op_init_updaterefs();
1731         if (cancelled_gc()) {
1732           op_degenerated_fail();
1733           return;
1734         }
1735       }
1736 
1737     case _degenerated_updaterefs:
1738       if (has_forwarded_objects()) {


1742           return;
1743         }
1744       }
1745 
1746       op_cleanup();
1747       break;
1748 
1749     default:
1750       ShouldNotReachHere();
1751   }
1752 
1753   if (ShenandoahVerify) {
1754     verifier()->verify_after_degenerated();
1755   }
1756 
1757   if (VerifyAfterGC) {
1758     Universe::verify();
1759   }
1760 
1761   metrics.snap_after();

1762 
1763   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1764   // because that probably means the heap is overloaded and/or fragmented.
1765   if (!metrics.is_good_progress()) {
1766     _progress_last_gc.unset();
1767     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1768     op_degenerated_futile();
1769   } else {
1770     _progress_last_gc.set();
1771   }
1772 }
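
The switch in op_degenerated() relies on deliberate case fall-through: entering at an earlier degeneration point also runs every later phase. That only works if the ShenandoahDegenPoint enumerators are declared in cycle order, roughly as sketched below (inferred from this file; the authoritative definition lives in the ShenandoahHeap header):

// Inferred ordering; only the enumerators visible in this file are listed.
// The declaration order is what makes the fall-through correct: entering
// at an earlier point executes every later phase too.
enum ShenandoahDegenPoint {
  _degenerated_traversal,   // degenerate during concurrent traversal
  _degenerated_mark,        // degenerate during/after concurrent mark
  _degenerated_evac,        // degenerate during evacuation
  _degenerated_updaterefs   // degenerate during update-references
  // ... other points elided; see the header for the full list
};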
1773 
1774 void ShenandoahHeap::op_degenerated_fail() {
1775   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1776   shenandoah_policy()->record_degenerated_upgrade_to_full();
1777   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1778 }
1779 
1780 void ShenandoahHeap::op_degenerated_futile() {
1781   shenandoah_policy()->record_degenerated_upgrade_to_full();
1782   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1783 }
1784 
1785 void ShenandoahHeap::stop_concurrent_marking() {


src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp (new version)

1050 
1051 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1052 private:
1053   ShenandoahRootEvacuator* _rp;
1054 
1055 public:
1056   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1057     AbstractGangTask("Shenandoah evacuate and update roots"),
1058     _rp(rp) {}
1059 
1060   void work(uint worker_id) {
1061     ShenandoahParallelWorkerSession worker_session(worker_id);
1062     ShenandoahEvacOOMScope oom_evac_scope;
1063     ShenandoahEvacuateUpdateRootsClosure cl;
1064     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1065     _rp->roots_do(worker_id, &cl);
1066   }
1067 };
1068 
1069 void ShenandoahHeap::evacuate_and_update_roots() {
1070 #if defined(COMPILER2) || INCLUDE_JVMCI
1071   DerivedPointerTable::clear();
1072 #endif
1073   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1074 
1075   {
1076     ShenandoahRootEvacuator rp(workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1077     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1078     workers()->run_task(&roots_task);
1079   }
1080 
1081 #if defined(COMPILER2) || INCLUDE_JVMCI
1082   DerivedPointerTable::update_pointers();
1083 #endif
1084 }
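
The updated version spells out defined(COMPILER2) || INCLUDE_JVMCI instead of the COMPILER2_OR_JVMCI convenience macro, presumably because the target JDK's utilities/macros.hpp does not define it. For reference, newer JDKs define the macro along these lines (a sketch, not part of this change):

// Sketch of the convenience macro this change expands away; newer JDKs
// define it roughly like this in utilities/macros.hpp.
#if defined(COMPILER2) || INCLUDE_JVMCI
#define COMPILER2_OR_JVMCI 1
#else
#define COMPILER2_OR_JVMCI 0
#endif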
1085 
1086 // Returns size in bytes
1087 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1088   if (ShenandoahElasticTLAB) {
1089     // With Elastic TLABs, return the max allowed size, and let the allocation path
1090     // figure out the safe size for the current allocation.
1091     return ShenandoahHeapRegion::max_tlab_size_bytes();
1092   } else {
1093     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1094   }
1095 }
1096 
1097 size_t ShenandoahHeap::max_tlab_size() const {
1098   // Returns size in words
1099   return ShenandoahHeapRegion::max_tlab_size_words();
1100 }
1101 


1262  *
1263  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1264  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1265  * is allowed to report dead objects, but is not required to do so.
1266  */
1267 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1268   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1269   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1270     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1271     return;
1272   }
1273 
1274   // Reset bitmap
1275   _aux_bit_map.clear();
1276 
1277   Stack<oop,mtGC> oop_stack;
1278 
1279   // First, we process all GC roots. This populates the work stack with initial objects.
1280   ShenandoahAllRootScanner rp(1, ShenandoahPhaseTimings::_num_phases);
1281   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1282   rp.roots_do(0, &oops);
1283 
1284   // Work through the oop stack to traverse heap.
1285   while (!oop_stack.is_empty()) {
1286     oop obj = oop_stack.pop();
1287     assert(oopDesc::is_oop(obj), "must be a valid oop");
1288     cl->do_object(obj);
1289     obj->oop_iterate(&oops);
1290   }
1291 
1292   assert(oop_stack.is_empty(), "should be empty");
1293 
1294   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1295     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1296   }
1297 }
1298 
1299 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1300   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1301   object_iterate(cl);
1302 }


1587 
1588 void ShenandoahHeap::op_traversal() {
1589   traversal_gc()->concurrent_traversal_collection();
1590 }
1591 
1592 void ShenandoahHeap::op_final_traversal() {
1593   traversal_gc()->final_traversal_collection();
1594 }
1595 
1596 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1597   ShenandoahMetricsSnapshot metrics;
1598   metrics.snap_before();
1599 
1600   full_gc()->do_it(cause);
1601   if (UseTLAB) {
1602     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1603     resize_all_tlabs();
1604   }
1605 
1606   metrics.snap_after();
1607   metrics.print();
1608 
1609   if (metrics.is_good_progress("Full GC")) {
1610     _progress_last_gc.set();
1611   } else {
1612     // No good progress was made. Tell the allocation path that we have
1613     // failed to make progress, so it can finally fail.
1614     _progress_last_gc.unset();
1615   }
1616 }
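
Relative to the old version, op_full() now also calls metrics.print() and passes a phase label to is_good_progress(). From the call sites in this file, the snapshot type needs roughly the following interface (inferred; see shenandoahMetrics.hpp for the real declaration):

// Interface inferred from the call sites above; the base class and exact
// signatures are assumptions, not copied from shenandoahMetrics.hpp.
class ShenandoahMetricsSnapshot : public StackObj {  // base class assumed
public:
  void snap_before();                        // capture state before the cycle
  void snap_after();                         // capture state after the cycle
  void print();                              // log the before/after delta
  bool is_good_progress(const char* label);  // enough free space reclaimed?
};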
1617 
1618 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1619   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1620   // GC failure via the cancelled_concgc() flag. So, if we detect a failure after
1621   // some phase, we have to upgrade the Degenerated GC to a Full GC.
1622 
1623   clear_cancelled_gc();
1624 
1625   ShenandoahMetricsSnapshot metrics;
1626   metrics.snap_before();
1627 
1628   switch (point) {
1629     case _degenerated_traversal:


1678     case _degenerated_mark:
1679       op_final_mark();
1680       if (cancelled_gc()) {
1681         op_degenerated_fail();
1682         return;
1683       }
1684 
1685       op_cleanup();
1686 
1687     case _degenerated_evac:
1688       // If the heuristics think we should do the cycle, this flag would be set,
1689       // and we can do evacuation. Otherwise, this is the shortcut cycle.
1690       if (is_evacuation_in_progress()) {
1691 
1692         // Degeneration under the oom-evac protocol might have left some objects in
1693         // the collection set un-evacuated. Restart evacuation from the beginning to
1694         // capture all objects. For objects that are already evacuated, this amounts
1695         // to a simple check, which is supposed to be fast. This is also safe to do
1696         // even without degeneration, as the CSet iterator is at the beginning in
1697         // preparation for evacuation anyway.
1698         collection_set()->clear_current_index();
1699 
1700         op_stw_evac();
1701         if (cancelled_gc()) {
1702           op_degenerated_fail();
1703           return;
1704         }
1705       }
1706 
1707       // If the heuristics think we should do the cycle, this flag would be set,
1708       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1709       if (has_forwarded_objects()) {
1710         op_init_updaterefs();
1711         if (cancelled_gc()) {
1712           op_degenerated_fail();
1713           return;
1714         }
1715       }
1716 
1717     case _degenerated_updaterefs:
1718       if (has_forwarded_objects()) {


1722           return;
1723         }
1724       }
1725 
1726       op_cleanup();
1727       break;
1728 
1729     default:
1730       ShouldNotReachHere();
1731   }
1732 
1733   if (ShenandoahVerify) {
1734     verifier()->verify_after_degenerated();
1735   }
1736 
1737   if (VerifyAfterGC) {
1738     Universe::verify();
1739   }
1740 
1741   metrics.snap_after();
1742   metrics.print();
1743 
1744   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1745   // because that probably means the heap is overloaded and/or fragmented.
1746   if (!metrics.is_good_progress("Degenerated GC")) {
1747     _progress_last_gc.unset();
1748     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1749     op_degenerated_futile();
1750   } else {
1751     _progress_last_gc.set();
1752   }
1753 }
1754 
1755 void ShenandoahHeap::op_degenerated_fail() {
1756   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1757   shenandoah_policy()->record_degenerated_upgrade_to_full();
1758   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1759 }
1760 
1761 void ShenandoahHeap::op_degenerated_futile() {
1762   shenandoah_policy()->record_degenerated_upgrade_to_full();
1763   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1764 }
1765 
1766 void ShenandoahHeap::stop_concurrent_marking() {

