/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
  ShenandoahGC(),
  _degen_point(degen_point) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  return true;
}

void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to Full GC.
  heap->clear_cancelled_gc();

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like construct: the switch describes
    // the actual GC cycle, but enters it at different points, depending on which
    // concurrent phase had degenerated.
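    // For orientation: each entry point below runs its own steps and then falls
    // through into the next case, so a cycle that degenerated earlier also runs
    // all of the later phases:
    //   _degenerated_outside_cycle: reset, STW mark, and everything below
    //   _degenerated_mark:          finish mark, select cset, early cleanup, ...
    //   _degenerated_evac:          (re)do evacuation, init update refs, ...
    //   _degenerated_updaterefs:    update refs and roots, final cleanup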
    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      // Degenerated from concurrent root mark: reset the flag for STW mark.
      if (heap->is_concurrent_mark_in_progress()) {
        ShenandoahConcurrentMark::cancel();
        heap->set_concurrent_mark_in_progress(false);
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes(heap->heuristics()->can_unload_classes());

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough work here: finishing the mark only applies when we entered
      // at this point. Continue the mark, handed over from concurrent mark, if
      // concurrent mark has not yet completed.
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();
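    // Fallthrough: from here on, the path is shared with cycles that degenerated
    // during evacuation.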
    case _degenerated_evac:
      // If the heuristics thinks we should do the cycle, this flag would be set,
      // and we can do evacuation. Otherwise, it would be the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        if (_degen_point == _degenerated_evac) {
          // Degeneration under the oom-evac protocol allows the mutator LRB (load
          // reference barrier) to expose references to from-space objects. This is
          // okay, in theory, because we will come to the safepoint here to complete
          // the evacuations and update the references. However, if the from-space
          // reference is written to a region that was EC (empty committed) during
          // final mark, or was recycled after final mark, that region will not have
          // TAMS or UWM updated. Such a region is effectively skipped during update
          // references, which can lead to crashes and corruption if the from-space
          // reference is accessed.
          if (UseTLAB) {
            heap->labs_make_parsable();
          }

          for (size_t i = 0; i < heap->num_regions(); i++) {
            ShenandoahHeapRegion* r = heap->get_region(i);
            if (r->is_active() && r->top() > r->get_update_watermark()) {
              r->set_update_watermark_at_safepoint(r->top());
            }
          }
        }

        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For the objects that are already evacuated, this is
        // a simple check, which is supposed to be fast. This is also safe to do
        // even without degeneration, as the CSet iterator is at the beginning in
        // preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure occurred while evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continued
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();

          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics thinks we should do the cycle, this flag would be set,
      // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }
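    // Fallthrough: cycles that degenerated during concurrent update-refs enter
    // here directly.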
    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, the roots update should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated
  // cycles, because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

void ShenandoahDegenGC::op_reset() {
  ShenandoahHeap::heap()->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(false /*full gc*/);
  mark.clear();
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark;
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup: weak roots and class unloading
  heap->parallel_cleaning(false /*full gc*/);
  // Prepare regions and collection set
  heap->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. Strong one: new allocations would be with the new
  // freeset, which would be outside the collection set, so no cset writes would happen
  // there. Weaker one: new allocations would happen past the update watermark, and so
  // less work would be needed for reference updates (would update the large filler
  // instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from the concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();

  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahDegenGC::op_degenerated_futile() {
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
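      // Unreachable; keeps compilers that cannot see through ShouldNotReachHere()
      // happy about the missing return value.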
"ERROR"; 374 } 375 }