/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _generation(generation),
  _upgraded_to_full(false) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    heap->log_heap_status("At end of Degenerated GC");
  }
  return true;
}

void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  char msg[1024];
  degen_event_message(_degen_point, msg, sizeof(msg));
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

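// Main driver of the degenerated cycle. entry_degenerated() is reached via the
// VM operation above, so everything below runs at a single stop-the-world
// safepoint, re-entering the regular cycle at the point where concurrent
// execution was interrupted.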
void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag, so if we detect a failure after some
  // phase, we have to upgrade the Degenerated GC to a Full GC.
  heap->clear_cancelled_gc(true /* clear oom handler */);

#ifdef ASSERT
  if (heap->mode()->is_generational()) {
    if (_generation->generation_mode() == GenerationMode::GLOBAL) {
      // We can only get to a degenerated global cycle _after_ a concurrent global
      // cycle has been cancelled, in which case we expect the concurrent global
      // cycle to have cancelled the old gc already.
      assert(!heap->is_old_gc_active(), "Old GC should not be active during global cycle.");
    }

    if (!heap->is_concurrent_old_mark_in_progress()) {
      // If we are not marking the old generation, there should be nothing in the old mark queues
      assert(heap->old_generation()->task_queues()->is_empty(), "Old gen task queues should be empty.");
    }
  }
#endif

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like structure: they describe the actual
    // GC cycle, but enter it at different points, depending on which concurrent
    // phase degenerated.

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      if (heap->is_concurrent_old_mark_in_progress()) {
        // We have come straight into a degenerated cycle without running a concurrent cycle
        // first, and the SATB barrier is enabled to support concurrent old marking. The SATB
        // buffer may hold a mix of old and young pointers. The old pointers need to be
        // transferred to the old generation mark queues, and the young pointers are _not_
        // part of this snapshot, so they must be dropped here.
        heap->transfer_old_pointers_from_satb();
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes((!heap->mode()->is_generational() || _generation->generation_mode() == GLOBAL) &&
                               _generation->heuristics()->can_unload_classes());

      if (heap->mode()->is_generational() &&
          (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify))) {
        // Swap remembered sets for young, or if the verifier will run during a global collect
        _generation->swap_remembered_set();
      }

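    // Deliberate fallthrough: an outside-cycle degeneration continues through every
    // phase below, re-entering the normal cycle from its very start.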
    case _degenerated_roots:
      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (!heap->mode()->is_generational()) {
        if (heap->is_concurrent_mark_in_progress()) {
          heap->cancel_concurrent_mark();
        }
      } else {
        if (_generation->is_concurrent_mark_in_progress()) {
          // We want to allow old generation marking to be punctuated by young collections
          // (even if they have degenerated). If this is a global cycle, we'd have cancelled
          // the entire old gc before coming into this switch.
          _generation->cancel_marking();
        }
      }

      if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
        // We only need this if the concurrent cycle has already swapped the card tables.
        // Marking will use the 'read' table, but interesting pointers may have been
        // recorded in the 'write' table in the time between the cancelled concurrent cycle
        // and this degenerated cycle. These pointers need to be included in the 'read' table
        // used to scan the remembered set during the STW mark which follows here.
        _generation->merge_write_table();
      }

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue mark, handed over from concurrent mark if
      // concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degen cycle: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

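    // Marking is complete and the collection set has been chosen; the remaining
    // phases evacuate the cset and update references, unless the heuristics chose
    // a shortcut (no-evacuation) cycle.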
    case _degenerated_evac:

      if (heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
        op_global_coalesce_and_fill();
      }

      // If the heuristics decided we should do the cycle, this flag would be set,
      // and we can do evacuation. Otherwise, it would be the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        if (_degen_point == _degenerated_evac) {
          // Degeneration under oom-evac protocol allows the mutator LRB to expose
          // references to from-space objects. This is okay, in theory, because we
          // will come to the safepoint here to complete the evacuations and update
          // the references. However, if the from-space reference is written to a
          // region that was EC during final mark or was recycled after final mark,
          // it will not have TAMS or UWM updated. Such a region is effectively
          // skipped during update references, which can lead to crashes and corruption
          // if the from-space reference is accessed.
          if (UseTLAB) {
            heap->labs_make_parsable();
          }

          for (size_t i = 0; i < heap->num_regions(); i++) {
            ShenandoahHeapRegion* r = heap->get_region(i);
            if (r->is_active() && r->top() > r->get_update_watermark()) {
              r->set_update_watermark_at_safepoint(r->top());
            }
          }
        }

        // Degeneration under oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as the CSet iterator is at the
        // beginning in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure happened when evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continued
        // the cycle here, we would trash the cset and the alive objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();
          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != NULL) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics decided we should do the cycle, this flag would be set,
      // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      if (ClassUnloading) {
        // Disarm nmethods that were armed in the concurrent cycle.
        // In the case above, update_roots() should have disarmed them already.
        ShenandoahCodeRoots::disarm_nmethods();
      }

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (heap->mode()->is_generational()) {
    // In case degeneration interrupted concurrent evacuation or update references,
    // we need to clean up transient state. Otherwise, these actions have no effect.

    heap->young_generation()->unadjust_available();
    heap->old_generation()->unadjust_available();
    // No need to call old_gen->increase_used(): that was done when the plabs were
    // allocated, accounting for both old evacs and promotions.

    heap->set_alloc_supplement_reserve(0);
    heap->set_young_evac_reserve(0);
    heap->set_old_evac_reserve(0);
    heap->reset_old_evac_expended();
    heap->set_promoted_reserve(0);

    heap->adjust_generation_sizes();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back
  // Degenerated cycles, because that probably means the heap is overloaded
  // and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

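// The op_* helpers below implement the individual phases of the degenerated cycle.
// Most are stop-the-world analogues of the corresponding concurrent phases, reusing
// the same machinery with the concurrent flag set to false.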
void ShenandoahDegenGC::op_reset() {
  _generation->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(_generation, false /*full gc*/);
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark(_generation);
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup weak roots and unload classes
  heap->parallel_cleaning(false /*full gc*/);

  // Prepare regions and collection set
  _generation->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. The strong one: new allocations would come from the
  // new freeset, which is outside the collection set, so no cset writes would happen
  // there. The weaker one: new allocations would happen past the update watermark, so
  // less work would be needed for reference updates (the large filler would be updated
  // instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_global_coalesce_and_fill() {
  ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from the concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

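// Both failure paths abandon the degenerated cycle and run a Full GC under the
// same safepoint: op_degenerated_fail() handles mid-cycle failures (evacuation
// OOM, a pinned cset region), while op_degenerated_futile() handles cycles that
// completed without making good progress.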
void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point, char* buf, size_t len) const {
  jio_snprintf(buf, len, "Pause Degenerated %s GC (%s)", _generation->name(), ShenandoahGC::degen_point_to_string(point));
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerate GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  _upgraded_to_full = true;
}

bool ShenandoahDegenGC::upgraded_to_full() {
  return _upgraded_to_full;
}