1 /* 2 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. 4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 * 25 */ 26 27 #include "precompiled.hpp" 28 #include "gc/shenandoah/mode/shenandoahMode.hpp" 29 #include "gc/shenandoah/shenandoahAsserts.hpp" 30 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 31 #include "gc/shenandoah/shenandoahConcurrentGC.hpp" 32 #include "gc/shenandoah/shenandoahGenerationalControlThread.hpp" 33 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp" 34 #include "gc/shenandoah/shenandoahFreeSet.hpp" 35 #include "gc/shenandoah/shenandoahFullGC.hpp" 36 #include "gc/shenandoah/shenandoahGeneration.hpp" 37 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" 38 #include "gc/shenandoah/shenandoahOldGC.hpp" 39 #include "gc/shenandoah/shenandoahOldGeneration.hpp" 40 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" 42 #include "gc/shenandoah/shenandoahPacer.inline.hpp" 43 #include "gc/shenandoah/shenandoahUtils.hpp" 44 #include "gc/shenandoah/shenandoahYoungGeneration.hpp" 45 #include "logging/log.hpp" 46 #include "memory/metaspaceUtils.hpp" 47 #include "memory/metaspaceStats.hpp" 48 #include "runtime/atomic.hpp" 49 50 ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() : 51 ShenandoahController(), 52 _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true), 53 _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true), 54 _requested_gc_cause(GCCause::_no_gc), 55 _requested_generation(GLOBAL), 56 _degen_point(ShenandoahGC::_degenerated_outside_cycle), 57 _degen_generation(nullptr), 58 _mode(none) { 59 shenandoah_assert_generational(); 60 set_name("Shenandoah Control Thread"); 61 create_and_start(); 62 } 63 64 void ShenandoahGenerationalControlThread::run_service() { 65 ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap(); 66 67 const GCMode default_mode = concurrent_normal; 68 ShenandoahGenerationType generation = GLOBAL; 69 70 double last_shrink_time = os::elapsedTime(); 71 uint age_period = 0; 72 73 // Shrink period avoids constantly polling regions for shrinking. 74 // Having a period 10x lower than the delay would mean we hit the 75 // shrinking with lag of less than 1/10-th of true delay. 76 // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds. 77 const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; 78 79 ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy(); 80 81 // Heuristics are notified of allocation failures here and other outcomes 82 // of the cycle. They're also used here to control whether the Nth consecutive 83 // degenerated cycle should be 'promoted' to a full cycle. The decision to 84 // trigger a cycle or not is evaluated on the regulator thread. 85 ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics(); 86 while (!in_graceful_shutdown() && !should_terminate()) { 87 // Figure out if we have pending requests. 88 const bool alloc_failure_pending = _alloc_failure_gc.is_set(); 89 const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set(); 90 91 GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc); 92 93 const bool is_gc_requested = ShenandoahCollectorPolicy::is_requested_gc(cause); 94 95 // This control loop iteration has seen this much allocation. 96 const size_t allocs_seen = reset_allocs_seen(); 97 98 // Check if we have seen a new target for soft max heap size. 99 const bool soft_max_changed = heap->check_soft_max_changed(); 100 101 // Choose which GC mode to run in. The block below should select a single mode. 102 set_gc_mode(none); 103 ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset; 104 105 if (alloc_failure_pending) { 106 // Allocation failure takes precedence: we have to deal with it first thing 107 cause = GCCause::_allocation_failure; 108 109 // Consume the degen point, and seed it with default value 110 degen_point = _degen_point; 111 _degen_point = ShenandoahGC::_degenerated_outside_cycle; 112 113 if (degen_point == ShenandoahGC::_degenerated_outside_cycle) { 114 _degen_generation = heap->young_generation(); 115 } else { 116 assert(_degen_generation != nullptr, "Need to know which generation to resume"); 117 } 118 119 ShenandoahHeuristics* heuristics = _degen_generation->heuristics(); 120 generation = _degen_generation->type(); 121 bool old_gen_evacuation_failed = heap->old_generation()->clear_failed_evacuation(); 122 123 heuristics->log_trigger("Handle Allocation Failure"); 124 125 // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed 126 if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && 127 !old_gen_evacuation_failed && !humongous_alloc_failure_pending) { 128 heuristics->record_allocation_failure_gc(); 129 policy->record_alloc_failure_to_degenerated(degen_point); 130 set_gc_mode(stw_degenerated); 131 } else { 132 heuristics->record_allocation_failure_gc(); 133 policy->record_alloc_failure_to_full(); 134 generation = GLOBAL; 135 set_gc_mode(stw_full); 136 } 137 } else if (is_gc_requested) { 138 generation = GLOBAL; 139 global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(cause)); 140 global_heuristics->record_requested_gc(); 141 142 if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) { 143 set_gc_mode(stw_full); 144 } else { 145 set_gc_mode(default_mode); 146 // Unload and clean up everything 147 heap->set_unload_classes(global_heuristics->can_unload_classes()); 148 } 149 } else { 150 // We should only be here if the regulator requested a cycle or if 151 // there is an old generation mark in progress. 152 if (cause == GCCause::_shenandoah_concurrent_gc) { 153 if (_requested_generation == OLD && heap->old_generation()->is_doing_mixed_evacuations()) { 154 // If a request to start an old cycle arrived while an old cycle was running, but _before_ 155 // it chose any regions for evacuation we don't want to start a new old cycle. Rather, we want 156 // the heuristic to run a young collection so that we can evacuate some old regions. 157 assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking"); 158 generation = YOUNG; 159 } else { 160 generation = _requested_generation; 161 } 162 163 // preemption was requested or this is a regular cycle 164 set_gc_mode(default_mode); 165 166 // Don't start a new old marking if there is one already in progress 167 if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) { 168 set_gc_mode(servicing_old); 169 } 170 171 if (generation == GLOBAL) { 172 heap->set_unload_classes(global_heuristics->should_unload_classes()); 173 } else { 174 heap->set_unload_classes(false); 175 } 176 } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) { 177 // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for 178 // mixed evacuation in progress, so resume working on that. 179 log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress", 180 heap->is_concurrent_old_mark_in_progress() ? "" : " NOT", 181 heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT"); 182 183 cause = GCCause::_shenandoah_concurrent_gc; 184 generation = OLD; 185 set_gc_mode(servicing_old); 186 heap->set_unload_classes(false); 187 } 188 } 189 190 const bool gc_requested = (gc_mode() != none); 191 assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set"); 192 193 if (gc_requested) { 194 // Blow away all soft references on this cycle, if handling allocation failure, 195 // either implicit or explicit GC request, or we are requested to do so unconditionally. 196 if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) { 197 heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); 198 } 199 200 // GC is starting, bump the internal ID 201 update_gc_id(); 202 203 heap->reset_bytes_allocated_since_gc_start(); 204 205 MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics(); 206 207 // If GC was requested, we are sampling the counters even without actual triggers 208 // from allocation machinery. This captures GC phases more accurately. 209 heap->set_forced_counters_update(true); 210 211 // If GC was requested, we better dump freeset data for performance debugging 212 heap->free_set()->log_status_under_lock(); 213 214 // In case this is a degenerated cycle, remember whether original cycle was aging. 215 const bool was_aging_cycle = heap->is_aging_cycle(); 216 heap->set_aging_cycle(false); 217 218 switch (gc_mode()) { 219 case concurrent_normal: { 220 // At this point: 221 // if (generation == YOUNG), this is a normal YOUNG cycle 222 // if (generation == OLD), this is a bootstrap OLD cycle 223 // if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc() 224 // In all three cases, we want to age old objects if this is an aging cycle 225 if (age_period-- == 0) { 226 heap->set_aging_cycle(true); 227 age_period = ShenandoahAgingCyclePeriod - 1; 228 } 229 service_concurrent_normal_cycle(heap, generation, cause); 230 break; 231 } 232 case stw_degenerated: { 233 heap->set_aging_cycle(was_aging_cycle); 234 service_stw_degenerated_cycle(cause, degen_point); 235 break; 236 } 237 case stw_full: { 238 if (age_period-- == 0) { 239 heap->set_aging_cycle(true); 240 age_period = ShenandoahAgingCyclePeriod - 1; 241 } 242 service_stw_full_cycle(cause); 243 break; 244 } 245 case servicing_old: { 246 assert(generation == OLD, "Expected old generation here"); 247 GCIdMark gc_id_mark; 248 service_concurrent_old_cycle(heap, cause); 249 break; 250 } 251 default: 252 ShouldNotReachHere(); 253 } 254 255 // If this was the requested GC cycle, notify waiters about it 256 if (is_gc_requested) { 257 notify_gc_waiters(); 258 } 259 260 // If this was the allocation failure GC cycle, notify waiters about it 261 if (alloc_failure_pending) { 262 notify_alloc_failure_waiters(); 263 } 264 265 // Report current free set state at the end of cycle, whether 266 // it is a normal completion, or the abort. 267 heap->free_set()->log_status_under_lock(); 268 269 { 270 // Notify Universe about new heap usage. This has implications for 271 // global soft refs policy, and we better report it every time heap 272 // usage goes down. 273 ShenandoahHeapLocker locker(heap->lock()); 274 heap->update_capacity_and_used_at_gc(); 275 } 276 277 // Signal that we have completed a visit to all live objects. 278 heap->record_whole_heap_examined_timestamp(); 279 280 // Disable forced counters update, and update counters one more time 281 // to capture the state at the end of GC session. 282 heap->handle_force_counters_update(); 283 heap->set_forced_counters_update(false); 284 285 // Retract forceful part of soft refs policy 286 heap->soft_ref_policy()->set_should_clear_all_soft_refs(false); 287 288 // Clear metaspace oom flag, if current cycle unloaded classes 289 if (heap->unload_classes()) { 290 global_heuristics->clear_metaspace_oom(); 291 } 292 293 process_phase_timings(heap); 294 295 // Print Metaspace change following GC (if logging is enabled). 296 MetaspaceUtils::print_metaspace_change(meta_sizes); 297 298 // GC is over, we are at idle now 299 if (ShenandoahPacing) { 300 heap->pacer()->setup_for_idle(); 301 } 302 } else { 303 // Report to pacer that we have seen this many words allocated 304 if (ShenandoahPacing && (allocs_seen > 0)) { 305 heap->pacer()->report_alloc(allocs_seen); 306 } 307 } 308 309 const double current = os::elapsedTime(); 310 311 if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) { 312 // Explicit GC tries to uncommit everything down to min capacity. 313 // Soft max change tries to uncommit everything down to target capacity. 314 // Periodic uncommit tries to uncommit suitable regions down to min capacity. 315 316 double shrink_before = (is_gc_requested || soft_max_changed) ? 317 current : 318 current - (ShenandoahUncommitDelay / 1000.0); 319 320 size_t shrink_until = soft_max_changed ? 321 heap->soft_max_capacity() : 322 heap->min_capacity(); 323 324 heap->maybe_uncommit(shrink_before, shrink_until); 325 heap->phase_timings()->flush_cycle_to_global(); 326 last_shrink_time = current; 327 } 328 329 // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle. 330 if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) { 331 // The timed wait is necessary because this thread has a responsibility to send 332 // 'alloc_words' to the pacer when it does not perform a GC. 333 MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag); 334 lock.wait(ShenandoahControlIntervalMax); 335 } 336 } 337 338 // Wait for the actual stop(), can't leave run_service() earlier. 339 while (!should_terminate()) { 340 os::naked_short_sleep(ShenandoahControlIntervalMin); 341 } 342 } 343 344 void ShenandoahGenerationalControlThread::process_phase_timings(const ShenandoahGenerationalHeap* heap) { 345 // Commit worker statistics to cycle data 346 heap->phase_timings()->flush_par_workers_to_cycle(); 347 if (ShenandoahPacing) { 348 heap->pacer()->flush_stats_to_cycle(); 349 } 350 351 ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker(); 352 ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global(); 353 354 // Print GC stats for current cycle 355 { 356 LogTarget(Info, gc, stats) lt; 357 if (lt.is_enabled()) { 358 ResourceMark rm; 359 LogStream ls(lt); 360 heap->phase_timings()->print_cycle_on(&ls); 361 evac_tracker->print_evacuations_on(&ls, &evac_stats.workers, 362 &evac_stats.mutators); 363 if (ShenandoahPacing) { 364 heap->pacer()->print_cycle_on(&ls); 365 } 366 } 367 } 368 369 // Commit statistics to globals 370 heap->phase_timings()->flush_cycle_to_global(); 371 } 372 373 // Young and old concurrent cycles are initiated by the regulator. Implicit 374 // and explicit GC requests are handled by the controller thread and always 375 // run a global cycle (which is concurrent by default, but may be overridden 376 // by command line options). Old cycles always degenerate to a global cycle. 377 // Young cycles are degenerated to complete the young cycle. Young 378 // and old degen may upgrade to Full GC. Full GC may also be 379 // triggered directly by a System.gc() invocation. 380 // 381 // 382 // +-----+ Idle +-----+-----------+---------------------+ 383 // | + | | | 384 // | | | | | 385 // | | v | | 386 // | | Bootstrap Old +-- | ------------+ | 387 // | | + | | | 388 // | | | | | | 389 // | v v v v | 390 // | Resume Old <----------+ Young +--> Young Degen | 391 // | + + ^ + + | 392 // v | | | | | | 393 // Global <-+ | +----------------------------+ | | 394 // + | | | 395 // | v v | 396 // +---> Global Degen +--------------------> Full <----+ 397 // 398 void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(ShenandoahGenerationalHeap* heap, 399 const ShenandoahGenerationType generation, 400 GCCause::Cause cause) { 401 GCIdMark gc_id_mark; 402 switch (generation) { 403 case YOUNG: { 404 // Run a young cycle. This might or might not, have interrupted an ongoing 405 // concurrent mark in the old generation. We need to think about promotions 406 // in this case. Promoted objects should be above the TAMS in the old regions 407 // they end up in, but we have to be sure we don't promote into any regions 408 // that are in the cset. 409 log_info(gc, ergo)("Start GC cycle (Young)"); 410 service_concurrent_cycle(heap->young_generation(), cause, false); 411 break; 412 } 413 case OLD: { 414 log_info(gc, ergo)("Start GC cycle (Old)"); 415 service_concurrent_old_cycle(heap, cause); 416 break; 417 } 418 case GLOBAL: { 419 log_info(gc, ergo)("Start GC cycle (Global)"); 420 service_concurrent_cycle(heap->global_generation(), cause, false); 421 break; 422 } 423 default: 424 ShouldNotReachHere(); 425 } 426 } 427 428 void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(ShenandoahGenerationalHeap* heap, GCCause::Cause &cause) { 429 ShenandoahOldGeneration* old_generation = heap->old_generation(); 430 ShenandoahYoungGeneration* young_generation = heap->young_generation(); 431 ShenandoahOldGeneration::State original_state = old_generation->state(); 432 433 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 434 435 switch (original_state) { 436 case ShenandoahOldGeneration::FILLING: { 437 ShenandoahGCSession session(cause, old_generation); 438 _allow_old_preemption.set(); 439 old_generation->entry_coalesce_and_fill(); 440 _allow_old_preemption.unset(); 441 442 // Before bootstrapping begins, we must acknowledge any cancellation request. 443 // If the gc has not been cancelled, this does nothing. If it has been cancelled, 444 // this will clear the cancellation request and exit before starting the bootstrap 445 // phase. This will allow the young GC cycle to proceed normally. If we do not 446 // acknowledge the cancellation request, the subsequent young cycle will observe 447 // the request and essentially cancel itself. 448 if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) { 449 log_info(gc)("Preparation for old generation cycle was cancelled"); 450 return; 451 } 452 453 // Coalescing threads completed and nothing was cancelled. it is safe to transition from this state. 454 old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); 455 return; 456 } 457 case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP: 458 old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING); 459 case ShenandoahOldGeneration::BOOTSTRAPPING: { 460 // Configure the young generation's concurrent mark to put objects in 461 // old regions into the concurrent mark queues associated with the old 462 // generation. The young cycle will run as normal except that rather than 463 // ignore old references it will mark and enqueue them in the old concurrent 464 // task queues but it will not traverse them. 465 set_gc_mode(bootstrapping_old); 466 young_generation->set_old_gen_task_queues(old_generation->task_queues()); 467 ShenandoahGCSession session(cause, young_generation); 468 service_concurrent_cycle(heap, young_generation, cause, true); 469 process_phase_timings(heap); 470 if (heap->cancelled_gc()) { 471 // Young generation bootstrap cycle has failed. Concurrent mark for old generation 472 // is going to resume after degenerated bootstrap cycle completes. 473 log_info(gc)("Bootstrap cycle for old generation was cancelled"); 474 return; 475 } 476 477 // Reset the degenerated point. Normally this would happen at the top 478 // of the control loop, but here we have just completed a young cycle 479 // which has bootstrapped the old concurrent marking. 480 _degen_point = ShenandoahGC::_degenerated_outside_cycle; 481 482 // From here we will 'resume' the old concurrent mark. This will skip reset 483 // and init mark for the concurrent mark. All of that work will have been 484 // done by the bootstrapping young cycle. 485 set_gc_mode(servicing_old); 486 old_generation->transition_to(ShenandoahOldGeneration::MARKING); 487 } 488 case ShenandoahOldGeneration::MARKING: { 489 ShenandoahGCSession session(cause, old_generation); 490 bool marking_complete = resume_concurrent_old_cycle(old_generation, cause); 491 if (marking_complete) { 492 assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking"); 493 if (original_state == ShenandoahOldGeneration::MARKING) { 494 heap->mmu_tracker()->record_old_marking_increment(true); 495 heap->log_heap_status("At end of Concurrent Old Marking finishing increment"); 496 } 497 } else if (original_state == ShenandoahOldGeneration::MARKING) { 498 heap->mmu_tracker()->record_old_marking_increment(false); 499 heap->log_heap_status("At end of Concurrent Old Marking increment"); 500 } 501 break; 502 } 503 default: 504 fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state())); 505 } 506 } 507 508 bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) { 509 assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress"); 510 log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks()); 511 512 ShenandoahHeap* heap = ShenandoahHeap::heap(); 513 514 // We can only tolerate being cancelled during concurrent marking or during preparation for mixed 515 // evacuation. This flag here (passed by reference) is used to control precisely where the regulator 516 // is allowed to cancel a GC. 517 ShenandoahOldGC gc(generation, _allow_old_preemption); 518 if (gc.collect(cause)) { 519 heap->notify_gc_progress(); 520 generation->record_success_concurrent(false); 521 } 522 523 if (heap->cancelled_gc()) { 524 // It's possible the gc cycle was cancelled after the last time 525 // the collection checked for cancellation. In which case, the 526 // old gc cycle is still completed, and we have to deal with this 527 // cancellation. We set the degeneration point to be outside 528 // the cycle because if this is an allocation failure, that is 529 // what must be done (there is no degenerated old cycle). If the 530 // cancellation was due to a heuristic wanting to start a young 531 // cycle, then we are not actually going to a degenerated cycle, 532 // so the degenerated point doesn't matter here. 533 check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle); 534 if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) { 535 heap->shenandoah_policy()->record_interrupted_old(); 536 } 537 return false; 538 } 539 return true; 540 } 541 542 void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) { 543 // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during 544 // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there. 545 // If second allocation failure happens during Degenerated GC cycle (for example, when GC 546 // tries to evac something and no memory is available), cycle degrades to Full GC. 547 // 548 // There are also a shortcut through the normal cycle: immediate garbage shortcut, when 549 // heuristics says there are no regions to compact, and all the collection comes from immediately 550 // reclaimable regions. 551 // 552 // ................................................................................................ 553 // 554 // (immediate garbage shortcut) Concurrent GC 555 // /-------------------------------------------\ 556 // | | 557 // | | 558 // | | 559 // | v 560 // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END] 561 // | | | ^ 562 // | (af) | (af) | (af) | 563 // ..................|....................|.................|..............|....................... 564 // | | | | 565 // | | | | Degenerated GC 566 // v v v | 567 // STW Mark ----------> STW Evac ----> STW Update-Refs ----->o 568 // | | | ^ 569 // | (af) | (af) | (af) | 570 // ..................|....................|.................|..............|....................... 571 // | | | | 572 // | v | | Full GC 573 // \------------------->o<----------------/ | 574 // | | 575 // v | 576 // Full GC --------------------------/ 577 // 578 if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return; 579 580 ShenandoahHeap* heap = ShenandoahHeap::heap(); 581 ShenandoahGCSession session(cause, generation); 582 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 583 584 service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap); 585 } 586 587 void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHeap* heap, 588 ShenandoahGeneration* generation, 589 GCCause::Cause& cause, 590 bool do_old_gc_bootstrap) { 591 assert(!generation->is_old(), "Old GC takes a different control path"); 592 593 ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap); 594 if (gc.collect(cause)) { 595 // Cycle is complete 596 heap->notify_gc_progress(); 597 generation->record_success_concurrent(gc.abbreviated()); 598 } else { 599 assert(heap->cancelled_gc(), "Must have been cancelled"); 600 check_cancellation_or_degen(gc.degen_point()); 601 602 // Concurrent young-gen collection degenerates to young 603 // collection. Same for global collections. 604 _degen_generation = generation; 605 } 606 const char* msg; 607 ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker(); 608 if (generation->is_young()) { 609 if (heap->cancelled_gc()) { 610 msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" : 611 "At end of Interrupted Concurrent Young GC"; 612 } else { 613 // We only record GC results if GC was successful 614 msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" : 615 "At end of Concurrent Young GC"; 616 if (heap->collection_set()->has_old_regions()) { 617 mmu_tracker->record_mixed(get_gc_id()); 618 } else if (do_old_gc_bootstrap) { 619 mmu_tracker->record_bootstrap(get_gc_id()); 620 } else { 621 mmu_tracker->record_young(get_gc_id()); 622 } 623 } 624 } else { 625 assert(generation->is_global(), "If not young, must be GLOBAL"); 626 assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC"); 627 if (heap->cancelled_gc()) { 628 msg = "At end of Interrupted Concurrent GLOBAL GC"; 629 } else { 630 // We only record GC results if GC was successful 631 msg = "At end of Concurrent Global GC"; 632 mmu_tracker->record_global(get_gc_id()); 633 } 634 } 635 heap->log_heap_status(msg); 636 } 637 638 bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) { 639 ShenandoahHeap* heap = ShenandoahHeap::heap(); 640 if (!heap->cancelled_gc()) { 641 return false; 642 } 643 644 if (in_graceful_shutdown()) { 645 return true; 646 } 647 648 assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle, 649 "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); 650 651 if (is_alloc_failure_gc()) { 652 _degen_point = point; 653 _preemption_requested.unset(); 654 return true; 655 } 656 657 if (_preemption_requested.is_set()) { 658 assert(_requested_generation == YOUNG, "Only young GCs may preempt old."); 659 _preemption_requested.unset(); 660 661 // Old generation marking is only cancellable during concurrent marking. 662 // Once final mark is complete, the code does not check again for cancellation. 663 // If old generation was cancelled for an allocation failure, we wouldn't 664 // make it to this case. The calling code is responsible for forcing a 665 // cancellation due to allocation failure into a degenerated cycle. 666 _degen_point = point; 667 heap->clear_cancelled_gc(false /* clear oom handler */); 668 return true; 669 } 670 671 fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking"); 672 return false; 673 } 674 675 void ShenandoahGenerationalControlThread::stop_service() { 676 // Nothing to do here. 677 } 678 679 void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) { 680 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 681 682 GCIdMark gc_id_mark; 683 ShenandoahGCSession session(cause, heap->global_generation()); 684 685 ShenandoahFullGC gc; 686 gc.collect(cause); 687 } 688 689 void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, 690 ShenandoahGC::ShenandoahDegenPoint point) { 691 assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); 692 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 693 694 GCIdMark gc_id_mark; 695 ShenandoahGCSession session(cause, _degen_generation); 696 697 ShenandoahDegenGC gc(point, _degen_generation); 698 gc.collect(cause); 699 700 assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks"); 701 if (_degen_generation->is_global()) { 702 assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks"); 703 assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks"); 704 } else { 705 assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global."); 706 ShenandoahOldGeneration* old = heap->old_generation(); 707 if (old->is_bootstrapping()) { 708 old->transition_to(ShenandoahOldGeneration::MARKING); 709 } 710 } 711 } 712 713 void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) { 714 if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) { 715 handle_requested_gc(cause); 716 } 717 } 718 719 bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenerationType generation) { 720 if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) { 721 // Ignore subsequent requests from the heuristics 722 log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s", 723 BOOL_TO_STR(_preemption_requested.is_set()), 724 GCCause::to_string(_requested_gc_cause), 725 BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc())); 726 return false; 727 } 728 729 if (gc_mode() == none) { 730 GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc); 731 if (existing != GCCause::_no_gc) { 732 log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing)); 733 return false; 734 } 735 736 _requested_generation = generation; 737 notify_control_thread(); 738 739 MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); 740 while (gc_mode() == none) { 741 ml.wait(); 742 } 743 return true; 744 } 745 746 if (preempt_old_marking(generation)) { 747 assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode())); 748 GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc); 749 if (existing != GCCause::_no_gc) { 750 log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing)); 751 return false; 752 } 753 754 log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation)); 755 _requested_generation = generation; 756 _preemption_requested.set(); 757 ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc); 758 notify_control_thread(); 759 760 MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); 761 while (gc_mode() == servicing_old) { 762 ml.wait(); 763 } 764 return true; 765 } 766 767 log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s", 768 gc_mode_name(gc_mode()), 769 BOOL_TO_STR(_allow_old_preemption.is_set())); 770 return false; 771 } 772 773 void ShenandoahGenerationalControlThread::notify_control_thread() { 774 MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag); 775 _control_lock.notify(); 776 } 777 778 bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGenerationType generation) { 779 return (generation == YOUNG) && _allow_old_preemption.try_unset(); 780 } 781 782 void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) { 783 // For normal requested GCs (System.gc) we want to block the caller. However, 784 // for whitebox requested GC, we want to initiate the GC and return immediately. 785 // The whitebox caller thread will arrange for itself to wait until the GC notifies 786 // it that has reached the requested breakpoint (phase in the GC). 787 if (cause == GCCause::_wb_breakpoint) { 788 Atomic::xchg(&_requested_gc_cause, cause); 789 notify_control_thread(); 790 return; 791 } 792 793 // Make sure we have at least one complete GC cycle before unblocking 794 // from the explicit GC request. 795 // 796 // This is especially important for weak references cleanup and/or native 797 // resources (e.g. DirectByteBuffers) machinery: when explicit GC request 798 // comes very late in the already running cycle, it would miss lots of new 799 // opportunities for cleanup that were made available before the caller 800 // requested the GC. 801 802 MonitorLocker ml(&_gc_waiters_lock); 803 size_t current_gc_id = get_gc_id(); 804 size_t required_gc_id = current_gc_id + 1; 805 while (current_gc_id < required_gc_id) { 806 // This races with the regulator thread to start a concurrent gc and the 807 // control thread to clear it at the start of a cycle. Threads here are 808 // allowed to escalate a heuristic's request for concurrent gc. 809 GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause); 810 if (existing != GCCause::_no_gc) { 811 log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing)); 812 } 813 814 notify_control_thread(); 815 ml.wait(); 816 current_gc_id = get_gc_id(); 817 } 818 } 819 820 void ShenandoahGenerationalControlThread::notify_gc_waiters() { 821 MonitorLocker ml(&_gc_waiters_lock); 822 ml.notify_all(); 823 } 824 825 const char* ShenandoahGenerationalControlThread::gc_mode_name(ShenandoahGenerationalControlThread::GCMode mode) { 826 switch (mode) { 827 case none: return "idle"; 828 case concurrent_normal: return "normal"; 829 case stw_degenerated: return "degenerated"; 830 case stw_full: return "full"; 831 case servicing_old: return "old"; 832 case bootstrapping_old: return "bootstrap"; 833 default: return "unknown"; 834 } 835 } 836 837 void ShenandoahGenerationalControlThread::set_gc_mode(ShenandoahGenerationalControlThread::GCMode new_mode) { 838 if (_mode != new_mode) { 839 log_debug(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode)); 840 MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); 841 _mode = new_mode; 842 ml.notify_all(); 843 } 844 }