/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "runtime/atomic.hpp"

ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
  ShenandoahController(),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _requested_gc_cause(GCCause::_no_gc),
  _requested_generation(GLOBAL),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _mode(none) {
  shenandoah_assert_generational();
  set_name("Shenandoah Control Thread");
  create_and_start();
}

void ShenandoahGenerationalControlThread::run_service() {
  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();

  const GCMode default_mode = concurrent_normal;
  ShenandoahGenerationType generation = GLOBAL;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x lower than the delay would mean we hit the
  // shrinking with lag of less than 1/10-th of true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
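  // For example, a 5000 ms uncommit delay yields a shrink period of 0.5 seconds.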
  const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();

  // Heuristics are notified of allocation failures here and other outcomes
  // of the cycle. They're also used here to control whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
    const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set();

    GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc);

    const bool is_gc_requested = ShenandoahCollectorPolicy::is_requested_gc(cause);

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Check if we have seen a new target for soft max heap size.
    const bool soft_max_changed = heap->check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->young_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->type();
      bool old_gen_evacuation_failed = heap->old_generation()->clear_failed_evacuation();

      heuristics->log_trigger("Handle Allocation Failure");

      // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
          !old_gen_evacuation_failed && !humongous_alloc_failure_pending) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = GLOBAL;
        set_gc_mode(stw_full);
      }
    } else if (is_gc_requested) {
      generation = GLOBAL;
      global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(cause));
      global_heuristics->record_requested_gc();

      if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
        set_gc_mode(stw_full);
      } else {
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
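      // (The regulator signals its request by setting _requested_gc_cause to
      // GCCause::_shenandoah_concurrent_gc; see request_concurrent_gc().)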
      if (cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->old_generation()->is_doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
          generation = YOUNG;
        } else {
          generation = _requested_generation;
        }

        // Preemption was requested or this is a regular cycle
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == GLOBAL) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress",
                     heap->is_concurrent_old_mark_in_progress() ? "" : " NOT",
                     heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT");

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
        heap->set_unload_classes(false);
      }
    }

    const bool gc_requested = (gc_mode() != none);
    assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set");

    if (gc_requested) {
      // Blow away all soft references on this cycle, if handling allocation failure,
      // either implicit or explicit GC request, or we are requested to do so unconditionally.
      if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
        heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      }

      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      heap->set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      heap->free_set()->log_status_under_lock();

      // In case this is a degenerated cycle, remember whether original cycle was aging.
      const bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);

      switch (gc_mode()) {
        case concurrent_normal: {
          // At this point:
          //  if (generation == YOUNG), this is a normal YOUNG cycle
          //  if (generation == OLD), this is a bootstrap OLD cycle
          //  if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
          // In all three cases, we want to age old objects if this is an aging cycle
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_concurrent_normal_cycle(heap, generation, cause);
          break;
        }
        case stw_degenerated: {
          heap->set_aging_cycle(was_aging_cycle);
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        }
        case stw_full: {
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_stw_full_cycle(cause);
          break;
        }
        case servicing_old: {
          assert(generation == OLD, "Expected old generation here");
          GCIdMark gc_id_mark;
          service_concurrent_old_cycle(heap, cause);
          break;
        }
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (is_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      heap->free_set()->log_status_under_lock();

      // Notify Universe about new heap usage. This has implications for
      // global soft refs policy, and we better report it every time heap
      // usage goes down.
      heap->update_capacity_and_used_at_gc();

      // Signal that we have completed a visit to all live objects.
      heap->record_whole_heap_examined_timestamp();

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      heap->handle_force_counters_update();
      heap->set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    const double current = os::elapsedTime();

    if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (is_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      heap->maybe_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle.
    if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) {
      // The timed wait is necessary because this thread has a responsibility to send
      // 'alloc_words' to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahGenerationalControlThread::process_phase_timings(const ShenandoahGenerationalHeap* heap) {
  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
  ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles are degenerated to complete the young cycle. Young
// and old degen may upgrade to Full GC. Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                  |       |
//      |        v                                  v       |
//      +---> Global Degen +--------------------> Full <----+
//
void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(ShenandoahGenerationalHeap* heap,
                                                                          const ShenandoahGenerationType generation,
                                                                          GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (Young)");
      service_concurrent_cycle(heap->young_generation(), cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (Old)");
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    case GLOBAL: {
      log_info(gc, ergo)("Start GC cycle (Global)");
      service_concurrent_cycle(heap->global_generation(), cause, false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(ShenandoahGenerationalHeap* heap, GCCause::Cause &cause) {
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::FILLING: {
      ShenandoahGCSession session(cause, old_generation);
      _allow_old_preemption.set();
      old_generation->entry_coalesce_and_fill();
      _allow_old_preemption.unset();

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing threads completed and nothing was cancelled. It is safe to transition from this state.
      old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      return;
    }
    case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal except that rather than
      // ignore old references it will mark and enqueue them in the old concurrent
      // task queues but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // Young generation bootstrap cycle has failed. Concurrent mark for old generation
        // is going to resume after degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark.
      // All of that work will have been done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->mmu_tracker()->record_old_marking_increment(true);
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->mmu_tracker()->record_old_marking_increment(false);
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) {
  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    heap->notify_gc_progress();
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation. In which case, the
    // old gc cycle is still completed, and we have to deal with this
    // cancellation. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: immediate garbage shortcut, when
  // heuristics says there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                                   ShenandoahGeneration* generation,
                                                                   GCCause::Cause& cause,
                                                                   bool do_old_gc_bootstrap) {
  assert(!generation->is_old(), "Old GC takes a different control path");

  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->notify_gc_progress();
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());

    // Concurrent young-gen collection degenerates to young
    // collection. Same for global collections.
    _degen_generation = generation;
  }
  const char* msg;
  ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
  if (generation->is_young()) {
    if (heap->cancelled_gc()) {
      msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" :
                                    "At end of Interrupted Concurrent Young GC";
    } else {
      // We only record GC results if GC was successful
      msg = (do_old_gc_bootstrap) ?
"At end of Concurrent Bootstrap GC" : 612 "At end of Concurrent Young GC"; 613 if (heap->collection_set()->has_old_regions()) { 614 mmu_tracker->record_mixed(get_gc_id()); 615 } else if (do_old_gc_bootstrap) { 616 mmu_tracker->record_bootstrap(get_gc_id()); 617 } else { 618 mmu_tracker->record_young(get_gc_id()); 619 } 620 } 621 } else { 622 assert(generation->is_global(), "If not young, must be GLOBAL"); 623 assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC"); 624 if (heap->cancelled_gc()) { 625 msg = "At end of Interrupted Concurrent GLOBAL GC"; 626 } else { 627 // We only record GC results if GC was successful 628 msg = "At end of Concurrent Global GC"; 629 mmu_tracker->record_global(get_gc_id()); 630 } 631 } 632 heap->log_heap_status(msg); 633 } 634 635 bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) { 636 ShenandoahHeap* heap = ShenandoahHeap::heap(); 637 if (!heap->cancelled_gc()) { 638 return false; 639 } 640 641 if (in_graceful_shutdown()) { 642 return true; 643 } 644 645 assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle, 646 "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); 647 648 if (is_alloc_failure_gc()) { 649 _degen_point = point; 650 _preemption_requested.unset(); 651 return true; 652 } 653 654 if (_preemption_requested.is_set()) { 655 assert(_requested_generation == YOUNG, "Only young GCs may preempt old."); 656 _preemption_requested.unset(); 657 658 // Old generation marking is only cancellable during concurrent marking. 659 // Once final mark is complete, the code does not check again for cancellation. 660 // If old generation was cancelled for an allocation failure, we wouldn't 661 // make it to this case. The calling code is responsible for forcing a 662 // cancellation due to allocation failure into a degenerated cycle. 663 _degen_point = point; 664 heap->clear_cancelled_gc(false /* clear oom handler */); 665 return true; 666 } 667 668 fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking"); 669 return false; 670 } 671 672 void ShenandoahGenerationalControlThread::stop_service() { 673 // Nothing to do here. 
}

void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);
}

void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(GCCause::Cause cause,
                                                                        ShenandoahGC::ShenandoahDegenPoint point) {
  assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->is_global()) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = heap->old_generation();
    if (old->is_bootstrapping()) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenerationType generation) {
  if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s",
                          BOOL_TO_STR(_preemption_requested.is_set()),
                          GCCause::to_string(_requested_gc_cause),
                          BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc()));
    return false;
  }

  if (gc_mode() == none) {
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    _requested_generation = generation;
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == none) {
      ml.wait();
    }
    return true;
  }

  if (preempt_old_marking(generation)) {
    assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode()));
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation));
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();
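
    // Wait for the control thread to acknowledge the preemption by leaving the servicing_old mode.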
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == servicing_old) {
      ml.wait();
    }
    return true;
  }

  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

void ShenandoahGenerationalControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGenerationType generation) {
  return (generation == YOUNG) && _allow_old_preemption.try_unset();
}

void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
  // For normal requested GCs (System.gc) we want to block the caller. However,
  // for whitebox requested GC, we want to initiate the GC and return immediately.
  // The whitebox caller thread will arrange for itself to wait until the GC notifies
  // it that it has reached the requested breakpoint (phase in the GC).
  if (cause == GCCause::_wb_breakpoint) {
    Atomic::xchg(&_requested_gc_cause, cause);
    notify_control_thread();
    return;
  }

  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // This races with the regulator thread to start a concurrent gc and the
    // control thread to clear it at the start of a cycle. Threads here are
    // allowed to escalate a heuristic's request for concurrent gc.
    GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing));
    }

    notify_control_thread();
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahGenerationalControlThread::notify_gc_waiters() {
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

const char* ShenandoahGenerationalControlThread::gc_mode_name(ShenandoahGenerationalControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    default:                return "unknown";
  }
}

void ShenandoahGenerationalControlThread::set_gc_mode(ShenandoahGenerationalControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_debug(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    _mode = new_mode;
    ml.notify_all();
  }
}