/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "runtime/atomic.hpp"
#include "utilities/events.hpp"

ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahGCRequest_lock", true),
  _requested_gc_cause(GCCause::_no_gc),
  _requested_generation(nullptr),
  _gc_mode(none),
  _degen_point(ShenandoahGC::_degenerated_unset),
  _heap(ShenandoahGenerationalHeap::heap()),
  _age_period(0) {
  shenandoah_assert_generational();
  set_name("Shenandoah Control Thread");
  create_and_start();
}

void ShenandoahGenerationalControlThread::run_service() {

  const int64_t wait_ms = ShenandoahPacing ? ShenandoahControlIntervalMin : 0;
  ShenandoahGCRequest request;
  while (!should_terminate()) {

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Figure out if we have pending requests.
    check_for_request(request);

    if (request.cause == GCCause::_shenandoah_stop_vm) {
      break;
    }

    if (request.cause != GCCause::_no_gc) {
      run_gc_cycle(request);
    } else {
      // Report to pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        _heap->pacer()->report_alloc(allocs_seen);
      }
    }

    // If the cycle was cancelled, continue the next iteration to deal with it. Otherwise,
    // if there was no other cycle requested, cleanup and wait for the next request.
    if (!_heap->cancelled_gc()) {
      MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
      if (_requested_gc_cause == GCCause::_no_gc) {
        set_gc_mode(ml, none);
        ml.wait(wait_ms);
      }
    }
  }

  // In case any threads are waiting for a cycle to happen, notify them so they observe the shutdown.
  notify_gc_waiters();
  notify_alloc_failure_waiters();
  set_gc_mode(stopped);
}

void ShenandoahGenerationalControlThread::stop_service() {
  log_debug(gc, thread)("Stopping control thread");
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  _heap->cancel_gc(GCCause::_shenandoah_stop_vm);
  _requested_gc_cause = GCCause::_shenandoah_stop_vm;
  notify_cancellation(ml, GCCause::_shenandoah_stop_vm);
  // We can't wait here because it may interfere with the active cycle's ability
  // to reach a safepoint (this runs on a java thread).
}

void ShenandoahGenerationalControlThread::check_for_request(ShenandoahGCRequest& request) {
  // Hold the lock while we read request cause and generation
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  if (_heap->cancelled_gc()) {
    // The previous request was cancelled. Either it was cancelled for an allocation
    // failure (degenerated cycle), or old marking was cancelled to run a young collection.
    // In either case, the correct generation for the next cycle can be determined by
    // the cancellation cause.
    request.cause = _heap->cancelled_cause();
    if (request.cause == GCCause::_shenandoah_concurrent_gc) {
      request.generation = _heap->young_generation();
      _heap->clear_cancelled_gc(false);
    }
  } else {
    request.cause = _requested_gc_cause;
    request.generation = _requested_generation;

    // Only clear these if we made a request from them. In the case of a cancelled gc,
    // we do not want to inadvertently lose this pending request.
    _requested_gc_cause = GCCause::_no_gc;
    _requested_generation = nullptr;
  }

  if (request.cause == GCCause::_no_gc || request.cause == GCCause::_shenandoah_stop_vm) {
    return;
  }

  GCMode mode;
  if (ShenandoahCollectorPolicy::is_allocation_failure(request.cause)) {
    mode = prepare_for_allocation_failure_gc(request);
  } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) {
    mode = prepare_for_explicit_gc(request);
  } else {
    mode = prepare_for_concurrent_gc(request);
  }
  set_gc_mode(ml, mode);
}

ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_allocation_failure_gc(ShenandoahGCRequest &request) {

  if (_degen_point == ShenandoahGC::_degenerated_unset) {
    _degen_point = ShenandoahGC::_degenerated_outside_cycle;
    request.generation = _heap->young_generation();
  } else if (request.generation->is_old()) {
    // This means we degenerated during the young bootstrap for the old generation
    // cycle. The following degenerated cycle should therefore also be young.
    request.generation = _heap->young_generation();
  }

  ShenandoahHeuristics* heuristics = request.generation->heuristics();
  bool old_gen_evacuation_failed = _heap->old_generation()->clear_failed_evacuation();

  heuristics->log_trigger("Handle Allocation Failure");

  // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed
  if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
      !old_gen_evacuation_failed && request.cause != GCCause::_shenandoah_humongous_allocation_failure) {
    heuristics->record_allocation_failure_gc();
    _heap->shenandoah_policy()->record_alloc_failure_to_degenerated(_degen_point);
    return stw_degenerated;
  } else {
    heuristics->record_allocation_failure_gc();
    _heap->shenandoah_policy()->record_alloc_failure_to_full();
    request.generation = _heap->global_generation();
    return stw_full;
  }
}

ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_explicit_gc(ShenandoahGCRequest &request) const {
  ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics();
  request.generation = _heap->global_generation();
  global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(request.cause));
  global_heuristics->record_requested_gc();

  if (ShenandoahCollectorPolicy::should_run_full_gc(request.cause)) {
    return stw_full;
  } else {
    // Unload and clean up everything. Note that this is an _explicit_ request and so does not use
    // the same `should_unload_classes` call as the regulator's concurrent gc request.
    _heap->set_unload_classes(global_heuristics->can_unload_classes());
    return concurrent_normal;
  }
}

ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_concurrent_gc(const ShenandoahGCRequest &request) const {
  assert(!(request.generation->is_old() && _heap->old_generation()->is_doing_mixed_evacuations()),
         "Old heuristic should not request cycles while it waits for mixed evacuations");

  if (request.generation->is_global()) {
    ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics();
    _heap->set_unload_classes(global_heuristics->should_unload_classes());
  } else {
    _heap->set_unload_classes(false);
  }

  // preemption was requested or this is a regular cycle
  return request.generation->is_old() ? servicing_old : concurrent_normal;
}

// Every ShenandoahAgingCyclePeriod-th cycle is an aging cycle; _age_period counts down the cycles in between.
void ShenandoahGenerationalControlThread::maybe_set_aging_cycle() {
  if (_age_period-- == 0) {
    _heap->set_aging_cycle(true);
    _age_period = ShenandoahAgingCyclePeriod - 1;
  } else {
    _heap->set_aging_cycle(false);
  }
}

void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest& request) {

  log_debug(gc, thread)("Starting GC (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name());
  assert(gc_mode() != none, "GC mode cannot be none here");

  // Blow away all soft references on this cycle if we are handling an allocation failure,
  // an implicit or explicit GC request, or we are requested to do so unconditionally.
  if (request.generation->is_global() &&
      (ShenandoahCollectorPolicy::is_allocation_failure(request.cause) ||
       ShenandoahCollectorPolicy::is_explicit_gc(request.cause) ||
       ShenandoahAlwaysClearSoftRefs)) {
    _heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
  }

  // GC is starting, bump the internal ID
  update_gc_id();

  GCIdMark gc_id_mark;

  _heap->reset_bytes_allocated_since_gc_start();

  MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

  // If GC was requested, we are sampling the counters even without actual triggers
  // from allocation machinery. This captures GC phases more accurately.
  _heap->set_forced_counters_update(true);

  // If GC was requested, we better dump freeset data for performance debugging
  _heap->free_set()->log_status_under_lock();

  {
    // Cannot uncommit bitmap slices during concurrent reset
    ShenandoahNoUncommitMark forbid_region_uncommit(_heap);

    switch (gc_mode()) {
      case concurrent_normal: {
        service_concurrent_normal_cycle(request);
        break;
      }
      case stw_degenerated: {
        service_stw_degenerated_cycle(request);
        break;
      }
      case stw_full: {
        service_stw_full_cycle(request.cause);
        break;
      }
      case servicing_old: {
        assert(request.generation->is_old(), "Expected old generation here");
        service_concurrent_old_cycle(request);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  // If this cycle completed successfully, notify threads waiting for gc
  if (!_heap->cancelled_gc()) {
    notify_gc_waiters();
    notify_alloc_failure_waiters();
  }

  // Report current free set state at the end of cycle, whether
  // it is a normal completion, or the abort.
  _heap->free_set()->log_status_under_lock();

  // Notify Universe about new heap usage. This has implications for
  // global soft refs policy, and we better report it every time heap
  // usage goes down.
  _heap->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  _heap->record_whole_heap_examined_timestamp();

  // Disable forced counters update, and update counters one more time
  // to capture the state at the end of GC session.
  _heap->handle_force_counters_update();
  _heap->set_forced_counters_update(false);

  // Retract forceful part of soft refs policy
  _heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

  // Clear metaspace oom flag, if current cycle unloaded classes
  if (_heap->unload_classes()) {
    _heap->global_generation()->heuristics()->clear_metaspace_oom();
  }

  process_phase_timings();

  // Print Metaspace change following GC (if logging is enabled).
  MetaspaceUtils::print_metaspace_change(meta_sizes);

  // GC is over, we are at idle now
  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_idle();
  }

  // Check if we have seen a new target for soft max heap size or if a gc was requested.
  // Either of these conditions will attempt to uncommit regions.
  if (ShenandoahUncommit) {
    if (_heap->check_soft_max_changed()) {
      _heap->notify_soft_max_changed();
    } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) {
      _heap->notify_explicit_gc_requested();
    }
  }

  log_debug(gc, thread)("Completed GC (%s): %s, %s, cancelled: %s",
                        gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name(), GCCause::to_string(_heap->cancelled_cause()));
}

void ShenandoahGenerationalControlThread::process_phase_timings() const {
  // Commit worker statistics to cycle data
  _heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    _heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = _heap->evac_tracker();
  ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      _heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                         &evac_stats.mutators);
      if (ShenandoahPacing) {
        _heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  _heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles are degenerated to complete the young cycle. Young
// and old degen may upgrade to Full GC. Full GC may also be
// triggered directly by a System.gc() invocation.
368 // 369 // 370 // +-----+ Idle +-----+-----------+---------------------+ 371 // | + | | | 372 // | | | | | 373 // | | v | | 374 // | | Bootstrap Old +-- | ------------+ | 375 // | | + | | | 376 // | | | | | | 377 // | v v v v | 378 // | Resume Old <----------+ Young +--> Young Degen | 379 // | + + ^ + + | 380 // v | | | | | | 381 // Global <-+ | +----------------------------+ | | 382 // + | | | 383 // | v v | 384 // +---> Global Degen +--------------------> Full <----+ 385 // 386 void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(const ShenandoahGCRequest& request) { 387 log_info(gc, ergo)("Start GC cycle (%s)", request.generation->name()); 388 if (request.generation->is_old()) { 389 service_concurrent_old_cycle(request); 390 } else { 391 service_concurrent_cycle(request.generation, request.cause, false); 392 } 393 } 394 395 void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(const ShenandoahGCRequest& request) { 396 ShenandoahOldGeneration* old_generation = _heap->old_generation(); 397 ShenandoahYoungGeneration* young_generation = _heap->young_generation(); 398 ShenandoahOldGeneration::State original_state = old_generation->state(); 399 400 TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters()); 401 402 switch (original_state) { 403 case ShenandoahOldGeneration::FILLING: { 404 ShenandoahGCSession session(request.cause, old_generation); 405 assert(gc_mode() == servicing_old, "Filling should be servicing old"); 406 _allow_old_preemption.set(); 407 old_generation->entry_coalesce_and_fill(); 408 _allow_old_preemption.unset(); 409 410 // Before bootstrapping begins, we must acknowledge any cancellation request. 411 // If the gc has not been cancelled, this does nothing. If it has been cancelled, 412 // this will clear the cancellation request and exit before starting the bootstrap 413 // phase. This will allow the young GC cycle to proceed normally. If we do not 414 // acknowledge the cancellation request, the subsequent young cycle will observe 415 // the request and essentially cancel itself. 416 if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) { 417 log_info(gc, thread)("Preparation for old generation cycle was cancelled"); 418 return; 419 } 420 421 // Coalescing threads completed and nothing was cancelled. it is safe to transition from this state. 422 old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); 423 return; 424 } 425 case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP: 426 old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING); 427 case ShenandoahOldGeneration::BOOTSTRAPPING: { 428 // Configure the young generation's concurrent mark to put objects in 429 // old regions into the concurrent mark queues associated with the old 430 // generation. The young cycle will run as normal except that rather than 431 // ignore old references it will mark and enqueue them in the old concurrent 432 // task queues but it will not traverse them. 433 set_gc_mode(bootstrapping_old); 434 young_generation->set_old_gen_task_queues(old_generation->task_queues()); 435 service_concurrent_cycle(young_generation, request.cause, true); 436 process_phase_timings(); 437 if (_heap->cancelled_gc()) { 438 // Young generation bootstrap cycle has failed. Concurrent mark for old generation 439 // is going to resume after degenerated bootstrap cycle completes. 
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      assert(_degen_point == ShenandoahGC::_degenerated_unset, "Degen point should not be set if gc wasn't cancelled");

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
      // Fall through to the MARKING case to resume old marking in this same pass.
    }
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(request.cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, request.cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          _heap->mmu_tracker()->record_old_marking_increment(true);
          _heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        _heap->mmu_tracker()->record_old_marking_increment(false);
        _heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) {
  assert(_heap->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    _heap->notify_gc_progress();
    generation->record_success_concurrent(false);
  }

  if (_heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation. In which case, the
    // old gc cycle is still completed, and we have to deal with this
    // cancellation. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (cause == GCCause::_shenandoah_concurrent_gc) {
      _heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

// Normal cycle goes via all concurrent phases. If an allocation failure (af) happens during
// any of the concurrent phases, it first degrades to a Degenerated GC and completes GC there.
// If a second allocation failure happens during the Degenerated GC cycle (for example, when the
// GC tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
//
// There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
// the heuristics say there are no regions to compact, and all the collected garbage comes from
// immediately reclaimable regions.
//
// ................................................................................................
//
//                                    (immediate garbage shortcut)               Concurrent GC
//                             /-------------------------------------------\
//                             |                                           |
//                             |                                           |
//                             |                                           |
//                             |                                           v
// [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
//                   |                    |                 |              ^
//                   | (af)               | (af)            | (af)         |
// ..................|....................|.................|..............|.......................
//                   |                    |                 |              |
//                   |                    |                 |              |      Degenerated GC
//                   v                    v                 v              |
//                STW Mark ----------> STW Evac ----> STW Update-Refs ---->o
//                   |                    |                 |              ^
//                   | (af)               | (af)            | (af)         |
// ..................|....................|.................|..............|.......................
//                   |                    |                 |              |
//                   |                    v                 |              |      Full GC
//                   \------------------->o<----------------/              |
//                                        |                                |
//                                        v                                |
//                                    Full GC -----------------------------/
//
void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation,
                                                                   GCCause::Cause cause,
                                                                   bool do_old_gc_bootstrap) {
  // At this point:
  // if (generation == YOUNG), this is a normal young cycle or a bootstrap cycle
  // if (generation == GLOBAL), this is a GLOBAL cycle
  // In either case, we want to age old objects if this is an aging cycle
  maybe_set_aging_cycle();

  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters());

  assert(!generation->is_old(), "Old GC takes a different control path");

  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    _heap->notify_gc_progress();
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(_heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }

  const char* msg;
  ShenandoahMmuTracker* mmu_tracker = _heap->mmu_tracker();
  if (generation->is_young()) {
    if (_heap->cancelled_gc()) {
      msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" :
                                    "At end of Interrupted Concurrent Young GC";
    } else {
      // We only record GC results if GC was successful
      msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" :
                                    "At end of Concurrent Young GC";
      if (_heap->collection_set()->has_old_regions()) {
        mmu_tracker->record_mixed(get_gc_id());
      } else if (do_old_gc_bootstrap) {
        mmu_tracker->record_bootstrap(get_gc_id());
      } else {
        mmu_tracker->record_young(get_gc_id());
      }
    }
  } else {
    assert(generation->is_global(), "If not young, must be GLOBAL");
    assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
    if (_heap->cancelled_gc()) {
      msg = "At end of Interrupted Concurrent GLOBAL GC";
    } else {
      // We only record GC results if GC was successful
      msg = "At end of Concurrent Global GC";
      mmu_tracker->record_global(get_gc_id());
    }
  }
  _heap->log_heap_status(msg);
}

bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  if (!_heap->cancelled_gc()) {
    return false;
  }

  if (_heap->cancelled_cause() == GCCause::_shenandoah_stop_vm ||
      _heap->cancelled_cause() == GCCause::_shenandoah_concurrent_gc) {
    log_debug(gc, thread)("Cancellation detected, reason: %s", GCCause::to_string(_heap->cancelled_cause()));
    return true;
  }

  if (ShenandoahCollectorPolicy::is_allocation_failure(_heap->cancelled_cause())) {
    assert(_degen_point == ShenandoahGC::_degenerated_unset,
           "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
    _degen_point = point;
    log_debug(gc, thread)("Cancellation detected, reason: %s, degen point: %s",
                          GCCause::to_string(_heap->cancelled_cause()),
                          ShenandoahGC::degen_point_to_string(_degen_point));
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}

void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahGCSession session(cause, _heap->global_generation());
  maybe_set_aging_cycle();
  ShenandoahFullGC gc;
  gc.collect(cause);
  _degen_point = ShenandoahGC::_degenerated_unset;
}

void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(const ShenandoahGCRequest& request) {
  assert(_degen_point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  ShenandoahGCSession session(request.cause, request.generation);

  ShenandoahDegenGC gc(_degen_point, request.generation);
  gc.collect(request.cause);
  _degen_point = ShenandoahGC::_degenerated_unset;

  assert(_heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (request.generation->is_global()) {
    assert(_heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(_heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(request.generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = _heap->old_generation();
    if (old->is_bootstrapping()) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::is_allocation_failure(cause)) {
    // GC should already be cancelled. Here we are just notifying the control thread to
    // wake up and handle the cancellation request, so we don't need to set _requested_gc_cause.
    notify_cancellation(cause);
  } else if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGeneration* generation) {
  if (_heap->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: gc_requested: %s, gc_cancelled: %s",
                          GCCause::to_string(_requested_gc_cause),
                          BOOL_TO_STR(_heap->cancelled_gc()));
    return false;
  }

  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  if (gc_mode() == servicing_old) {
    if (!preempt_old_marking(generation)) {
      log_debug(gc, thread)("Cannot start young, old collection is not preemptible");
      return false;
    }

    // Cancel the old GC and wait for the control thread to start servicing the new request.
    log_info(gc)("Preempting old generation mark to allow %s GC", generation->name());
    while (gc_mode() == servicing_old) {
      ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
      notify_cancellation(ml, GCCause::_shenandoah_concurrent_gc);
      ml.wait();
    }
    return true;
  }

  if (gc_mode() == none) {
    const size_t current_gc_id = get_gc_id();
    while (gc_mode() == none && current_gc_id == get_gc_id()) {
      if (_requested_gc_cause != GCCause::_no_gc) {
        log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(_requested_gc_cause));
        return false;
      }

      notify_control_thread(ml, GCCause::_shenandoah_concurrent_gc, generation);
      ml.wait();
    }
    return true;
  }

  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

void ShenandoahGenerationalControlThread::notify_control_thread(GCCause::Cause cause, ShenandoahGeneration* generation) {
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  notify_control_thread(ml, cause, generation);
}

void ShenandoahGenerationalControlThread::notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation) {
  assert(_control_lock.is_locked(), "Request lock must be held here");
  log_debug(gc, thread)("Notify control (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(cause), generation->name());
  _requested_gc_cause = cause;
  _requested_generation = generation;
  ml.notify();
}

void ShenandoahGenerationalControlThread::notify_cancellation(GCCause::Cause cause) {
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  notify_cancellation(ml, cause);
}

void ShenandoahGenerationalControlThread::notify_cancellation(MonitorLocker& ml, GCCause::Cause cause) {
  assert(_heap->cancelled_gc(), "GC should already be cancelled");
  log_debug(gc, thread)("Notify control (%s): %s", gc_mode_name(gc_mode()), GCCause::to_string(cause));
  ml.notify();
}

bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGeneration* generation) {
  return generation->is_young() && _allow_old_preemption.try_unset();
}

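// Note: handle_requested_gc runs on the requesting thread. It blocks that caller on
// _gc_waiters_lock until get_gc_id() has advanced past the id observed at request time,
// i.e. until the control thread has completed at least one full cycle after the request
// (signalled via notify_gc_waiters()), or until the control thread is terminating.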
void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
  // For normal requested GCs (System.gc) we want to block the caller. However,
  // for whitebox requested GC, we want to initiate the GC and return immediately.
  // The whitebox caller thread will arrange for itself to wait until the GC notifies
  // it that the GC has reached the requested breakpoint (phase in the GC).
  if (cause == GCCause::_wb_breakpoint) {
    notify_control_thread(cause, ShenandoahHeap::heap()->global_generation());
    return;
  }

  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  const size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id && !should_terminate()) {
    // Make requests to run a global cycle until at least one is completed
    notify_control_thread(cause, ShenandoahHeap::heap()->global_generation());
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahGenerationalControlThread::notify_gc_waiters() {
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

const char* ShenandoahGenerationalControlThread::gc_mode_name(GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    case stopped:           return "stopped";
    default:                return "unknown";
  }
}

void ShenandoahGenerationalControlThread::set_gc_mode(GCMode new_mode) {
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  set_gc_mode(ml, new_mode);
}

void ShenandoahGenerationalControlThread::set_gc_mode(MonitorLocker& ml, GCMode new_mode) {
  if (_gc_mode != new_mode) {
    log_debug(gc, thread)("Transition from: %s to: %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode));
    EventMark event("Control thread transition from: %s, to %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode));
    _gc_mode = new_mode;
    ml.notify_all();
  }
}