1 /* 2 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. 4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 * 25 */ 26 27 #include "gc/shenandoah/shenandoahAsserts.hpp" 28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 29 #include "gc/shenandoah/shenandoahConcurrentGC.hpp" 30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp" 31 #include "gc/shenandoah/shenandoahFreeSet.hpp" 32 #include "gc/shenandoah/shenandoahFullGC.hpp" 33 #include "gc/shenandoah/shenandoahGeneration.hpp" 34 #include "gc/shenandoah/shenandoahGenerationalControlThread.hpp" 35 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" 36 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" 38 #include "gc/shenandoah/shenandoahOldGC.hpp" 39 #include "gc/shenandoah/shenandoahOldGeneration.hpp" 40 #include "gc/shenandoah/shenandoahPacer.inline.hpp" 41 #include "gc/shenandoah/shenandoahUtils.hpp" 42 #include "gc/shenandoah/shenandoahYoungGeneration.hpp" 43 #include "logging/log.hpp" 44 #include "memory/metaspaceStats.hpp" 45 #include "memory/metaspaceUtils.hpp" 46 #include "runtime/atomic.hpp" 47 #include "utilities/events.hpp" 48 49 ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() : 50 _control_lock(Mutex::nosafepoint - 2, "ShenandoahGCRequest_lock", true), 51 _requested_gc_cause(GCCause::_no_gc), 52 _requested_generation(nullptr), 53 _gc_mode(none), 54 _degen_point(ShenandoahGC::_degenerated_unset), 55 _heap(ShenandoahGenerationalHeap::heap()), 56 _age_period(0) { 57 shenandoah_assert_generational(); 58 set_name("Shenandoah Control Thread"); 59 create_and_start(); 60 } 61 62 void ShenandoahGenerationalControlThread::run_service() { 63 64 const int64_t wait_ms = ShenandoahPacing ? ShenandoahControlIntervalMin : 0; 65 ShenandoahGCRequest request; 66 while (!should_terminate()) { 67 68 // This control loop iteration has seen this much allocation. 69 const size_t allocs_seen = reset_allocs_seen(); 70 71 // Figure out if we have pending requests. 72 check_for_request(request); 73 74 if (request.cause == GCCause::_shenandoah_stop_vm) { 75 break; 76 } 77 78 if (request.cause != GCCause::_no_gc) { 79 run_gc_cycle(request); 80 } else { 81 // Report to pacer that we have seen this many words allocated 82 if (ShenandoahPacing && (allocs_seen > 0)) { 83 _heap->pacer()->report_alloc(allocs_seen); 84 } 85 } 86 87 // If the cycle was cancelled, continue the next iteration to deal with it. Otherwise, 88 // if there was no other cycle requested, cleanup and wait for the next request. 89 if (!_heap->cancelled_gc()) { 90 MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); 91 if (_requested_gc_cause == GCCause::_no_gc) { 92 set_gc_mode(ml, none); 93 ml.wait(wait_ms); 94 } 95 } 96 } 97 98 // In case any threads are waiting for a cycle to happen, notify them so they observe the shutdown. 99 notify_gc_waiters(); 100 notify_alloc_failure_waiters(); 101 set_gc_mode(stopped); 102 } 103 104 void ShenandoahGenerationalControlThread::stop_service() { 105 log_debug(gc, thread)("Stopping control thread"); 106 MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); 107 _heap->cancel_gc(GCCause::_shenandoah_stop_vm); 108 _requested_gc_cause = GCCause::_shenandoah_stop_vm; 109 notify_cancellation(ml, GCCause::_shenandoah_stop_vm); 110 // We can't wait here because it may interfere with the active cycle's ability 111 // to reach a safepoint (this runs on a java thread). 112 } 113 114 void ShenandoahGenerationalControlThread::check_for_request(ShenandoahGCRequest& request) { 115 // Hold the lock while we read request cause and generation 116 MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); 117 if (_heap->cancelled_gc()) { 118 // The previous request was cancelled. Either it was cancelled for an allocation 119 // failure (degenerated cycle), or old marking was cancelled to run a young collection. 120 // In either case, the correct generation for the next cycle can be determined by 121 // the cancellation cause. 122 request.cause = _heap->cancelled_cause(); 123 if (request.cause == GCCause::_shenandoah_concurrent_gc) { 124 request.generation = _heap->young_generation(); 125 _heap->clear_cancelled_gc(false); 126 } 127 } else { 128 request.cause = _requested_gc_cause; 129 request.generation = _requested_generation; 130 131 // Only clear these if we made a request from them. In the case of a cancelled gc, 132 // we do not want to inadvertently lose this pending request. 133 _requested_gc_cause = GCCause::_no_gc; 134 _requested_generation = nullptr; 135 } 136 137 if (request.cause == GCCause::_no_gc || request.cause == GCCause::_shenandoah_stop_vm) { 138 return; 139 } 140 141 GCMode mode; 142 if (ShenandoahCollectorPolicy::is_allocation_failure(request.cause)) { 143 mode = prepare_for_allocation_failure_gc(request); 144 } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) { 145 mode = prepare_for_explicit_gc(request); 146 } else { 147 mode = prepare_for_concurrent_gc(request); 148 } 149 set_gc_mode(ml, mode); 150 } 151 152 ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_allocation_failure_gc(ShenandoahGCRequest &request) { 153 154 if (_degen_point == ShenandoahGC::_degenerated_unset) { 155 _degen_point = ShenandoahGC::_degenerated_outside_cycle; 156 request.generation = _heap->young_generation(); 157 } else if (request.generation->is_old()) { 158 // This means we degenerated during the young bootstrap for the old generation 159 // cycle. The following degenerated cycle should therefore also be young. 160 request.generation = _heap->young_generation(); 161 } 162 163 ShenandoahHeuristics* heuristics = request.generation->heuristics(); 164 bool old_gen_evacuation_failed = _heap->old_generation()->clear_failed_evacuation(); 165 166 heuristics->log_trigger("Handle Allocation Failure"); 167 168 // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed 169 if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && 170 !old_gen_evacuation_failed && request.cause != GCCause::_shenandoah_humongous_allocation_failure) { 171 heuristics->record_allocation_failure_gc(); 172 _heap->shenandoah_policy()->record_alloc_failure_to_degenerated(_degen_point); 173 return stw_degenerated; 174 } else { 175 heuristics->record_allocation_failure_gc(); 176 _heap->shenandoah_policy()->record_alloc_failure_to_full(); 177 request.generation = _heap->global_generation(); 178 return stw_full; 179 } 180 } 181 182 ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_explicit_gc(ShenandoahGCRequest &request) const { 183 ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics(); 184 request.generation = _heap->global_generation(); 185 global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(request.cause)); 186 global_heuristics->record_requested_gc(); 187 188 if (ShenandoahCollectorPolicy::should_run_full_gc(request.cause)) { 189 return stw_full;; 190 } else { 191 // Unload and clean up everything. Note that this is an _explicit_ request and so does not use 192 // the same `should_unload_classes` call as the regulator's concurrent gc request. 193 _heap->set_unload_classes(global_heuristics->can_unload_classes()); 194 return concurrent_normal; 195 } 196 } 197 198 ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_concurrent_gc(const ShenandoahGCRequest &request) const { 199 assert(!(request.generation->is_old() && _heap->old_generation()->is_doing_mixed_evacuations()), 200 "Old heuristic should not request cycles while it waits for mixed evacuations"); 201 202 if (request.generation->is_global()) { 203 ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics(); 204 _heap->set_unload_classes(global_heuristics->should_unload_classes()); 205 } else { 206 _heap->set_unload_classes(false); 207 } 208 209 // preemption was requested or this is a regular cycle 210 return request.generation->is_old() ? servicing_old : concurrent_normal; 211 } 212 213 void ShenandoahGenerationalControlThread::maybe_set_aging_cycle() { 214 if (_age_period-- == 0) { 215 _heap->set_aging_cycle(true); 216 _age_period = ShenandoahAgingCyclePeriod - 1; 217 } else { 218 _heap->set_aging_cycle(false); 219 } 220 } 221 222 void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest& request) { 223 224 log_debug(gc, thread)("Starting GC (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name()); 225 assert(gc_mode() != none, "GC mode cannot be none here"); 226 227 // Blow away all soft references on this cycle, if handling allocation failure, 228 // either implicit or explicit GC request, or we are requested to do so unconditionally. 229 if (request.generation->is_global() && (ShenandoahCollectorPolicy::is_allocation_failure(request.cause) || ShenandoahCollectorPolicy::is_explicit_gc(request.cause) || ShenandoahAlwaysClearSoftRefs)) { 230 _heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); 231 } 232 233 // GC is starting, bump the internal ID 234 update_gc_id(); 235 236 GCIdMark gc_id_mark; 237 238 _heap->reset_bytes_allocated_since_gc_start(); 239 240 MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics(); 241 242 // If GC was requested, we are sampling the counters even without actual triggers 243 // from allocation machinery. This captures GC phases more accurately. 244 _heap->set_forced_counters_update(true); 245 246 // If GC was requested, we better dump freeset data for performance debugging 247 _heap->free_set()->log_status_under_lock(); 248 249 { 250 // Cannot uncommit bitmap slices during concurrent reset 251 ShenandoahNoUncommitMark forbid_region_uncommit(_heap); 252 253 switch (gc_mode()) { 254 case concurrent_normal: { 255 service_concurrent_normal_cycle(request); 256 break; 257 } 258 case stw_degenerated: { 259 service_stw_degenerated_cycle(request); 260 break; 261 } 262 case stw_full: { 263 service_stw_full_cycle(request.cause); 264 break; 265 } 266 case servicing_old: { 267 assert(request.generation->is_old(), "Expected old generation here"); 268 service_concurrent_old_cycle(request); 269 break; 270 } 271 default: 272 ShouldNotReachHere(); 273 } 274 } 275 276 // If this cycle completed successfully, notify threads waiting for gc 277 if (!_heap->cancelled_gc()) { 278 notify_gc_waiters(); 279 notify_alloc_failure_waiters(); 280 } 281 282 // Report current free set state at the end of cycle, whether 283 // it is a normal completion, or the abort. 284 _heap->free_set()->log_status_under_lock(); 285 286 // Notify Universe about new heap usage. This has implications for 287 // global soft refs policy, and we better report it every time heap 288 // usage goes down. 289 _heap->update_capacity_and_used_at_gc(); 290 291 // Signal that we have completed a visit to all live objects. 292 _heap->record_whole_heap_examined_timestamp(); 293 294 // Disable forced counters update, and update counters one more time 295 // to capture the state at the end of GC session. 296 _heap->handle_force_counters_update(); 297 _heap->set_forced_counters_update(false); 298 299 // Retract forceful part of soft refs policy 300 _heap->soft_ref_policy()->set_should_clear_all_soft_refs(false); 301 302 // Clear metaspace oom flag, if current cycle unloaded classes 303 if (_heap->unload_classes()) { 304 _heap->global_generation()->heuristics()->clear_metaspace_oom(); 305 } 306 307 process_phase_timings(); 308 309 // Print Metaspace change following GC (if logging is enabled). 310 MetaspaceUtils::print_metaspace_change(meta_sizes); 311 312 // GC is over, we are at idle now 313 if (ShenandoahPacing) { 314 _heap->pacer()->setup_for_idle(); 315 } 316 317 // Check if we have seen a new target for soft max heap size or if a gc was requested. 318 // Either of these conditions will attempt to uncommit regions. 319 if (ShenandoahUncommit) { 320 if (_heap->check_soft_max_changed()) { 321 _heap->notify_soft_max_changed(); 322 } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) { 323 _heap->notify_explicit_gc_requested(); 324 } 325 } 326 327 log_debug(gc, thread)("Completed GC (%s): %s, %s, cancelled: %s", 328 gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name(), GCCause::to_string(_heap->cancelled_cause())); 329 } 330 331 void ShenandoahGenerationalControlThread::process_phase_timings() const { 332 // Commit worker statistics to cycle data 333 _heap->phase_timings()->flush_par_workers_to_cycle(); 334 if (ShenandoahPacing) { 335 _heap->pacer()->flush_stats_to_cycle(); 336 } 337 338 ShenandoahEvacuationTracker* evac_tracker = _heap->evac_tracker(); 339 ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global(); 340 341 // Print GC stats for current cycle 342 { 343 LogTarget(Info, gc, stats) lt; 344 if (lt.is_enabled()) { 345 ResourceMark rm; 346 LogStream ls(lt); 347 _heap->phase_timings()->print_cycle_on(&ls); 348 evac_tracker->print_evacuations_on(&ls, &evac_stats.workers, 349 &evac_stats.mutators); 350 if (ShenandoahPacing) { 351 _heap->pacer()->print_cycle_on(&ls); 352 } 353 } 354 } 355 356 // Commit statistics to globals 357 _heap->phase_timings()->flush_cycle_to_global(); 358 } 359 360 // Young and old concurrent cycles are initiated by the regulator. Implicit 361 // and explicit GC requests are handled by the controller thread and always 362 // run a global cycle (which is concurrent by default, but may be overridden 363 // by command line options). Old cycles always degenerate to a global cycle. 364 // Young cycles are degenerated to complete the young cycle. Young 365 // and old degen may upgrade to Full GC. Full GC may also be 366 // triggered directly by a System.gc() invocation. 367 // 368 // 369 // +-----+ Idle +-----+-----------+---------------------+ 370 // | + | | | 371 // | | | | | 372 // | | v | | 373 // | | Bootstrap Old +-- | ------------+ | 374 // | | + | | | 375 // | | | | | | 376 // | v v v v | 377 // | Resume Old <----------+ Young +--> Young Degen | 378 // | + + ^ + + | 379 // v | | | | | | 380 // Global <-+ | +----------------------------+ | | 381 // + | | | 382 // | v v | 383 // +---> Global Degen +--------------------> Full <----+ 384 // 385 void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(const ShenandoahGCRequest& request) { 386 log_info(gc, ergo)("Start GC cycle (%s)", request.generation->name()); 387 if (request.generation->is_old()) { 388 service_concurrent_old_cycle(request); 389 } else { 390 service_concurrent_cycle(request.generation, request.cause, false); 391 } 392 } 393 394 void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(const ShenandoahGCRequest& request) { 395 ShenandoahOldGeneration* old_generation = _heap->old_generation(); 396 ShenandoahYoungGeneration* young_generation = _heap->young_generation(); 397 ShenandoahOldGeneration::State original_state = old_generation->state(); 398 399 TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters()); 400 401 switch (original_state) { 402 case ShenandoahOldGeneration::FILLING: { 403 ShenandoahGCSession session(request.cause, old_generation); 404 assert(gc_mode() == servicing_old, "Filling should be servicing old"); 405 _allow_old_preemption.set(); 406 old_generation->entry_coalesce_and_fill(); 407 _allow_old_preemption.unset(); 408 409 // Before bootstrapping begins, we must acknowledge any cancellation request. 410 // If the gc has not been cancelled, this does nothing. If it has been cancelled, 411 // this will clear the cancellation request and exit before starting the bootstrap 412 // phase. This will allow the young GC cycle to proceed normally. If we do not 413 // acknowledge the cancellation request, the subsequent young cycle will observe 414 // the request and essentially cancel itself. 415 if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) { 416 log_info(gc, thread)("Preparation for old generation cycle was cancelled"); 417 return; 418 } 419 420 // Coalescing threads completed and nothing was cancelled. it is safe to transition from this state. 421 old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP); 422 return; 423 } 424 case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP: 425 old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING); 426 case ShenandoahOldGeneration::BOOTSTRAPPING: { 427 // Configure the young generation's concurrent mark to put objects in 428 // old regions into the concurrent mark queues associated with the old 429 // generation. The young cycle will run as normal except that rather than 430 // ignore old references it will mark and enqueue them in the old concurrent 431 // task queues but it will not traverse them. 432 set_gc_mode(bootstrapping_old); 433 young_generation->set_old_gen_task_queues(old_generation->task_queues()); 434 service_concurrent_cycle(young_generation, request.cause, true); 435 process_phase_timings(); 436 if (_heap->cancelled_gc()) { 437 // Young generation bootstrap cycle has failed. Concurrent mark for old generation 438 // is going to resume after degenerated bootstrap cycle completes. 439 log_info(gc)("Bootstrap cycle for old generation was cancelled"); 440 return; 441 } 442 443 assert(_degen_point == ShenandoahGC::_degenerated_unset, "Degen point should not be set if gc wasn't cancelled"); 444 445 // From here we will 'resume' the old concurrent mark. This will skip reset 446 // and init mark for the concurrent mark. All of that work will have been 447 // done by the bootstrapping young cycle. 448 set_gc_mode(servicing_old); 449 old_generation->transition_to(ShenandoahOldGeneration::MARKING); 450 } 451 case ShenandoahOldGeneration::MARKING: { 452 ShenandoahGCSession session(request.cause, old_generation); 453 bool marking_complete = resume_concurrent_old_cycle(old_generation, request.cause); 454 if (marking_complete) { 455 assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking"); 456 if (original_state == ShenandoahOldGeneration::MARKING) { 457 _heap->mmu_tracker()->record_old_marking_increment(true); 458 _heap->log_heap_status("At end of Concurrent Old Marking finishing increment"); 459 } 460 } else if (original_state == ShenandoahOldGeneration::MARKING) { 461 _heap->mmu_tracker()->record_old_marking_increment(false); 462 _heap->log_heap_status("At end of Concurrent Old Marking increment"); 463 } 464 break; 465 } 466 default: 467 fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state())); 468 } 469 } 470 471 bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) { 472 assert(_heap->is_concurrent_old_mark_in_progress(), "Old mark should be in progress"); 473 log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks()); 474 475 // We can only tolerate being cancelled during concurrent marking or during preparation for mixed 476 // evacuation. This flag here (passed by reference) is used to control precisely where the regulator 477 // is allowed to cancel a GC. 478 ShenandoahOldGC gc(generation, _allow_old_preemption); 479 if (gc.collect(cause)) { 480 _heap->notify_gc_progress(); 481 generation->record_success_concurrent(false); 482 } 483 484 if (_heap->cancelled_gc()) { 485 // It's possible the gc cycle was cancelled after the last time the collection checked for cancellation. In which 486 // case, the old gc cycle is still completed, and we have to deal with this cancellation. We set the degeneration 487 // point to be outside the cycle because if this is an allocation failure, that is what must be done (there is no 488 // degenerated old cycle). If the cancellation was due to a heuristic wanting to start a young cycle, then we are 489 // not actually going to a degenerated cycle, so don't set the degeneration point here. 490 if (ShenandoahCollectorPolicy::is_allocation_failure(cause)) { 491 check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle); 492 } else if (cause == GCCause::_shenandoah_concurrent_gc) { 493 _heap->shenandoah_policy()->record_interrupted_old(); 494 } 495 return false; 496 } 497 return true; 498 } 499 500 // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during 501 // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there. 502 // If second allocation failure happens during Degenerated GC cycle (for example, when GC 503 // tries to evac something and no memory is available), cycle degrades to Full GC. 504 // 505 // There are also a shortcut through the normal cycle: immediate garbage shortcut, when 506 // heuristics says there are no regions to compact, and all the collection comes from immediately 507 // reclaimable regions. 508 // 509 // ................................................................................................ 510 // 511 // (immediate garbage shortcut) Concurrent GC 512 // /-------------------------------------------\ 513 // | | 514 // | | 515 // | | 516 // | v 517 // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END] 518 // | | | ^ 519 // | (af) | (af) | (af) | 520 // ..................|....................|.................|..............|....................... 521 // | | | | 522 // | | | | Degenerated GC 523 // v v v | 524 // STW Mark ----------> STW Evac ----> STW Update-Refs ----->o 525 // | | | ^ 526 // | (af) | (af) | (af) | 527 // ..................|....................|.................|..............|....................... 528 // | | | | 529 // | v | | Full GC 530 // \------------------->o<----------------/ | 531 // | | 532 // v | 533 // Full GC --------------------------/ 534 // 535 void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, 536 GCCause::Cause cause, 537 bool do_old_gc_bootstrap) { 538 // At this point: 539 // if (generation == YOUNG), this is a normal young cycle or a bootstrap cycle 540 // if (generation == GLOBAL), this is a GLOBAL cycle 541 // In either case, we want to age old objects if this is an aging cycle 542 maybe_set_aging_cycle(); 543 544 ShenandoahGCSession session(cause, generation); 545 TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters()); 546 547 assert(!generation->is_old(), "Old GC takes a different control path"); 548 549 ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap); 550 if (gc.collect(cause)) { 551 // Cycle is complete 552 _heap->notify_gc_progress(); 553 generation->record_success_concurrent(gc.abbreviated()); 554 } else { 555 assert(_heap->cancelled_gc(), "Must have been cancelled"); 556 check_cancellation_or_degen(gc.degen_point()); 557 } 558 559 const char* msg; 560 ShenandoahMmuTracker* mmu_tracker = _heap->mmu_tracker(); 561 if (generation->is_young()) { 562 if (_heap->cancelled_gc()) { 563 msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" : 564 "At end of Interrupted Concurrent Young GC"; 565 } else { 566 // We only record GC results if GC was successful 567 msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" : 568 "At end of Concurrent Young GC"; 569 if (_heap->collection_set()->has_old_regions()) { 570 mmu_tracker->record_mixed(get_gc_id()); 571 } else if (do_old_gc_bootstrap) { 572 mmu_tracker->record_bootstrap(get_gc_id()); 573 } else { 574 mmu_tracker->record_young(get_gc_id()); 575 } 576 } 577 } else { 578 assert(generation->is_global(), "If not young, must be GLOBAL"); 579 assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC"); 580 if (_heap->cancelled_gc()) { 581 msg = "At end of Interrupted Concurrent GLOBAL GC"; 582 } else { 583 // We only record GC results if GC was successful 584 msg = "At end of Concurrent Global GC"; 585 mmu_tracker->record_global(get_gc_id()); 586 } 587 } 588 _heap->log_heap_status(msg); 589 } 590 591 bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) { 592 if (!_heap->cancelled_gc()) { 593 return false; 594 } 595 596 if (_heap->cancelled_cause() == GCCause::_shenandoah_stop_vm 597 || _heap->cancelled_cause() == GCCause::_shenandoah_concurrent_gc) { 598 log_debug(gc, thread)("Cancellation detected, reason: %s", GCCause::to_string(_heap->cancelled_cause())); 599 return true; 600 } 601 602 if (ShenandoahCollectorPolicy::is_allocation_failure(_heap->cancelled_cause())) { 603 assert(_degen_point == ShenandoahGC::_degenerated_unset, 604 "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); 605 _degen_point = point; 606 log_debug(gc, thread)("Cancellation detected:, reason: %s, degen point: %s", 607 GCCause::to_string(_heap->cancelled_cause()), 608 ShenandoahGC::degen_point_to_string(_degen_point)); 609 return true; 610 } 611 612 fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking"); 613 return false; 614 } 615 616 void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) { 617 ShenandoahGCSession session(cause, _heap->global_generation()); 618 maybe_set_aging_cycle(); 619 ShenandoahFullGC gc; 620 gc.collect(cause); 621 _degen_point = ShenandoahGC::_degenerated_unset; 622 } 623 624 void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(const ShenandoahGCRequest& request) { 625 assert(_degen_point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); 626 627 ShenandoahGCSession session(request.cause, request.generation); 628 629 ShenandoahDegenGC gc(_degen_point, request.generation); 630 gc.collect(request.cause); 631 _degen_point = ShenandoahGC::_degenerated_unset; 632 633 assert(_heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks"); 634 if (request.generation->is_global()) { 635 assert(_heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks"); 636 assert(_heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks"); 637 } else { 638 assert(request.generation->is_young(), "Expected degenerated young cycle, if not global."); 639 ShenandoahOldGeneration* old = _heap->old_generation(); 640 if (old->is_bootstrapping()) { 641 old->transition_to(ShenandoahOldGeneration::MARKING); 642 } 643 } 644 } 645 646 void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) { 647 if (ShenandoahCollectorPolicy::is_allocation_failure(cause)) { 648 // GC should already be cancelled. Here we are just notifying the control thread to 649 // wake up and handle the cancellation request, so we don't need to set _requested_gc_cause. 650 notify_cancellation(cause); 651 } else if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) { 652 handle_requested_gc(cause); 653 } 654 } 655 656 bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGeneration* generation) { 657 if (_heap->cancelled_gc()) { 658 // Ignore subsequent requests from the heuristics 659 log_debug(gc, thread)("Reject request for concurrent gc: gc_requested: %s, gc_cancelled: %s", 660 GCCause::to_string(_requested_gc_cause), 661 BOOL_TO_STR(_heap->cancelled_gc())); 662 return false; 663 } 664 665 MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); 666 if (gc_mode() == servicing_old) { 667 if (!preempt_old_marking(generation)) { 668 log_debug(gc, thread)("Cannot start young, old collection is not preemptible"); 669 return false; 670 } 671 672 // Cancel the old GC and wait for the control thread to start servicing the new request. 673 log_info(gc)("Preempting old generation mark to allow %s GC", generation->name()); 674 while (gc_mode() == servicing_old) { 675 ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc); 676 notify_cancellation(ml, GCCause::_shenandoah_concurrent_gc); 677 ml.wait(); 678 } 679 return true; 680 } 681 682 if (gc_mode() == none) { 683 const size_t current_gc_id = get_gc_id(); 684 while (gc_mode() == none && current_gc_id == get_gc_id()) { 685 if (_requested_gc_cause != GCCause::_no_gc) { 686 log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(_requested_gc_cause)); 687 return false; 688 } 689 690 notify_control_thread(ml, GCCause::_shenandoah_concurrent_gc, generation); 691 ml.wait(); 692 } 693 return true; 694 } 695 696 697 log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s", 698 gc_mode_name(gc_mode()), 699 BOOL_TO_STR(_allow_old_preemption.is_set())); 700 return false; 701 } 702 703 void ShenandoahGenerationalControlThread::notify_control_thread(GCCause::Cause cause, ShenandoahGeneration* generation) { 704 MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); 705 notify_control_thread(ml, cause, generation); 706 } 707 708 void ShenandoahGenerationalControlThread::notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation) { 709 assert(_control_lock.is_locked(), "Request lock must be held here"); 710 log_debug(gc, thread)("Notify control (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(cause), generation->name()); 711 _requested_gc_cause = cause; 712 _requested_generation = generation; 713 ml.notify(); 714 } 715 716 void ShenandoahGenerationalControlThread::notify_cancellation(GCCause::Cause cause) { 717 MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); 718 notify_cancellation(ml, cause); 719 } 720 721 void ShenandoahGenerationalControlThread::notify_cancellation(MonitorLocker& ml, GCCause::Cause cause) { 722 assert(_heap->cancelled_gc(), "GC should already be cancelled"); 723 log_debug(gc,thread)("Notify control (%s): %s", gc_mode_name(gc_mode()), GCCause::to_string(cause)); 724 ml.notify(); 725 } 726 727 bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGeneration* generation) { 728 return generation->is_young() && _allow_old_preemption.try_unset(); 729 } 730 731 void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) { 732 // For normal requested GCs (System.gc) we want to block the caller. However, 733 // for whitebox requested GC, we want to initiate the GC and return immediately. 734 // The whitebox caller thread will arrange for itself to wait until the GC notifies 735 // it that has reached the requested breakpoint (phase in the GC). 736 if (cause == GCCause::_wb_breakpoint) { 737 notify_control_thread(cause, ShenandoahHeap::heap()->global_generation()); 738 return; 739 } 740 741 // Make sure we have at least one complete GC cycle before unblocking 742 // from the explicit GC request. 743 // 744 // This is especially important for weak references cleanup and/or native 745 // resources (e.g. DirectByteBuffers) machinery: when explicit GC request 746 // comes very late in the already running cycle, it would miss lots of new 747 // opportunities for cleanup that were made available before the caller 748 // requested the GC. 749 750 MonitorLocker ml(&_gc_waiters_lock); 751 size_t current_gc_id = get_gc_id(); 752 const size_t required_gc_id = current_gc_id + 1; 753 while (current_gc_id < required_gc_id && !should_terminate()) { 754 // Make requests to run a global cycle until at least one is completed 755 notify_control_thread(cause, ShenandoahHeap::heap()->global_generation()); 756 ml.wait(); 757 current_gc_id = get_gc_id(); 758 } 759 } 760 761 void ShenandoahGenerationalControlThread::notify_gc_waiters() { 762 MonitorLocker ml(&_gc_waiters_lock); 763 ml.notify_all(); 764 } 765 766 const char* ShenandoahGenerationalControlThread::gc_mode_name(GCMode mode) { 767 switch (mode) { 768 case none: return "idle"; 769 case concurrent_normal: return "normal"; 770 case stw_degenerated: return "degenerated"; 771 case stw_full: return "full"; 772 case servicing_old: return "old"; 773 case bootstrapping_old: return "bootstrap"; 774 case stopped: return "stopped"; 775 default: return "unknown"; 776 } 777 } 778 779 void ShenandoahGenerationalControlThread::set_gc_mode(GCMode new_mode) { 780 MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); 781 set_gc_mode(ml, new_mode); 782 } 783 784 void ShenandoahGenerationalControlThread::set_gc_mode(MonitorLocker& ml, GCMode new_mode) { 785 if (_gc_mode != new_mode) { 786 log_debug(gc, thread)("Transition from: %s to: %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode)); 787 EventMark event("Control thread transition from: %s, to %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode)); 788 _gc_mode = new_mode; 789 ml.notify_all(); 790 } 791 }