/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _requested_generation(select_global_generation()),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _allocs_seen(0),
  _mode(none) {
  set_name("Shenandoah Control Thread");
  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  ShenandoahGenerationType generation = select_global_generation();
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;

  // The shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10-th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
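  // For example, a ShenandoahUncommitDelay of 300000 ms (5 minutes) yields a
  // shrink_period of 30 seconds.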
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  // Heuristics are notified here of allocation failures and other outcomes
  // of the cycle. They're also used here to control whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  bool old_bootstrap_requested = false;
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && is_implicit_gc(requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->mode()->is_generational() ?
                heap->young_generation() : heap->global_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->type();
      bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();

      // Do not bother with degenerated cycle if old generation evacuation failed
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && !old_gen_evacuation_failed) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = select_global_generation();
        set_gc_mode(stw_full);
      }
    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      generation = select_global_generation();
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        set_gc_mode(stw_full);
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      generation = select_global_generation();
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        set_gc_mode(default_mode);

        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        set_gc_mode(stw_full);
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
          generation = YOUNG;
        } else if (_requested_generation == OLD && !old_bootstrap_requested) {
          // Arrange to perform a young GC immediately followed by a bootstrap OLD GC.  OLD GC typically requires more
          // than twice the time required for YOUNG GC, so we run a YOUNG GC to replenish the YOUNG allocation pool before
          // we start the longer OLD GC effort.
          old_bootstrap_requested = true;
          generation = YOUNG;
        } else {
          // If (old_bootstrap_requested && (_requested_generation == OLD)), this starts the bootstrap GC that
          // immediately follows the preparatory young GC.
          // But we will abandon the planned bootstrap GC if a GLOBAL GC has now been requested.
          generation = _requested_generation;
          old_bootstrap_requested = false;
        }
        // Preemption was requested or this is a regular cycle
        cause = GCCause::_shenandoah_concurrent_gc;
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == select_global_generation()) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }

        // Don't want to spin in this loop and start a cycle every time, so
        // clear requested gc cause. This creates a race with callers of the
        // blocking 'request_gc' method, but there it loops and resets the
        // '_requested_gc_cause' until a full cycle is completed.
        _requested_gc_cause = GCCause::_no_gc;
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress",
                     heap->is_concurrent_old_mark_in_progress() ? "" : " NOT",
                     heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT");

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
      }
    }

    // Blow away all soft references on this cycle if we are handling an allocation failure,
    // an implicit or explicit GC request, or if we are requested to do so unconditionally.
    if (generation == select_global_generation() && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (_mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
      // In case this is a degenerated cycle, remember whether original cycle was aging.
      bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);

      switch (_mode) {
        case concurrent_normal: {
          // At this point:
          //  if (generation == YOUNG), this is a normal YOUNG cycle
          //  if (generation == OLD), this is a bootstrap OLD cycle
          //  if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
          // In all three cases, we want to age old objects if this is an aging cycle
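          // age_period counts down the cycles between aging cycles: when it reaches zero,
          // this cycle ages objects and the counter resets to ShenandoahAgingCyclePeriod - 1.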
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_concurrent_normal_cycle(heap, generation, cause);
          break;
        }
        case stw_degenerated: {
          heap->set_aging_cycle(was_aging_cycle);
          if (!service_stw_degenerated_cycle(cause, degen_point)) {
            // The degenerated GC was upgraded to a Full GC
            generation = select_global_generation();
          }
          break;
        }
        case stw_full: {
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_stw_full_cycle(cause);
          break;
        }
        case servicing_old: {
          assert(generation == OLD, "Expected old generation here");
          GCIdMark gc_id_mark;
          service_concurrent_old_cycle(heap, cause);
          break;
        }
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        assert(generation == select_global_generation(), "Only unload classes during GLOBAL cycle");
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Let the pacer know how much allocation we have seen
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Don't wait around if there was an allocation failure - start the next cycle immediately.
    if (!is_alloc_failure_gc()) {
      if (old_bootstrap_requested) {
        _requested_generation = OLD;
        _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
      } else {
        // The timed wait is necessary because this thread has a responsibility to send
        // 'alloc_words' to the pacer when it does not perform a GC.
        MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
        lock.wait(ShenandoahControlIntervalMax);
      }
    } else {
      // In case of alloc_failure, abandon any plans to do immediate OLD Bootstrap
      old_bootstrap_requested = false;
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {
  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
  ShenandoahCycleStats         evac_stats   = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the control thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// A degenerated young cycle simply completes the young collection. Both young
// and old degenerated cycles may upgrade to a Full GC. A Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahControlThread::service_concurrent_normal_cycle(ShenandoahHeap* heap,
                                                              const ShenandoahGenerationType generation,
                                                              GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGeneration* the_generation = nullptr;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (YOUNG)");
      the_generation = heap->young_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (OLD)");
      the_generation = heap->old_generation();
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    case GLOBAL_GEN: {
      log_info(gc, ergo)("Start GC cycle (GLOBAL)");
      the_generation = heap->global_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    case GLOBAL_NON_GEN: {
      log_info(gc, ergo)("Start GC cycle");
      the_generation = heap->global_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahControlThread::service_concurrent_old_cycle(ShenandoahHeap* heap, GCCause::Cause &cause) {
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

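  // Note: the cases below fall through intentionally. The old cycle resumes from
  // whatever state it is currently in and advances through the remaining states,
  // returning early if the cycle is cancelled along the way.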
  switch (original_state) {
    case ShenandoahOldGeneration::WAITING_FOR_FILL:
    case ShenandoahOldGeneration::IDLE: {
      assert(!heap->is_concurrent_old_mark_in_progress(), "Old already in progress");
      assert(old_generation->task_queues()->is_empty(), "Old mark queues should be empty");
    }
    case ShenandoahOldGeneration::FILLING: {
      _allow_old_preemption.set();
      ShenandoahGCSession session(cause, old_generation);
      old_generation->prepare_gc();
      _allow_old_preemption.unset();

      if (heap->is_prepare_for_old_mark_in_progress()) {
        // Coalescing threads detected the cancellation request and aborted. Stay
        // in this state so the control thread may resume the coalescing work.
        assert(old_generation->state() == ShenandoahOldGeneration::FILLING, "Prepare for mark should be in progress");
        assert(heap->cancelled_gc(), "Preparation for GC is not complete, expected cancellation");
      }

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing threads completed and nothing was cancelled. It is safe to transition
      // to the bootstrapping state now.
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
    }
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal, except that rather than
      // ignoring old references, it will mark and enqueue them in the old concurrent
      // task queues, but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // Young generation bootstrap cycle has failed. Concurrent mark for old generation
        // is going to resume after degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->mmu_tracker()->record_old_marking_increment(old_generation, GCId::current(), true,
                                                            heap->collection_set()->has_old_regions());
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->mmu_tracker()->record_old_marking_increment(old_generation, GCId::current(), false,
                                                          heap->collection_set()->has_old_regions());
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation. In that case, the
    // old gc cycle has still completed, but we have to deal with this
    // cancellation. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
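    // Clamp the new soft max between the minimum and maximum heap capacity.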
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a
  // Degenerated GC and completes the collection there. If a second allocation failure
  // happens during the Degenerated GC cycle (for example, when the GC tries to evacuate
  // something and no memory is available), the cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut,
  // taken when the heuristics say there are no regions to compact, and all the collected
  // garbage comes from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                       ShenandoahGeneration* generation,
                                                       GCCause::Cause& cause,
                                                       bool do_old_gc_bootstrap) {
  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
    assert(!generation->is_old(), "Old GC takes a different control path");
    // Concurrent young-gen collection degenerates to young
    // collection.  Same for global collections.
    _degen_generation = generation;
  }
  const char* msg;
  if (heap->mode()->is_generational()) {
    ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
    if (generation->is_young()) {
      if (heap->cancelled_gc()) {
        msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC":
                                      "At end of Interrupted Concurrent Young GC";
      } else {
        // We only record GC results if GC was successful
        msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC":
                                      "At end of Concurrent Young GC";
        if (heap->collection_set()->has_old_regions()) {
          bool mixed_is_done = (heap->old_heuristics()->unprocessed_old_collection_candidates() == 0);
          mmu_tracker->record_mixed(generation, get_gc_id(), mixed_is_done);
        } else if (do_old_gc_bootstrap) {
          mmu_tracker->record_bootstrap(generation, get_gc_id(), heap->collection_set()->has_old_regions());
        } else {
          mmu_tracker->record_young(generation, get_gc_id());
        }
      }
    } else {
      assert(generation->is_global(), "If not young, must be GLOBAL");
      assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
      if (heap->cancelled_gc()) {
        msg = "At end of Interrupted Concurrent GLOBAL GC";
      } else {
        // We only record GC results if GC was successful
        msg = "At end of Concurrent Global GC";
        mmu_tracker->record_global(generation, get_gc_id());
      }
    }
  } else {
    msg = heap->cancelled_gc() ? "At end of cancelled GC" :
                                 "At end of GC";
  }
  heap->log_heap_status(msg);
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If old generation was cancelled for an allocation failure, we wouldn't
    // make it to this case. The calling code is responsible for forcing a
    // cancellation due to allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);

  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

bool ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause,
                                                            ShenandoahGC::ShenandoahDegenPoint point) {
  assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->is_global()) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = heap->old_generation();
    if (old->state() == ShenandoahOldGeneration::BOOTSTRAPPING && !gc.upgraded_to_full()) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }

  _degen_generation->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
  return !gc.upgraded_to_full();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

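// Explicit GCs are those requested directly by the user or by serviceability tools
// (for example, System.gc()). Implicit GCs cover every other requested cause except
// Shenandoah's own concurrent-cycle trigger.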
bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
  return !is_explicit_gc(cause) &&
          (cause != GCCause::_shenandoah_concurrent_gc);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_codecache_GC_aggressive ||
         cause == GCCause::_codecache_GC_threshold ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_young_gc ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here: %s", GCCause::to_string(cause));

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

bool ShenandoahControlThread::request_concurrent_gc(ShenandoahGenerationType generation) {
  if (_preemption_requested.is_set() || _gc_requested.is_set() || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    return false;
  }

  if (_mode == none) {
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    notify_control_thread();
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.wait();
    return true;
  }

  if (preempt_old_marking(generation)) {
    log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation));
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.wait();
    return true;
  }

  return false;
}

void ShenandoahControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

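// Only a young collection may preempt old-generation marking, and only while the old
// cycle has preemption enabled (see _allow_old_preemption).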
bool ShenandoahControlThread::preempt_old_marking(ShenandoahGenerationType generation) {
  return (generation == YOUNG) && _allow_old_preemption.try_unset();
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // cleanup opportunities that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although the gc request is set under _gc_waiters_lock, the read side (run_service())
    // does not take the lock. We need to enforce the following order, so that the read side
    // sees the latest requested gc cause when the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();
    notify_control_thread();
    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}

const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    default:                return "unknown";
  }
}

void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    _mode = new_mode;
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.notify_all();
  }
}

ShenandoahGenerationType ShenandoahControlThread::select_global_generation() {
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    return GLOBAL_GEN;
  } else {
    return GLOBAL_NON_GEN;
  }
}