/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_gc),
  _requested_generation(select_global_generation()),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _allocs_seen(0),
  _mode(none) {
  set_name("Shenandoah Control Thread");
  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  const GCMode default_mode = concurrent_normal;
  ShenandoahGenerationType generation = select_global_generation();

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;

  // The shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we detect the need
  // to shrink with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();

  // Heuristics are notified here of allocation failures and other outcomes
  // of the cycle. They are also used here to control whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
    const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set();

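    // Consume any pending request atomically: xchg returns the previously requested cause and
    // resets the slot to _no_gc in one step, so a request posted by another thread is not lost.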
    GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc);

    const bool explicit_gc_requested = is_explicit_gc(cause);
    const bool implicit_gc_requested = is_implicit_gc(cause);

    // This control loop iteration has seen this many allocations.
    const size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    const bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->mode()->is_generational() ?
                heap->young_generation() : heap->global_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->type();
      bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();

      // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
          !old_gen_evacuation_failed && !humongous_alloc_failure_pending) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        // TODO: if humongous_alloc_failure_pending, there might be value in trying a "compacting" degen before
        // going all the way to full.  But it's a lot of work to implement this, and it may not provide value.
        // A compacting degen can move young regions around without doing full old-gen mark (relying upon the
        // remembered set scan), so it might be faster than a full gc.
        //
        // Longer term, think about how to defragment humongous memory concurrently.

        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = select_global_generation();
        set_gc_mode(stw_full);
      }
    } else if (explicit_gc_requested) {
      generation = select_global_generation();
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        set_gc_mode(stw_full);
      }
    } else if (implicit_gc_requested) {
      generation = select_global_generation();
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        set_gc_mode(default_mode);

        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        set_gc_mode(stw_full);
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
          generation = YOUNG;
        } else {
          generation = _requested_generation;
        }

        // preemption was requested or this is a regular cycle
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == select_global_generation()) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress",
                     heap->is_concurrent_old_mark_in_progress() ? "" : " NOT",
                     heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT");

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
        heap->set_unload_classes(false);
      }
    }

    const bool gc_requested = (gc_mode() != none);
    assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set");

    if (gc_requested) {
      // Blow away all soft references on this cycle if we are handling an allocation failure,
      // an implicit or explicit GC request, or if we are asked to do so unconditionally.
      if (generation == select_global_generation() && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
        heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      }

      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
      // In case this is a degenerated cycle, remember whether original cycle was aging.
      const bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);

      switch (gc_mode()) {
        case concurrent_normal: {
          // At this point:
          //  if (generation == YOUNG), this is a normal YOUNG cycle
          //  if (generation == OLD), this is a bootstrap OLD cycle
          //  if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
          // In all three cases, we want to age old objects if this is an aging cycle
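          // Aging cycles occur every ShenandoahAgingCyclePeriod cycles; age_period counts down the cycles in between.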
          if (age_period-- == 0) {
             heap->set_aging_cycle(true);
             age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_concurrent_normal_cycle(heap, generation, cause);
          break;
        }
        case stw_degenerated: {
          heap->set_aging_cycle(was_aging_cycle);
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        }
        case stw_full: {
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_stw_full_cycle(cause);
          break;
        }
        case servicing_old: {
          assert(generation == OLD, "Expected old generation here");
          GCIdMark gc_id_mark;
          service_concurrent_old_cycle(heap, cause);
          break;
        }
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow pacer to know we have seen this many allocations
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle.
    if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) {
      // The timed wait is necessary because this thread has a responsibility to send
      // 'alloc_words' to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {
  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
  ShenandoahCycleStats         evac_stats   = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles degenerate to complete the interrupted young cycle.  Both young
// and old degenerated cycles may upgrade to Full GC.  Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahControlThread::service_concurrent_normal_cycle(ShenandoahHeap* heap,
                                                              const ShenandoahGenerationType generation,
                                                              GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGeneration* the_generation = nullptr;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (YOUNG)");
      the_generation = heap->young_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (OLD)");
      the_generation = heap->old_generation();
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    case GLOBAL_GEN: {
      log_info(gc, ergo)("Start GC cycle (GLOBAL)");
      the_generation = heap->global_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    case GLOBAL_NON_GEN: {
      log_info(gc, ergo)("Start GC cycle");
      the_generation = heap->global_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahControlThread::service_concurrent_old_cycle(ShenandoahHeap* heap, GCCause::Cause &cause) {
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::FILLING: {
      _allow_old_preemption.set();
      old_generation->entry_coalesce_and_fill();
      _allow_old_preemption.unset();

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing threads completed and nothing was cancelled. It is safe to transition from this state.
      old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      return;
    }
    case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
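      // Fall through: bootstrapping begins immediately once the transition is recorded.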
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal, except that rather than
      // ignoring old references it will mark and enqueue them in the old concurrent
      // task queues, but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // Young generation bootstrap cycle has failed. Concurrent mark for old generation
        // is going to resume after degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
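    // Fall through: resume the old mark that the bootstrap cycle has just started.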
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->mmu_tracker()->record_old_marking_increment(true);
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->mmu_tracker()->record_old_marking_increment(false);
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation, in which case the
    // old gc cycle is still complete, but we have to deal with this
    // cancellation. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
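    // Clamp the new soft max into [min_capacity, max_capacity] before applying it.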
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens during
  // any of the concurrent phases, the cycle first degrades to a Degenerated GC and completes the GC there.
  // If a second allocation failure happens during the Degenerated GC cycle (for example, when the GC
  // tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
  // the heuristics say there are no regions to compact and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                       ShenandoahGeneration* generation,
                                                       GCCause::Cause& cause,
                                                       bool do_old_gc_bootstrap) {
  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
    assert(!generation->is_old(), "Old GC takes a different control path");
    // Concurrent young-gen collection degenerates to young
    // collection.  Same for global collections.
    _degen_generation = generation;
  }
  const char* msg;
  if (heap->mode()->is_generational()) {
    ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
    if (generation->is_young()) {
      if (heap->cancelled_gc()) {
        msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC":
                                      "At end of Interrupted Concurrent Young GC";
      } else {
        // We only record GC results if GC was successful
        msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC":
                                      "At end of Concurrent Young GC";
        if (heap->collection_set()->has_old_regions()) {
          mmu_tracker->record_mixed(get_gc_id());
        } else if (do_old_gc_bootstrap) {
          mmu_tracker->record_bootstrap(get_gc_id());
        } else {
          mmu_tracker->record_young(get_gc_id());
        }
      }
    } else {
      assert(generation->is_global(), "If not young, must be GLOBAL");
      assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
      if (heap->cancelled_gc()) {
        msg = "At end of Interrupted Concurrent GLOBAL GC";
      } else {
        // We only record GC results if GC was successful
        msg = "At end of Concurrent Global GC";
        mmu_tracker->record_global(get_gc_id());
      }
    }
  } else {
    msg = heap->cancelled_gc() ? "At end of cancelled GC" :
                                 "At end of GC";
  }
  heap->log_heap_status(msg);
}

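// Returns true if the current cycle should stop: we are shutting down gracefully, an allocation
// failure requires degenerating at 'point', or a young collection has preempted old-generation marking.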
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    _preemption_requested.unset();
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If old generation was cancelled for an allocation failure, we wouldn't
    // make it to this case. The calling code is responsible for forcing a
    // cancellation due to allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause,
                                                            ShenandoahGC::ShenandoahDegenPoint point) {
  assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->is_global()) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = heap->old_generation();
    if (old->state() == ShenandoahOldGeneration::BOOTSTRAPPING) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
  return !is_explicit_gc(cause)
      && cause != GCCause::_shenandoah_concurrent_gc
      && cause != GCCause::_no_gc;
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_codecache_GC_aggressive ||
         cause == GCCause::_codecache_GC_threshold ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_young_gc ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here: %s", GCCause::to_string(cause));

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

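// Called by the regulator thread to request a concurrent cycle for the given generation. Returns true
// once the control thread has picked the request up (observed as a change of gc_mode), false if rejected.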
bool ShenandoahControlThread::request_concurrent_gc(ShenandoahGenerationType generation) {
  if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s",
                          BOOL_TO_STR(_preemption_requested.is_set()),
                          GCCause::to_string(_requested_gc_cause),
                          BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc()));
    return false;
  }

  if (gc_mode() == none) {
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    _requested_generation = generation;
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == none) {
      ml.wait();
    }
    return true;
  }

  if (preempt_old_marking(generation)) {
    assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode()));
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation));
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == servicing_old) {
      ml.wait();
    }
    return true;
  }

  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

void ShenandoahControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

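// Only a young collection may preempt old marking, and only while the old cycle permits it via
// _allow_old_preemption; try_unset() claims that permission atomically, so at most one request wins.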
bool ShenandoahControlThread::preempt_old_marking(ShenandoahGenerationType generation) {
  return (generation == YOUNG) && _allow_old_preemption.try_unset();
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // This races with the regulator thread to start a concurrent gc and the
    // control thread to clear it at the start of a cycle. Threads here are
    // allowed to escalate a heuristic's request for concurrent gc.
    GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing));
    }

    notify_control_thread();
    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");
  bool is_humongous = req.size() > ShenandoahHeapRegion::region_size_words();

  if (try_set_alloc_failure_gc(is_humongous)) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  bool is_humongous = (words > ShenandoahHeapRegion::region_size_words());

  if (try_set_alloc_failure_gc(is_humongous)) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  _humongous_alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

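// Returns true only for the first thread to report an allocation failure. The humongous flag is tracked
// separately so the control loop can skip a degenerated cycle that would not satisfy a humongous allocation.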
bool ShenandoahControlThread::try_set_alloc_failure_gc(bool is_humongous) {
  if (is_humongous) {
    _humongous_alloc_failure_gc.try_set();
  }
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}

const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    default:                return "unknown";
  }
}

void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    _mode = new_mode;
    ml.notify_all();
  }
}

ShenandoahGenerationType ShenandoahControlThread::select_global_generation() {
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    return GLOBAL_GEN;
  } else {
    return GLOBAL_NON_GEN;
  }
}