/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _requested_generation(GenerationMode::GLOBAL),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _allocs_seen(0),
  _mode(none) {
  set_name("Shenandoah Control Thread");
  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

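// Periodic timer task: pushes pending (and, when forced, unconditional) performance
// counter updates on behalf of the control thread.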
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

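// The main loop of the control thread. Each iteration selects which GC mode to run:
// allocation failure takes precedence, followed by explicit and implicit GC requests,
// then regulator-requested concurrent cycles or resumption of old-generation marking.
// The selected cycle is run, waiters are notified, uncommit of unused memory is
// considered, and the thread waits for the next request.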
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GenerationMode generation = GLOBAL;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we hit the
  // shrinking opportunity with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  // Heuristics are notified here of allocation failures and other outcomes
  // of the cycle. They are also used here to decide whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && is_implicit_gc(requested_gc_cause);

    // The number of allocations seen by this control loop iteration.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->mode()->is_generational() ? heap->young_generation() : heap->global_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume.");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->generation_mode();
      bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();

      // Do not bother with degenerated cycle if old generation evacuation failed.
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && !old_gen_evacuation_failed) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = GLOBAL;
        set_gc_mode(stw_full);
      }
    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      generation = GLOBAL;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        set_gc_mode(stw_full);
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      generation = GLOBAL;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        set_gc_mode(default_mode);

        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        set_gc_mode(stw_full);
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking.");
          generation = YOUNG;
        } else {
          generation = _requested_generation;
        }

        // Preemption was requested or this is a regular cycle.
        cause = GCCause::_shenandoah_concurrent_gc;
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress.
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == GLOBAL) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }

        // We don't want to spin in this loop and start a cycle on every iteration, so
        // clear the requested gc cause. This creates a race with callers of the
        // blocking 'request_gc' method, but that method loops and resets
        // '_requested_gc_cause' until a full cycle has completed.
        _requested_gc_cause = GCCause::_no_gc;
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old gc: marking=%s, preparing=%s",
                     BOOL_TO_STR(heap->is_concurrent_old_mark_in_progress()),
                     BOOL_TO_STR(heap->is_prepare_for_old_mark_in_progress()));

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
      }
    }

    // Clear all soft references in this cycle if we are handling an allocation failure,
    // an implicit or explicit GC request, or if we are asked to do so unconditionally.
    if (generation == GLOBAL && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (_mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
      // In case this is a degenerated cycle, remember whether original cycle was aging.
      bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);
      {
        switch (_mode) {
          case concurrent_normal: {
            // At this point:
            //  if (generation == YOUNG), this is a normal YOUNG cycle
            //  if (generation == OLD), this is a bootstrap OLD cycle
            //  if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
            // In all three cases, we want to age old objects if this is an aging cycle
            if (age_period-- == 0) {
              heap->set_aging_cycle(true);
              age_period = ShenandoahAgingCyclePeriod - 1;
            }
            service_concurrent_normal_cycle(heap, generation, cause);
            break;
          }
          case stw_degenerated: {
            heap->set_aging_cycle(was_aging_cycle);
            if (!service_stw_degenerated_cycle(cause, degen_point)) {
              // The degenerated GC was upgraded to a Full GC
              generation = GLOBAL;
            }
            break;
          }
          case stw_full: {
            if (age_period-- == 0) {
              heap->set_aging_cycle(true);
              age_period = ShenandoahAgingCyclePeriod - 1;
            }
            service_stw_full_cycle(cause);
            break;
          }
          case servicing_old: {
            assert(generation == OLD, "Expected old generation here");
            GCIdMark gc_id_mark;
            service_concurrent_old_cycle(heap, cause);
            break;
          }
          default: {
            ShouldNotReachHere();
          }
        }
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        assert(generation == GLOBAL, "Only unload classes during GLOBAL cycle");
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocation words we have seen to the pacer.
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Don't wait around if there was an allocation failure - start the next cycle immediately.
    if (!is_alloc_failure_gc()) {
      // The timed wait is necessary because this thread has a responsibility to send
      // 'alloc_words' to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

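// Flush per-worker and pacer statistics into the just-completed cycle, print the
// cycle statistics if gc+stats logging is enabled, and fold the cycle data into
// the global totals.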
void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {

  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahCycleStats evac_stats = heap->evac_tracker()->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      ShenandoahEvacuationTracker::print_evacuations_on(&ls, &evac_stats.workers,
                                                             &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();

}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// A degenerated young cycle completes the interrupted young cycle.  Young
// and old degenerated cycles may upgrade to Full GC.  Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahControlThread::service_concurrent_normal_cycle(
  const ShenandoahHeap* heap, const GenerationMode generation, GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (YOUNG)");
      service_concurrent_cycle(heap->young_generation(), cause, false);
      break;
    }
    case GLOBAL: {
      log_info(gc, ergo)("Start GC cycle (GLOBAL)");
      service_concurrent_cycle(heap->global_generation(), cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (OLD)");
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    default:
      ShouldNotReachHere();
  }
  const char* msg;
  if (heap->mode()->is_generational()) {
    if (heap->cancelled_gc()) {
      msg = (generation == YOUNG) ? "At end of Interrupted Concurrent Young GC" : "At end of Interrupted Concurrent Bootstrap GC";
    } else {
      msg = (generation == YOUNG) ? "At end of Concurrent Young GC" : "At end of Concurrent Bootstrap GC";
    }
  } else {
    msg = heap->cancelled_gc() ? "At end of cancelled GC" : "At end of GC";
  }
  heap->log_heap_status(msg);
}

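// Drive the old-generation collection state machine. Note that the switch below
// intentionally falls through: depending on the old generation's current state,
// this prepares (fills) old regions, runs a bootstrapping young cycle that seeds
// the old mark queues, and/or resumes concurrent old marking.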
void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap* heap, GCCause::Cause &cause) {

  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::IDLE: {
      assert(!heap->is_concurrent_old_mark_in_progress(), "Old already in progress.");
      assert(old_generation->task_queues()->is_empty(), "Old mark queues should be empty.");
    }
    case ShenandoahOldGeneration::FILLING: {
      _allow_old_preemption.set();
      ShenandoahGCSession session(cause, old_generation);
      old_generation->prepare_gc();
      _allow_old_preemption.unset();

      if (heap->is_prepare_for_old_mark_in_progress()) {
        assert(old_generation->state() == ShenandoahOldGeneration::FILLING, "Prepare for mark should be in progress.");
        return;
      }

      assert(old_generation->state() == ShenandoahOldGeneration::BOOTSTRAPPING, "Finished with filling, should be bootstrapping.");
    }
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal except that rather than
      // ignore old references it will mark and enqueue them in the old concurrent
      // task queues, but it will not traverse them.
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // The young generation bootstrap cycle has failed. Concurrent mark for the old
        // generation will resume after the degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled.");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle. In order to simplify the debugging
      // effort, the old cycle will ONLY complete the mark phase. No actual
      // collection of the old generation is happening here.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking.");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      log_error(gc)("Unexpected state for old GC: %d", old_generation->state());
      ShouldNotReachHere();
  }
}

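// Resume concurrent marking of the old generation. Returns true if this marking
// increment completed, and false if the cycle was cancelled (by an allocation
// failure or by a young collection preempting old marking).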
bool ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {

  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued.", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation. In that case, the old
    // gc cycle is still complete, and we have to deal with this
    // cancellation here. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

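// Check whether SoftMaxHeapSize has changed since the last cycle. The new value is
// clamped to [min_capacity, max_capacity]; returns true (and installs the new soft
// max) only if the clamped value differs from the current soft max capacity.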
bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens
  // during any of the concurrent phases, the cycle first degrades to a Degenerated GC and
  // completes GC there. If a second allocation failure happens during the Degenerated GC cycle
  // (for example, when the GC tries to evacuate something and no memory is available), the cycle
  // degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
  // the heuristics say there are no regions to compact, and all the collected garbage comes from
  // immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahControlThread::service_concurrent_cycle(const ShenandoahHeap* heap, ShenandoahGeneration* generation,
                                                       GCCause::Cause &cause, bool do_old_gc_bootstrap) {
  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
    assert(generation->generation_mode() != OLD, "Old GC takes a different control path");
    // Concurrent young-gen collection degenerates to young
    // collection.  Same for global collections.
    _degen_generation = generation;
  }
}

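// Decide how to respond to a cancelled cycle. Returns true if the current cycle
// should stop: we are shutting down, an allocation failure requires a degenerated
// cycle at 'point', or a young collection has preempted old marking. Returns false
// if the GC was not cancelled at all.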
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If old generation was cancelled for an allocation failure, we wouldn't
    // make it to this case. The calling code is responsible for forcing a
    // cancellation due to allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking.");
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);

  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

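// Run a stop-the-world degenerated cycle for _degen_generation, resuming from the
// phase recorded in 'point'. Returns true if the degenerated cycle completed, and
// false if it had to be upgraded to a Full GC.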
bool ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->generation_mode() == GLOBAL) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->generation_mode() == YOUNG, "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old_generation = (ShenandoahOldGeneration*) heap->old_generation();
    if (old_generation->state() == ShenandoahOldGeneration::BOOTSTRAPPING && !gc.upgraded_to_full()) {
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }

  _degen_generation->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
  return !gc.upgraded_to_full();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
  return !is_explicit_gc(cause) && cause != GCCause::_shenandoah_concurrent_gc;
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_codecache_GC_aggressive ||
         cause == GCCause::_codecache_GC_threshold ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_young_gc ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here: %s", GCCause::to_string(cause));

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

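// Called from the regulator thread to request a concurrent cycle for the given
// generation. Returns false if a request is already pending or a GC is being
// cancelled. Otherwise, the request either starts a new cycle (when the control
// thread is idle) or preempts old-generation marking, and the caller waits on
// _regulator_lock until the control thread changes its GC mode.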
bool ShenandoahControlThread::request_concurrent_gc(GenerationMode generation) {
  if (_preemption_requested.is_set() || _gc_requested.is_set() || ShenandoahHeap::heap()->cancelled_gc()) {
    // ignore subsequent requests from the heuristics
    return false;
  }

  if (_mode == none) {
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    notify_control_thread();
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.wait();
    return true;
  }

  if (preempt_old_marking(generation)) {
    log_info(gc)("Preempting old generation mark to allow %s GC.", generation_name(generation));
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.wait();
    return true;
  }

  return false;
}

void ShenandoahControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

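// Old-generation marking may only be preempted by a young collection, and only
// while the old cycle currently allows it (_allow_old_preemption is set).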
bool ShenandoahControlThread::preempt_old_marking(GenerationMode generation) {
  return generation == YOUNG && _allow_old_preemption.try_unset();
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although setting the gc request is done under _gc_waiters_lock, the read side
    // (run_service()) does not take the lock. We need to enforce the following order,
    // so that the read side sees the latest requested gc cause when the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();
    notify_control_thread();
    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

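// Called by a Java thread whose heap allocation failed. Schedules an allocation
// failure GC (the first failing thread also cancels any GC in progress) and blocks
// the caller until the allocation failure GC has completed.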
void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

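// Called when an allocation needed for evacuation fails. Unlike handle_alloc_failure(),
// this does not block the caller; it logs the first failure and forcefully cancels
// the GC.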
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

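// Push a pending counter update to the monitoring support, if one was requested
// via notify_heap_changed().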
void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}

const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    default:                return "unknown";
  }
}

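// Transition the control thread to a new GC mode, logging the change and notifying
// the regulator thread waiting on _regulator_lock.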
void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    _mode = new_mode;
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.notify_all();
  }
}