/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

#ifdef _WINDOWS
#pragma warning(disable : 4355)
#endif

SurrogateLockerThread* ShenandoahControlThread::_slt = NULL;

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  if (os::create_thread(this, os::cgc_thread)) {
    os::set_native_priority(this, os::java_to_os_priority[NearMaxPriority]);
    if (!_should_terminate && !DisableStartThread) {
      os::start_thread(this);
    }
  }

  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

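// Periodic task: update the GC monitoring counters, either because a running GC cycle
// forced continuous updates, or because the allocation path requested an update.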
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

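// Periodic task: force SATB buffer flushes on all mutator threads, so that concurrent
// marking is not held up by threads whose SATB buffers rarely fill up on their own.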
void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

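// Periodic task: wake up threads waiting on the pacer, so that paced allocators
// re-evaluate the pacing budget even without an explicit notification.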
void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run() {
  initialize_in_thread();

  wait_for_universe_init();

  // Wait until we have the surrogate locker thread in place.
  {
    MutexLockerEx x(CGC_lock, true);
    while (_slt == NULL && !_should_terminate) {
      CGC_lock->wait(true, 200);
    }
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we detect regions eligible
  // for shrinking with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !_should_terminate) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    intptr_t allocs_seen = (intptr_t)(Atomic::xchg_ptr(0, &_allocs_seen));

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear out all soft references for this cycle if we are handling an allocation
    // failure, an explicit or implicit GC request, or if asked to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->collector_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Capture metaspace usage before GC.
      const size_t metadata_prev_used = MetaspaceAux::used_bytes();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case none:
          break;
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->collector_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      if (PrintGCDetails) {
        ResourceMark rm;
        heap->phase_timings()->print_cycle_on(gclog_or_tty);
        if (ShenandoahPacing) {
          heap->pacer()->print_cycle_on(gclog_or_tty);
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocations seen during this control loop iteration to the pacer
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(); we cannot leave the service loop earlier.
  while (!_should_terminate) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
  terminate();
}

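// Check whether the soft max heap size target (ShenandoahSoftMaxHeapSize) has changed.
// If so, clamp the new value to [min_capacity, max_capacity], install it, log the
// transition, and return true so the caller can react (e.g. by uncommitting memory).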
bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = OrderAccess::load_acquire(&ShenandoahSoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes via all concurrent phases. If an allocation failure (af) happens
  // during any of the concurrent phases, it first degrades to Degenerated GC and completes
  // GC there. If a second allocation failure happens during the Degenerated GC cycle (for
  // example, when GC tries to evacuate something and no memory is available), the cycle
  // degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken
  // when the heuristics say there are no regions to compact, and all the collection comes
  // from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

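// Check whether the current GC has been cancelled. If it has (and we are not shutting
// down gracefully), remember the supplied degeneration point so that the next control
// loop iteration can continue the work as a Degenerated GC from that point.
// Returns true when the caller should abandon the current cycle.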
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              err_msg("Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point)));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

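// Request termination of the control thread and wait until it has actually stopped.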
void ShenandoahControlThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

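// Full GC: all work is done in a single stop-the-world pause.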
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

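// Degenerated GC: finish the cancelled concurrent cycle under a stop-the-world pause,
// resuming from the degeneration point where the concurrent cycle was interrupted.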
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

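// Explicit GCs are those requested by the user (e.g. System.gc()) or by the
// serviceability machinery (e.g. heap dump / heap inspection).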
bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_shenandoah_metadata_gc_clear_softrefs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, that cycle would miss many
  // cleanup opportunities that became available before the caller requested
  // the GC.

  MonitorLockerEx ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

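// Called by a Java thread whose allocation failed: schedule the allocation failure GC,
// cancel any concurrent cycle in progress, and block the caller until the allocation
// failure GC has completed.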
void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

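// Called when an evacuation fails to allocate in to-space: cancel the concurrent cycle
// so it degenerates. Unlike handle_alloc_failure(), this does not block the caller.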
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  Thread* t = Thread::current();

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLockerEx ml(&_gc_waiters_lock);
  ml.notify_all();
}

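// Update the monitoring counters if the allocation path has requested an update.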
void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

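// While forced updates are enabled (i.e. while a GC cycle is running), update the
// counters unconditionally, regardless of allocation activity.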
void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

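// The GC id counter is bumped when a GC cycle starts; handle_requested_gc() uses it
// to make sure a requested GC maps to a fresh cycle rather than one already in flight.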
void ShenandoahControlThread::reset_gc_id() {
  OrderAccess::release_store_ptr_fence(&_gc_id, 0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::add(1, &_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return OrderAccess::load_acquire(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseShenandoahGC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}