/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

#ifdef _WINDOWS
#pragma warning(disable : 4355)
#endif

SurrogateLockerThread* ShenandoahControlThread::_slt = NULL;

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
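  // Create the underlying OS thread at near-maximum priority and start it
  // right away, unless thread startup is disabled.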
  if (os::create_thread(this, os::cgc_thread)) {
    os::set_native_priority(this, os::java_to_os_priority[NearMaxPriority]);
    if (!_should_terminate && !DisableStartThread) {
      os::start_thread(this);
    }
  }

  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

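// Periodic task that flushes pending (and forced) monitoring counter updates
// on behalf of the control thread.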
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run() {
  initialize_in_thread();

  wait_for_universe_init();

  // Wait until we have the surrogate locker thread in place.
  {
    MutexLockerEx x(CGC_lock, true);
    while (_slt == NULL && !_should_terminate) {
      CGC_lock->wait(true, 200);
    }
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we start shrinking with
  // a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
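  // For example, with ShenandoahUncommitDelay = 300000 ms (5 minutes), the
  // shrink period is 30 seconds, so eligible regions are noticed at most
  // about 30 seconds after the configured delay has expired.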

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !_should_terminate) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);

    // How many allocations (in words) this control loop iteration has seen.
    intptr_t allocs_seen = (intptr_t)(Atomic::xchg_ptr(0, &_allocs_seen));

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask the heuristics if they want to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear all soft references on this cycle if we are handling an allocation failure,
    // an implicit or explicit GC request, or if we are asked to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->collector_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Capture metaspace usage before GC.
      const size_t metadata_prev_used = MetaspaceAux::used_bytes();

      // If a GC was requested, sample the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If a GC was requested, dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case none:
          break;
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it completed normally or was aborted.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about the new heap usage. This affects the global
        // soft refs policy, so report it every time heap usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->collector_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      if (PrintGCDetails) {
        ResourceMark rm;
        heap->phase_timings()->print_cycle_on(gclog_or_tty);
        if (ShenandoahPacing) {
          heap->pacer()->print_cycle_on(gclog_or_tty);
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocation volume we have seen to the pacer
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If an allocation happened during this wait,
    // we exit the wait sooner, to let the heuristics re-evaluate the new conditions.
    // If we are idle, back off exponentially.
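    // For example, with ShenandoahControlIntervalMin = 1 ms and
    // ShenandoahControlIntervalMax = 10 ms (the assumed defaults), the sleep
    // interval grows 1 -> 2 -> 4 -> 8 -> 10 ms while the heap stays idle,
    // adjusting at most once per ShenandoahControlIntervalAdjustPeriod.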
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(); we cannot leave run() earlier.
  while (!_should_terminate) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
  terminate();
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = OrderAccess::load_acquire(&ShenandoahSoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
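    // Clamp the requested value into [min_capacity, max_capacity] before applying it.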
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af) happens
  // during any of the concurrent phases, the cycle first degrades to a Degenerated GC and
  // completes the GC there. If a second allocation failure happens during the Degenerated GC
  // cycle (for example, when GC tries to evacuate something and no memory is available), the
  // cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken
  // when the heuristics say there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              err_msg("Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point)));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

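  // Fast path: the committed footprint is already at or below the target, nothing to uncommit.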
  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_shenandoah_metadata_gc_clear_softrefs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for the weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // arrives very late in an already running cycle, that cycle would miss many
  // cleanup opportunities that became available before the caller requested the GC.
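  //
  // The loop below blocks until the global GC id advances past the id observed
  // at request time; the request flag and cause are re-armed after every
  // (possibly spurious) wakeup so the control loop cannot miss the request.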

  MonitorLockerEx ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although setting the GC request happens under _gc_waiters_lock, the read side
    // (the control loop in run()) does not take the lock. We need to enforce the
    // following order, so that the read side sees the latest requested GC cause
    // when the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  Thread* t = Thread::current();

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLockerEx ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  OrderAccess::release_store_ptr_fence(&_gc_id, 0);
}

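// The cycle id is bumped at the start of every GC and read with acquire
// semantics by GC requesters (see handle_requested_gc).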
void ShenandoahControlThread::update_gc_id() {
  Atomic::add(1, &_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return OrderAccess::load_acquire(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseShenandoahGC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}