src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp

  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  29 #include "gc/shenandoah/shenandoahControlThread.hpp"
  30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"

  31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  32 #include "gc/shenandoah/shenandoahFullGC.hpp"
  33 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"

  38 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  39 #include "gc/shenandoah/shenandoahUtils.hpp"
  40 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  41 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  42 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"

  43 #include "memory/iterator.hpp"
  44 #include "memory/metaspaceUtils.hpp"
  45 #include "memory/metaspaceStats.hpp"
  46 #include "memory/universe.hpp"
  47 #include "runtime/atomic.hpp"
  48 
  49 ShenandoahControlThread::ShenandoahControlThread() :
  50   ConcurrentGCThread(),
  51   _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true),
  52   _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true),
  53   _periodic_task(this),
  54   _requested_gc_cause(GCCause::_no_cause_specified),

  55   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  56   _allocs_seen(0) {
  57   set_name("Shenandoah Control Thread");
  58   reset_gc_id();
  59   create_and_start();
  60   _periodic_task.enroll();
  61   if (ShenandoahPacing) {
  62     _periodic_pacer_notify_task.enroll();
  63   }
  64 }
  65 
  66 ShenandoahControlThread::~ShenandoahControlThread() {
  67   // This is here so that super is called.
  68 }
  69 
  70 void ShenandoahPeriodicTask::task() {
  71   _thread->handle_force_counters_update();
  72   _thread->handle_counters_update();
  73 }
  74 
  75 void ShenandoahPeriodicPacerNotify::task() {
  76   assert(ShenandoahPacing, "Should not be here otherwise");
  77   ShenandoahHeap::heap()->pacer()->notify_waiters();
  78 }
  79 
  80 void ShenandoahControlThread::run_service() {
  81   ShenandoahHeap* heap = ShenandoahHeap::heap();
  82 
  83   GCMode default_mode = concurrent_normal;

  84   GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  85   int sleep = ShenandoahControlIntervalMin;
  86 
  87   double last_shrink_time = os::elapsedTime();
  88   double last_sleep_adjust_time = os::elapsedTime();
  89 
  90   // Shrink period avoids constantly polling regions for shrinking.
   91   // Having a period 10x shorter than the delay means we hit the
   92   // shrinking with a lag of less than 1/10th of the true delay.
  93   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  94   double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
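        // For example, with ShenandoahUncommitDelay = 300000 ms (5 minutes), shrink_period works out to 30 seconds.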
  95 
  96   ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  97   ShenandoahHeuristics* heuristics = heap->heuristics();
  98   while (!in_graceful_shutdown() && !should_terminate()) {
  99     // Figure out if we have pending requests.
 100     bool alloc_failure_pending = _alloc_failure_gc.is_set();
 101     bool is_gc_requested = _gc_requested.is_set();
 102     GCCause::Cause requested_gc_cause = _requested_gc_cause;
 103     bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
 104     bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);
 105 
  106     // This control loop iteration has seen this many allocations.
 107     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
 108 
 109     // Check if we have seen a new target for soft max heap size.
 110     bool soft_max_changed = check_soft_max_changed();
 111 
 112     // Choose which GC mode to run in. The block below should select a single mode.
 113     GCMode mode = none;
 114     GCCause::Cause cause = GCCause::_last_gc_cause;
 115     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
 116 
 117     if (alloc_failure_pending) {
 118       // Allocation failure takes precedence: we have to deal with it first thing
 119       log_info(gc)("Trigger: Handle Allocation Failure");
 120 
 121       cause = GCCause::_allocation_failure;
 122 
 123       // Consume the degen point, and seed it with default value
 124       degen_point = _degen_point;
 125       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
 126 
 127       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
 128         heuristics->record_allocation_failure_gc();
 129         policy->record_alloc_failure_to_degenerated(degen_point);
 130         mode = stw_degenerated;
 131       } else {
 132         heuristics->record_allocation_failure_gc();
 133         policy->record_alloc_failure_to_full();
 134         mode = stw_full;

 135       }
 136 
 137     } else if (explicit_gc_requested) {
 138       cause = requested_gc_cause;

 139       log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
 140 
 141       heuristics->record_requested_gc();
 142 
 143       if (ExplicitGCInvokesConcurrent) {
 144         policy->record_explicit_to_concurrent();
 145         mode = default_mode;
 146         // Unload and clean up everything
 147         heap->set_unload_classes(heuristics->can_unload_classes());
 148       } else {
 149         policy->record_explicit_to_full();
 150         mode = stw_full;
 151       }
 152     } else if (implicit_gc_requested) {
 153       cause = requested_gc_cause;

 154       log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
 155 
 156       heuristics->record_requested_gc();
 157 
 158       if (ShenandoahImplicitGCInvokesConcurrent) {
 159         policy->record_implicit_to_concurrent();
 160         mode = default_mode;
 161 
 162         // Unload and clean up everything
 163         heap->set_unload_classes(heuristics->can_unload_classes());
 164       } else {
 165         policy->record_implicit_to_full();
 166         mode = stw_full;
 167       }
 168     } else {
 169       // Potential normal cycle: ask heuristics if it wants to act
 170       if (heuristics->should_start_gc()) {
 171         mode = default_mode;
 172         cause = default_cause;
 173       }
 174 
 175       // Ask policy if this cycle wants to process references or unload classes
 176       heap->set_unload_classes(heuristics->should_unload_classes());
 177     }
 178 
  179     // Blow away all soft references on this cycle, if handling an allocation failure,
  180     // an implicit or explicit GC request, or if we are requested to do so unconditionally.
 181     if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
 182       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
 183     }
 184 
 185     bool gc_requested = (mode != none);
 186     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
 187 
 188     if (gc_requested) {
 189       // GC is starting, bump the internal ID
 190       update_gc_id();
 191 
 192       heap->reset_bytes_allocated_since_gc_start();
 193 
 194       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
 195 
 196       // If GC was requested, we are sampling the counters even without actual triggers
 197       // from allocation machinery. This captures GC phases more accurately.
 198       set_forced_counters_update(true);
 199 
 200       // If GC was requested, we better dump freeset data for performance debugging
 201       {
 202         ShenandoahHeapLocker locker(heap->lock());
 203         heap->free_set()->log_status();
 204       }
 205 
 206       switch (mode) {
 207         case concurrent_normal:
 208           service_concurrent_normal_cycle(cause);
 209           break;
 210         case stw_degenerated:
 211           service_stw_degenerated_cycle(cause, degen_point);
 212           break;
 213         case stw_full:
 214           service_stw_full_cycle(cause);
 215           break;
 216         default:
 217           ShouldNotReachHere();
 218       }
 219 
 220       // If this was the requested GC cycle, notify waiters about it
 221       if (explicit_gc_requested || implicit_gc_requested) {
 222         notify_gc_waiters();
 223       }
 224 
 225       // If this was the allocation failure GC cycle, notify waiters about it
 226       if (alloc_failure_pending) {
 227         notify_alloc_failure_waiters();
 228       }
 229 
 230       // Report current free set state at the end of cycle, whether
  231       // it is a normal completion or an abort.
 232       {
 233         ShenandoahHeapLocker locker(heap->lock());
 234         heap->free_set()->log_status();
 235 
 236         // Notify Universe about new heap usage. This has implications for
 237         // global soft refs policy, and we better report it every time heap
 238         // usage goes down.
 239         Universe::heap()->update_capacity_and_used_at_gc();
 240 
 241         // Signal that we have completed a visit to all live objects.
 242         Universe::heap()->record_whole_heap_examined_timestamp();
 243       }
 244 
 245       // Disable forced counters update, and update counters one more time
 246       // to capture the state at the end of GC session.
 247       handle_force_counters_update();
 248       set_forced_counters_update(false);
 249 
 250       // Retract forceful part of soft refs policy
 251       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
 252 
 253       // Clear metaspace oom flag, if current cycle unloaded classes
 254       if (heap->unload_classes()) {
 255         heuristics->clear_metaspace_oom();

 256       }
 257 
 258       // Commit worker statistics to cycle data
 259       heap->phase_timings()->flush_par_workers_to_cycle();
 260       if (ShenandoahPacing) {
 261         heap->pacer()->flush_stats_to_cycle();
 262       }
 263 
 264       // Print GC stats for current cycle
 265       {
 266         LogTarget(Info, gc, stats) lt;
 267         if (lt.is_enabled()) {
 268           ResourceMark rm;
 269           LogStream ls(lt);
 270           heap->phase_timings()->print_cycle_on(&ls);
 271           if (ShenandoahPacing) {
 272             heap->pacer()->print_cycle_on(&ls);
 273           }
 274         }
 275       }
 276 
 277       // Commit statistics to globals
 278       heap->phase_timings()->flush_cycle_to_global();
 279 
 280       // Print Metaspace change following GC (if logging is enabled).
 281       MetaspaceUtils::print_metaspace_change(meta_sizes);
 282 
 283       // GC is over, we are at idle now
 284       if (ShenandoahPacing) {
 285         heap->pacer()->setup_for_idle();
 286       }
 287     } else {
  288       // Allow allocators to know we have seen this much allocation
 289       if (ShenandoahPacing && (allocs_seen > 0)) {
 290         heap->pacer()->report_alloc(allocs_seen);
 291       }
 292     }
 293 
 294     double current = os::elapsedTime();
 295 
 296     if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
 297       // Explicit GC tries to uncommit everything down to min capacity.
 298       // Soft max change tries to uncommit everything down to target capacity.
 299       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
 300 
 301       double shrink_before = (explicit_gc_requested || soft_max_changed) ?
 302                              current :
 303                              current - (ShenandoahUncommitDelay / 1000.0);
 304 
 305       size_t shrink_until = soft_max_changed ?
 306                              heap->soft_max_capacity() :
 307                              heap->min_capacity();
 308 
 309       service_uncommit(shrink_before, shrink_until);
 310       heap->phase_timings()->flush_cycle_to_global();
 311       last_shrink_time = current;
 312     }
 313 
 314     // Wait before performing the next action. If allocation happened during this wait,
 315     // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
 316     // back off exponentially.
 317     if (_heap_changed.try_unset()) {
 318       sleep = ShenandoahControlIntervalMin;
 319     } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
 320       sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
 321       last_sleep_adjust_time = current;
 322     }
 323     os::naked_short_sleep(sleep);
 324   }
 325 
 326   // Wait for the actual stop(), can't leave run_service() earlier.
 327   while (!should_terminate()) {
 328     os::naked_short_sleep(ShenandoahControlIntervalMin);
 329   }
 330 }
 331 
 332 bool ShenandoahControlThread::check_soft_max_changed() const {
 333   ShenandoahHeap* heap = ShenandoahHeap::heap();
 334   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 335   size_t old_soft_max = heap->soft_max_capacity();
 336   if (new_soft_max != old_soft_max) {
 337     new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
 338     new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
 339     if (new_soft_max != old_soft_max) {
 340       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 341                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 342                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 343       );
 344       heap->set_soft_max_capacity(new_soft_max);
 345       return true;
 346     }
 347   }
 348   return false;
 349 }
 350 
 351 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  352   // A normal cycle goes through all the concurrent phases. If an allocation failure (af) happens
  353   // during any of the concurrent phases, it first degrades to a Degenerated GC and completes GC there.
  354   // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
  355   // tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
  356   //
  357   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
  358   // the heuristics say there are no regions to compact, and all the collected garbage comes from
  359   // immediately reclaimable regions.
 360   //
 361   // ................................................................................................
 362   //
 363   //                                    (immediate garbage shortcut)                Concurrent GC
 364   //                             /-------------------------------------------\
 365   //                             |                                           |
 366   //                             |                                           |
 367   //                             |                                           |
 368   //                             |                                           v
 369   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
 370   //                   |                    |                 |              ^
 371   //                   | (af)               | (af)            | (af)         |
 372   // ..................|....................|.................|..............|.......................
 373   //                   |                    |                 |              |
 374   //                   |                    |                 |              |      Degenerated GC
 375   //                   v                    v                 v              |
 376   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
 377   //                   |                    |                 |              ^
 378   //                   | (af)               | (af)            | (af)         |
 379   // ..................|....................|.................|..............|.......................
 380   //                   |                    |                 |              |
 381   //                   |                    v                 |              |      Full GC
 382   //                   \------------------->o<----------------/              |
 383   //                                        |                                |
 384   //                                        v                                |
 385   //                                      Full GC  --------------------------/
 386   //
 387   ShenandoahHeap* heap = ShenandoahHeap::heap();
 388   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
 389 
 390   GCIdMark gc_id_mark;
 391   ShenandoahGCSession session(cause);
 392 
 393   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 394 
 395   ShenandoahConcurrentGC gc;
 396   if (gc.collect(cause)) {
 397     // Cycle is complete
 398     heap->heuristics()->record_success_concurrent();
 399     heap->shenandoah_policy()->record_success_concurrent();
 400   } else {
 401     assert(heap->cancelled_gc(), "Must have been cancelled");
 402     check_cancellation_or_degen(gc.degen_point());
 403   }
 404 }
 405 
 406 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
 407   ShenandoahHeap* heap = ShenandoahHeap::heap();
 408   if (heap->cancelled_gc()) {
 409     assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
 410     if (!in_graceful_shutdown()) {
 411       assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
 412               "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
 413       _degen_point = point;
 414     }
 415     return true;
 416   }
 417   return false;
 418 }
 419 
 420 void ShenandoahControlThread::stop_service() {
 421   // Nothing to do here.
 422 }
 423 
 424 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
 425   GCIdMark gc_id_mark;
 426   ShenandoahGCSession session(cause);
 427 
 428   ShenandoahFullGC gc;
 429   gc.collect(cause);
 430 
 431   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 432   heap->heuristics()->record_success_full();
 433   heap->shenandoah_policy()->record_success_full();
 434 }
 435 
 436 void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
 437   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

 438 
 439   GCIdMark gc_id_mark;
 440   ShenandoahGCSession session(cause);
 441 
 442   ShenandoahDegenGC gc(point);
 443   gc.collect(cause);
 444 
 445   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 446   heap->heuristics()->record_success_degenerated();
 447   heap->shenandoah_policy()->record_success_degenerated();

 448 }
 449 
 450 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
 451   ShenandoahHeap* heap = ShenandoahHeap::heap();
 452 
 453   // Determine if there is work to do. This avoids taking heap lock if there is
 454   // no work available, avoids spamming logs with superfluous logging messages,
 455   // and minimises the amount of work while locks are taken.
 456 
 457   if (heap->committed() <= shrink_until) return;
 458 
 459   bool has_work = false;
 460   for (size_t i = 0; i < heap->num_regions(); i++) {
 461     ShenandoahHeapRegion *r = heap->get_region(i);
 462     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 463       has_work = true;
 464       break;
 465     }
 466   }
 467 
 468   if (has_work) {
 469     heap->entry_uncommit(shrink_before, shrink_until);
 470   }
 471 }
 472 
 473 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
 474   return GCCause::is_user_requested_gc(cause) ||
 475          GCCause::is_serviceability_requested_gc(cause);
 476 }
 477 
 478 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
 479   assert(GCCause::is_user_requested_gc(cause) ||
 480          GCCause::is_serviceability_requested_gc(cause) ||
 481          cause == GCCause::_metadata_GC_clear_soft_refs ||
 482          cause == GCCause::_codecache_GC_aggressive ||
 483          cause == GCCause::_codecache_GC_threshold ||
 484          cause == GCCause::_full_gc_alot ||
 485          cause == GCCause::_wb_young_gc ||
 486          cause == GCCause::_wb_full_gc ||
 487          cause == GCCause::_wb_breakpoint ||
 488          cause == GCCause::_scavenge_alot,
 489          "only requested GCs here: %s", GCCause::to_string(cause));
 490 
 491   if (is_explicit_gc(cause)) {
 492     if (!DisableExplicitGC) {
 493       handle_requested_gc(cause);
 494     }
 495   } else {
 496     handle_requested_gc(cause);
 497   }
 498 }
 499 
 500 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
 501   // Make sure we have at least one complete GC cycle before unblocking
 502   // from the explicit GC request.
 503   //
  504   // This is especially important for weak reference cleanup and/or native
  505   // resource (e.g. DirectByteBuffers) machinery: when an explicit GC request
  506   // comes very late in an already running cycle, it would miss lots of new
  507   // cleanup opportunities that were made available before the caller
  508   // requested the GC.
 509 
 510   MonitorLocker ml(&_gc_waiters_lock);
 511   size_t current_gc_id = get_gc_id();
 512   size_t required_gc_id = current_gc_id + 1;
 513   while (current_gc_id < required_gc_id) {
  514     // Although setting the gc request is done under _gc_waiters_lock, the read side (run_service())
  515     // does not take the lock. We need to enforce the following order, so that the read side sees
  516     // the latest requested gc cause when the flag is set.
 517     _requested_gc_cause = cause;
 518     _gc_requested.set();
 519 
 520     if (cause != GCCause::_wb_breakpoint) {
 521       ml.wait();
 522     }
 523     current_gc_id = get_gc_id();
 524   }
 525 }
 526 
 527 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
 528   ShenandoahHeap* heap = ShenandoahHeap::heap();
 529 
 530   assert(current()->is_Java_thread(), "expect Java thread here");
 531 
 532   if (try_set_alloc_failure_gc()) {
 533     // Only report the first allocation failure
 534     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
 535                  req.type_string(),
 536                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
 537 
 538     // Now that alloc failure GC is scheduled, we can abort everything else
 539     heap->cancel_gc(GCCause::_allocation_failure);

 583     _do_counters_update.unset();
 584     ShenandoahHeap::heap()->monitoring_support()->update_counters();
 585   }
 586 }
 587 
 588 void ShenandoahControlThread::handle_force_counters_update() {
 589   if (_force_counters_update.is_set()) {
 590     _do_counters_update.unset(); // reset these too, we do update now!
 591     ShenandoahHeap::heap()->monitoring_support()->update_counters();
 592   }
 593 }
 594 
 595 void ShenandoahControlThread::notify_heap_changed() {
 596   // This is called from allocation path, and thus should be fast.
 597 
 598   // Update monitoring counters when we took a new region. This amortizes the
 599   // update costs on slow path.
 600   if (_do_counters_update.is_unset()) {
 601     _do_counters_update.set();
 602   }
 603   // Notify that something had changed.
 604   if (_heap_changed.is_unset()) {
 605     _heap_changed.set();
 606   }
 607 }
 608 
 609 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
 610   assert(ShenandoahPacing, "should only call when pacing is enabled");
 611   Atomic::add(&_allocs_seen, words, memory_order_relaxed);
 612 }
 613 
 614 void ShenandoahControlThread::set_forced_counters_update(bool value) {
 615   _force_counters_update.set_cond(value);
 616 }
 617 
 618 void ShenandoahControlThread::reset_gc_id() {
 619   Atomic::store(&_gc_id, (size_t)0);
 620 }
 621 
 622 void ShenandoahControlThread::update_gc_id() {
 623   Atomic::inc(&_gc_id);
 624 }
 625 
 626 size_t ShenandoahControlThread::get_gc_id() {
 627   return Atomic::load(&_gc_id);
 628 }
 629 
 630 void ShenandoahControlThread::start() {
 631   create_and_start();
 632 }
 633 
 634 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
 635   _graceful_shutdown.set();
 636 }
 637 
 638 bool ShenandoahControlThread::in_graceful_shutdown() {
 639   return _graceful_shutdown.is_set();
 640 }


  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  29 #include "gc/shenandoah/shenandoahControlThread.hpp"
  30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
  31 #include "gc/shenandoah/shenandoahEvacTracker.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc/shenandoah/shenandoahFullGC.hpp"
  34 #include "gc/shenandoah/shenandoahGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahOldGC.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  44 #include "gc/shenandoah/shenandoahUtils.hpp"
  45 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  47 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  48 #include "gc/shenandoah/mode/shenandoahMode.hpp"
  49 #include "memory/iterator.hpp"
  50 #include "memory/metaspaceUtils.hpp"
  51 #include "memory/metaspaceStats.hpp"
  52 #include "memory/universe.hpp"
  53 #include "runtime/atomic.hpp"
  54 
  55 ShenandoahControlThread::ShenandoahControlThread() :
  56   ConcurrentGCThread(),
  57   _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true),
  58   _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true),
  59   _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  60   _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  61   _periodic_task(this),
  62   _requested_gc_cause(GCCause::_no_cause_specified),
  63   _requested_generation(GenerationMode::GLOBAL),
  64   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  65   _degen_generation(NULL),
  66   _allocs_seen(0),
  67   _mode(none) {
  68   set_name("Shenandoah Control Thread");
  69   reset_gc_id();
  70   create_and_start();
  71   _periodic_task.enroll();
  72   if (ShenandoahPacing) {
  73     _periodic_pacer_notify_task.enroll();
  74   }
  75 }
  76 
  77 ShenandoahControlThread::~ShenandoahControlThread() {
  78   // This is here so that super is called.
  79 }
  80 
  81 void ShenandoahPeriodicTask::task() {
  82   _thread->handle_force_counters_update();
  83   _thread->handle_counters_update();
  84 }
  85 
  86 void ShenandoahPeriodicPacerNotify::task() {
  87   assert(ShenandoahPacing, "Should not be here otherwise");
  88   ShenandoahHeap::heap()->pacer()->notify_waiters();
  89 }
  90 
  91 void ShenandoahControlThread::run_service() {
  92   ShenandoahHeap* heap = ShenandoahHeap::heap();
  93 
  94   GCMode default_mode = concurrent_normal;
  95   GenerationMode generation = GLOBAL;
  96   GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;

  97 
  98   double last_shrink_time = os::elapsedTime();
  99   uint age_period = 0;
 100 
 101   // Shrink period avoids constantly polling regions for shrinking.
  102   // Having a period 10x shorter than the delay means we hit the
  103   // shrinking with a lag of less than 1/10th of the true delay.
 104   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
 105   double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
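        // For example, with ShenandoahUncommitDelay = 300000 ms (5 minutes), shrink_period works out to 30 seconds.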
 106 
 107   ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
 108 
  109   // Heuristics are notified here of allocation failures and other outcomes
  110   // of the cycle. They're also used here to control whether the Nth consecutive
 111   // degenerated cycle should be 'promoted' to a full cycle. The decision to
 112   // trigger a cycle or not is evaluated on the regulator thread.
 113   ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
 114   while (!in_graceful_shutdown() && !should_terminate()) {
 115     // Figure out if we have pending requests.
 116     bool alloc_failure_pending = _alloc_failure_gc.is_set();
 117     bool is_gc_requested = _gc_requested.is_set();
 118     GCCause::Cause requested_gc_cause = _requested_gc_cause;
 119     bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
 120     bool implicit_gc_requested = is_gc_requested && is_implicit_gc(requested_gc_cause);
 121 
 122     // This control loop iteration have seen this much allocations.
 123     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
 124 
 125     // Check if we have seen a new target for soft max heap size.
 126     bool soft_max_changed = check_soft_max_changed();
 127 
 128     // Choose which GC mode to run in. The block below should select a single mode.
 129     set_gc_mode(none);
 130     GCCause::Cause cause = GCCause::_last_gc_cause;
 131     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
 132 
 133     if (alloc_failure_pending) {
 134       // Allocation failure takes precedence: we have to deal with it first thing
 135       log_info(gc)("Trigger: Handle Allocation Failure");
 136 
 137       cause = GCCause::_allocation_failure;
 138 
 139       // Consume the degen point, and seed it with default value
 140       degen_point = _degen_point;
 141       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
 142 
 143       if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
 144         _degen_generation = heap->mode()->is_generational() ? heap->young_generation() : heap->global_generation();
 145       } else {
 146         assert(_degen_generation != NULL, "Need to know which generation to resume.");
 147       }
 148 
 149       ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
 150       generation = _degen_generation->generation_mode();
 151       bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();
 152 
 153       // Do not bother with degenerated cycle if old generation evacuation failed.
 154       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && !old_gen_evacuation_failed) {
 155         heuristics->record_allocation_failure_gc();
 156         policy->record_alloc_failure_to_degenerated(degen_point);
 157         set_gc_mode(stw_degenerated);
 158       } else {
 159         heuristics->record_allocation_failure_gc();
 160         policy->record_alloc_failure_to_full();
 161         generation = GLOBAL;
 162         set_gc_mode(stw_full);
 163       }

 164     } else if (explicit_gc_requested) {
 165       cause = requested_gc_cause;
 166       generation = GLOBAL;
 167       log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
 168 
 169       global_heuristics->record_requested_gc();
 170 
 171       if (ExplicitGCInvokesConcurrent) {
 172         policy->record_explicit_to_concurrent();
 173         set_gc_mode(default_mode);
 174         // Unload and clean up everything
 175         heap->set_unload_classes(global_heuristics->can_unload_classes());
 176       } else {
 177         policy->record_explicit_to_full();
 178         set_gc_mode(stw_full);
 179       }
 180     } else if (implicit_gc_requested) {
 181       cause = requested_gc_cause;
 182       generation = GLOBAL;
 183       log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
 184 
 185       global_heuristics->record_requested_gc();
 186 
 187       if (ShenandoahImplicitGCInvokesConcurrent) {
 188         policy->record_implicit_to_concurrent();
 189         set_gc_mode(default_mode);
 190 
 191         // Unload and clean up everything
 192         heap->set_unload_classes(global_heuristics->can_unload_classes());
 193       } else {
 194         policy->record_implicit_to_full();
 195         set_gc_mode(stw_full);
 196       }
 197     } else {
 198       // We should only be here if the regulator requested a cycle or if
 199       // there is an old generation mark in progress.
 200       if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
 201         if (_requested_generation == OLD && heap->doing_mixed_evacuations()) {
 202           // If a request to start an old cycle arrived while an old cycle was running, but _before_
  203           // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
 204           // the heuristic to run a young collection so that we can evacuate some old regions.
 205           assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking.");
 206           generation = YOUNG;
 207         } else {
 208           generation = _requested_generation;
 209         }
 210 
 211         // preemption was requested or this is a regular cycle
 212         cause = GCCause::_shenandoah_concurrent_gc;
 213         set_gc_mode(default_mode);
 214 
 215         // Don't start a new old marking if there is one already in progress.
 216         if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
 217           set_gc_mode(servicing_old);
 218         }
 219 
 220         if (generation == GLOBAL) {
 221           heap->set_unload_classes(global_heuristics->should_unload_classes());
 222         } else {
 223           heap->set_unload_classes(false);
 224         }
 225 
 226         // Don't want to spin in this loop and start a cycle every time, so
 227         // clear requested gc cause. This creates a race with callers of the
  228         // blocking 'request_gc' method, but that method loops and resets
  229         // '_requested_gc_cause' until a full cycle is completed.
 230         _requested_gc_cause = GCCause::_no_gc;
 231       } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
 232         // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
 233         // mixed evacuation in progress, so resume working on that.
 234         log_info(gc)("Resume old gc: marking=%s, preparing=%s",
 235                      BOOL_TO_STR(heap->is_concurrent_old_mark_in_progress()),
 236                      BOOL_TO_STR(heap->is_prepare_for_old_mark_in_progress()));
 237 
 238         cause = GCCause::_shenandoah_concurrent_gc;
 239         generation = OLD;
 240         set_gc_mode(servicing_old);
 241       }
 242     }
 243 
  244     // Blow away all soft references on this cycle, if handling an allocation failure,
  245     // an implicit or explicit GC request, or if we are requested to do so unconditionally.
 246     if (generation == GLOBAL && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
 247       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
 248     }
 249 
 250     bool gc_requested = (_mode != none);
 251     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
 252 
 253     if (gc_requested) {
 254       // GC is starting, bump the internal ID
 255       update_gc_id();
 256 
 257       heap->reset_bytes_allocated_since_gc_start();
 258 
 259       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
 260 
 261       // If GC was requested, we are sampling the counters even without actual triggers
 262       // from allocation machinery. This captures GC phases more accurately.
 263       set_forced_counters_update(true);
 264 
 265       // If GC was requested, we better dump freeset data for performance debugging
 266       {
 267         ShenandoahHeapLocker locker(heap->lock());
 268         heap->free_set()->log_status();
 269       }
 270 
 271       heap->set_aging_cycle(false);
 272       {
 273         switch (_mode) {
 274           case concurrent_normal: {
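                  // Every ShenandoahAgingCyclePeriod-th young cycle runs as an aging cycle.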
 275             if ((generation == YOUNG) && (age_period-- == 0)) {
 276               heap->set_aging_cycle(true);
 277               age_period = ShenandoahAgingCyclePeriod - 1;
 278             }
 279             service_concurrent_normal_cycle(heap, generation, cause);
 280             break;
 281           }
 282           case stw_degenerated: {
 283             if (!service_stw_degenerated_cycle(cause, degen_point)) {
 284               // The degenerated GC was upgraded to a Full GC
 285               generation = GLOBAL;
 286             }
 287             break;
 288           }
 289           case stw_full: {
 290             service_stw_full_cycle(cause);
 291             break;
 292           }
 293           case servicing_old: {
 294             assert(generation == OLD, "Expected old generation here");
 295             GCIdMark gc_id_mark;
 296             service_concurrent_old_cycle(heap, cause);
 297             break;
 298           }
 299           default: {
 300             ShouldNotReachHere();
 301           }
 302         }
 303       }
 304 
 305       // If this was the requested GC cycle, notify waiters about it
 306       if (explicit_gc_requested || implicit_gc_requested) {
 307         notify_gc_waiters();
 308       }
 309 
 310       // If this was the allocation failure GC cycle, notify waiters about it
 311       if (alloc_failure_pending) {
 312         notify_alloc_failure_waiters();
 313       }
 314 
 315       // Report current free set state at the end of cycle, whether
  316       // it is a normal completion or an abort.
 317       {
 318         ShenandoahHeapLocker locker(heap->lock());
 319         heap->free_set()->log_status();
 320 
 321         // Notify Universe about new heap usage. This has implications for
 322         // global soft refs policy, and we better report it every time heap
 323         // usage goes down.
 324         Universe::heap()->update_capacity_and_used_at_gc();
 325 
 326         // Signal that we have completed a visit to all live objects.
 327         Universe::heap()->record_whole_heap_examined_timestamp();
 328       }
 329 
 330       // Disable forced counters update, and update counters one more time
 331       // to capture the state at the end of GC session.
 332       handle_force_counters_update();
 333       set_forced_counters_update(false);
 334 
 335       // Retract forceful part of soft refs policy
 336       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
 337 
 338       // Clear metaspace oom flag, if current cycle unloaded classes
 339       if (heap->unload_classes()) {
 340         assert(generation == GLOBAL, "Only unload classes during GLOBAL cycle");
 341         global_heuristics->clear_metaspace_oom();
 342       }
 343 
 344       process_phase_timings(heap);
 345 
 346       // Print Metaspace change following GC (if logging is enabled).
 347       MetaspaceUtils::print_metaspace_change(meta_sizes);
 348 
 349       // GC is over, we are at idle now
 350       if (ShenandoahPacing) {
 351         heap->pacer()->setup_for_idle();
 352       }
 353     } else {
 354       // Allow allocators to know we have seen this much regions
 355       if (ShenandoahPacing && (allocs_seen > 0)) {
 356         heap->pacer()->report_alloc(allocs_seen);
 357       }
 358     }
 359 
 360     double current = os::elapsedTime();
 361 
 362     if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
 363       // Explicit GC tries to uncommit everything down to min capacity.
 364       // Soft max change tries to uncommit everything down to target capacity.
 365       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
 366 
 367       double shrink_before = (explicit_gc_requested || soft_max_changed) ?
 368                              current :
 369                              current - (ShenandoahUncommitDelay / 1000.0);
 370 
 371       size_t shrink_until = soft_max_changed ?
 372                              heap->soft_max_capacity() :
 373                              heap->min_capacity();
 374 
 375       service_uncommit(shrink_before, shrink_until);
 376       heap->phase_timings()->flush_cycle_to_global();
 377       last_shrink_time = current;
 378     }
 379 
 380     // Don't wait around if there was an allocation failure - start the next cycle immediately.
 381     if (!is_alloc_failure_gc()) {
 382       // The timed wait is necessary because this thread has a responsibility to send
 383       // 'alloc_words' to the pacer when it does not perform a GC.
 384       MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
 385       lock.wait(ShenandoahControlIntervalMax);
 386     }

 387   }
 388 
 389   // Wait for the actual stop(), can't leave run_service() earlier.
 390   while (!should_terminate()) {
 391     os::naked_short_sleep(ShenandoahControlIntervalMin);
 392   }
 393 }
 394 
 395 void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {
 396 
 397   // Commit worker statistics to cycle data
 398   heap->phase_timings()->flush_par_workers_to_cycle();
 399   if (ShenandoahPacing) {
 400     heap->pacer()->flush_stats_to_cycle();
 401   }
 402 
 403   ShenandoahCycleStats evac_stats = heap->evac_tracker()->flush_cycle_to_global();
 404 
 405   // Print GC stats for current cycle
 406   {
 407     LogTarget(Info, gc, stats) lt;
 408     if (lt.is_enabled()) {
 409       ResourceMark rm;
 410       LogStream ls(lt);
 411       heap->phase_timings()->print_cycle_on(&ls);
 412       ShenandoahEvacuationTracker::print_evacuations_on(&ls, &evac_stats.workers,
 413                                                              &evac_stats.mutators);
 414       if (ShenandoahPacing) {
 415         heap->pacer()->print_cycle_on(&ls);
 416       }
 417     }
 418   }
 419 
 420   // Commit statistics to globals
 421   heap->phase_timings()->flush_cycle_to_global();
 422 
 423 }
 424 
 425 // Young and old concurrent cycles are initiated by the regulator. Implicit
 426 // and explicit GC requests are handled by the controller thread and always
 427 // run a global cycle (which is concurrent by default, but may be overridden
  428 // by command line options). Old cycles always degenerate to a global cycle.
  429 // A degenerated young cycle completes the young cycle.  Young
  430 // and old degenerated cycles may upgrade to Full GC.  Full GC may also be
 431 // triggered directly by a System.gc() invocation.
 432 //
 433 //
 434 //      +-----+ Idle +-----+-----------+---------------------+
 435 //      |         +        |           |                     |
 436 //      |         |        |           |                     |
 437 //      |         |        v           |                     |
 438 //      |         |  Bootstrap Old +-- | ------------+       |
 439 //      |         |   +                |             |       |
 440 //      |         |   |                |             |       |
 441 //      |         v   v                v             v       |
 442 //      |    Resume Old <----------+ Young +--> Young Degen  |
 443 //      |     +  +   ^                            +  +       |
 444 //      v     |  |   |                            |  |       |
 445 //   Global <-+  |   +----------------------------+  |       |
 446 //      +        |                                   |       |
 447 //      |        v                                   v       |
 448 //      +--->  Global Degen +--------------------> Full <----+
 449 //
 450 void ShenandoahControlThread::service_concurrent_normal_cycle(
 451   const ShenandoahHeap* heap, const GenerationMode generation, GCCause::Cause cause) {
 452   GCIdMark gc_id_mark;
 453   switch (generation) {
 454     case YOUNG: {
  455       // Run a young cycle. This might or might not have interrupted an ongoing
 456       // concurrent mark in the old generation. We need to think about promotions
 457       // in this case. Promoted objects should be above the TAMS in the old regions
 458       // they end up in, but we have to be sure we don't promote into any regions
 459       // that are in the cset.
 460       log_info(gc, ergo)("Start GC cycle (YOUNG)");
 461       service_concurrent_cycle(heap->young_generation(), cause, false);
 462       break;
 463     }
 464     case GLOBAL: {
 465       log_info(gc, ergo)("Start GC cycle (GLOBAL)");
 466       service_concurrent_cycle(heap->global_generation(), cause, false);
 467       break;
 468     }
 469     case OLD: {
 470       log_info(gc, ergo)("Start GC cycle (OLD)");
 471       service_concurrent_old_cycle(heap, cause);
 472       break;
 473     }
 474     default:
 475       ShouldNotReachHere();
 476   }
 477   const char* msg;
 478   if (heap->cancelled_gc()) {
  479     msg = (generation == YOUNG) ? "At end of Interrupted Concurrent Young GC" : "At end of Interrupted Concurrent Bootstrap GC";
  480   } else {
  481     msg = (generation == YOUNG) ? "At end of Concurrent Young GC" : "At end of Concurrent Bootstrap GC";
 482   }
 483   heap->log_heap_status(msg);
 484 }
 485 
 486 void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap* heap, GCCause::Cause &cause) {
 487 
 488   ShenandoahOldGeneration* old_generation = heap->old_generation();
 489   ShenandoahYoungGeneration* young_generation = heap->young_generation();
 490   ShenandoahOldGeneration::State original_state = old_generation->state();
 491 
 492   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 493 
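        // Note: the cases below fall through intentionally. An old cycle that starts from IDLE
        // proceeds through FILLING, BOOTSTRAPPING, and MARKING in a single pass, unless the
        // filling step or the bootstrap young cycle is interrupted and returns early.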
 494   switch (original_state) {
 495     case ShenandoahOldGeneration::IDLE: {
 496       assert(!heap->is_concurrent_old_mark_in_progress(), "Old already in progress.");
 497       assert(old_generation->task_queues()->is_empty(), "Old mark queues should be empty.");
 498     }
 499     case ShenandoahOldGeneration::FILLING: {
 500       _allow_old_preemption.set();
 501       ShenandoahGCSession session(cause, old_generation);
 502       old_generation->prepare_gc();
 503       _allow_old_preemption.unset();
 504 
 505       if (heap->is_prepare_for_old_mark_in_progress()) {
 506         assert(old_generation->state() == ShenandoahOldGeneration::FILLING, "Prepare for mark should be in progress.");
 507         return;
 508       }
 509 
 510       assert(old_generation->state() == ShenandoahOldGeneration::BOOTSTRAPPING, "Finished with filling, should be bootstrapping.");
 511     }
 512     case ShenandoahOldGeneration::BOOTSTRAPPING: {
 513       // Configure the young generation's concurrent mark to put objects in
 514       // old regions into the concurrent mark queues associated with the old
 515       // generation. The young cycle will run as normal except that rather than
 516       // ignore old references it will mark and enqueue them in the old concurrent
 517       // task queues but it will not traverse them.
 518       young_generation->set_old_gen_task_queues(old_generation->task_queues());
 519       ShenandoahGCSession session(cause, young_generation);
  520       service_concurrent_cycle(heap, young_generation, cause, true);
 521       process_phase_timings(heap);
 522       if (heap->cancelled_gc()) {
 523         // Young generation bootstrap cycle has failed. Concurrent mark for old generation
 524         // is going to resume after degenerated bootstrap cycle completes.
 525         log_info(gc)("Bootstrap cycle for old generation was cancelled.");
 526         return;
 527       }
 528 
 529       // Reset the degenerated point. Normally this would happen at the top
 530       // of the control loop, but here we have just completed a young cycle
 531       // which has bootstrapped the old concurrent marking.
 532       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
 533 
 534       // From here we will 'resume' the old concurrent mark. This will skip reset
 535       // and init mark for the concurrent mark. All of that work will have been
 536       // done by the bootstrapping young cycle. In order to simplify the debugging
 537       // effort, the old cycle will ONLY complete the mark phase. No actual
 538       // collection of the old generation is happening here.
 539       set_gc_mode(servicing_old);
 540       old_generation->transition_to(ShenandoahOldGeneration::MARKING);
 541     }
 542     case ShenandoahOldGeneration::MARKING: {
 543       ShenandoahGCSession session(cause, old_generation);
 544       bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
 545       if (marking_complete) {
 546         assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking.");
 547         if (original_state == ShenandoahOldGeneration::MARKING) {
 548           heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
 549         }
 550       } else if (original_state == ShenandoahOldGeneration::MARKING) {
 551         heap->log_heap_status("At end of Concurrent Old Marking increment");
 552       }
 553       break;
 554     }
 555     default:
 556       log_error(gc)("Unexpected state for old GC: %d", old_generation->state());
 557       ShouldNotReachHere();
 558   }
 559 }
 560 
 561 bool ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
 562 
 563   assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
 564   log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued.", generation->task_queues()->tasks());
 565 
 566   ShenandoahHeap* heap = ShenandoahHeap::heap();
 567 
 568   // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
 569   // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
 570   // is allowed to cancel a GC.
 571   ShenandoahOldGC gc(generation, _allow_old_preemption);
 572   if (gc.collect(cause)) {
 573     generation->record_success_concurrent(false);
 574   }
 575 
 576   if (heap->cancelled_gc()) {
 577     // It's possible the gc cycle was cancelled after the last time
  578     // the collection checked for cancellation. In that case, the
  579     // old gc cycle still completed, and we have to deal with this
 580     // cancellation. We set the degeneration point to be outside
 581     // the cycle because if this is an allocation failure, that is
 582     // what must be done (there is no degenerated old cycle). If the
 583     // cancellation was due to a heuristic wanting to start a young
 584     // cycle, then we are not actually going to a degenerated cycle,
 585     // so the degenerated point doesn't matter here.
 586     check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
 587     if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
 588       heap->shenandoah_policy()->record_interrupted_old();
 589     }
 590     return false;
 591   }
 592   return true;
 593 }
 594 
 595 bool ShenandoahControlThread::check_soft_max_changed() const {
 596   ShenandoahHeap* heap = ShenandoahHeap::heap();
 597   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 598   size_t old_soft_max = heap->soft_max_capacity();
 599   if (new_soft_max != old_soft_max) {
 600     new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
 601     new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
 602     if (new_soft_max != old_soft_max) {
 603       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 604                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 605                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 606       );
 607       heap->set_soft_max_capacity(new_soft_max);
 608       return true;
 609     }
 610   }
 611   return false;
 612 }
 613 
 614 void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  615   // A normal cycle goes through all the concurrent phases. If an allocation failure (af) happens
  616   // during any of the concurrent phases, it first degrades to a Degenerated GC and completes GC there.
  617   // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
  618   // tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
  619   //
  620   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
  621   // the heuristics say there are no regions to compact, and all the collected garbage comes from
  622   // immediately reclaimable regions.
 623   //
 624   // ................................................................................................
 625   //
 626   //                                    (immediate garbage shortcut)                Concurrent GC
 627   //                             /-------------------------------------------\
 628   //                             |                                           |
 629   //                             |                                           |
 630   //                             |                                           |
 631   //                             |                                           v
 632   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
 633   //                   |                    |                 |              ^
 634   //                   | (af)               | (af)            | (af)         |
 635   // ..................|....................|.................|..............|.......................
 636   //                   |                    |                 |              |
 637   //                   |                    |                 |              |      Degenerated GC
 638   //                   v                    v                 v              |
 639   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
 640   //                   |                    |                 |              ^
 641   //                   | (af)               | (af)            | (af)         |
 642   // ..................|....................|.................|..............|.......................
 643   //                   |                    |                 |              |
 644   //                   |                    v                 |              |      Full GC
 645   //                   \------------------->o<----------------/              |
 646   //                                        |                                |
 647   //                                        v                                |
 648   //                                      Full GC  --------------------------/
 649   //

 650   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
 651 
 652   ShenandoahHeap* heap = ShenandoahHeap::heap();
 653   ShenandoahGCSession session(cause, generation);

 654   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 655 
 656   service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
 657 }
 658 
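     // The three-argument overload below runs the actual concurrent GC. If collect() fails,
     // the GC must have been cancelled; we remember which generation was being collected so
     // that a subsequent degenerated cycle knows what to collect.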
 659 void ShenandoahControlThread::service_concurrent_cycle(const ShenandoahHeap* heap, ShenandoahGeneration* generation,
 660                                                        GCCause::Cause &cause, bool do_old_gc_bootstrap) {
 661   ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
 662   if (gc.collect(cause)) {
 663     // Cycle is complete
 664     generation->record_success_concurrent(gc.abbreviated());

 665   } else {
 666     assert(heap->cancelled_gc(), "Must have been cancelled");
 667     check_cancellation_or_degen(gc.degen_point());
 668     assert(generation->generation_mode() != OLD, "Old GC takes a different control path");
 669     // Concurrent young-gen collection degenerates to young
 670     // collection.  Same for global collections.
 671     _degen_generation = generation;
 672   }
 673 }
 674 
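     // Returns true when the concurrent path must be abandoned: either we are in a graceful
     // shutdown, an allocation failure demands a degenerated cycle starting at 'point', or a
     // young collection has requested preemption of old marking, in which case the cancellation
     // is cleared (leaving the OOM handler alone) before returning.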
 675 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
 676   ShenandoahHeap* heap = ShenandoahHeap::heap();
 677   if (!heap->cancelled_gc()) {
 678     return false;
 679   }
 680 
 681   if (in_graceful_shutdown()) {


 682     return true;
 683   }
 684 
 685   assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
 686          "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
 687 
 688   if (is_alloc_failure_gc()) {
 689     _degen_point = point;
 690     return true;
 691   }
 692 
 693   if (_preemption_requested.is_set()) {
 694     assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
 695     _preemption_requested.unset();
 696 
 697     // Old generation marking is only cancellable during concurrent marking.
 698     // Once final mark is complete, the code does not check again for cancellation.
 699     // If the old generation cycle had been cancelled for an allocation failure, we
 700     // would not have made it to this case. The calling code is responsible for forcing a
 701     // cancellation due to allocation failure into a degenerated cycle.
 702     _degen_point = point;
 703     heap->clear_cancelled_gc(false /* clear oom handler */);
 704     return true;
 705   }
 706 
 707   fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking.");
 708   return false;
 709 }
 710 
 711 void ShenandoahControlThread::stop_service() {
 712   // Nothing to do here.
 713 }
 714 
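     // A full GC always runs against the global generation; its success is recorded with both
     // the global heuristics and the overall collector policy.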
 715 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
 716   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 717 
 718   GCIdMark gc_id_mark;
 719   ShenandoahGCSession session(cause, heap->global_generation());
 720 
 721   ShenandoahFullGC gc;
 722   gc.collect(cause);
 723 
 724   heap->global_generation()->heuristics()->record_success_full();

 725   heap->shenandoah_policy()->record_success_full();
 726 }
 727 
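     // Runs a degenerated cycle for the generation remembered in _degen_generation. Returns
     // true if the degenerated GC completed on its own, false if it had to upgrade to a full GC.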
 728 bool ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
 729   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
 730   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 731 
 732   GCIdMark gc_id_mark;
 733   ShenandoahGCSession session(cause, _degen_generation);
 734 
 735   ShenandoahDegenGC gc(point, _degen_generation);
 736   gc.collect(cause);
 737 
 738   assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
 739   if (_degen_generation->generation_mode() == GLOBAL) {
 740     assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
 741     assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
 742   } else {
 743     assert(_degen_generation->generation_mode() == YOUNG, "Expected degenerated young cycle, if not global.");
 744     ShenandoahOldGeneration* old_generation = (ShenandoahOldGeneration*) heap->old_generation();
 745     if (old_generation->state() == ShenandoahOldGeneration::BOOTSTRAPPING && !gc.upgraded_to_full()) {
 746       old_generation->transition_to(ShenandoahOldGeneration::MARKING);
 747     }
 748   }
 749 
 750   _degen_generation->heuristics()->record_success_degenerated();
 751   heap->shenandoah_policy()->record_success_degenerated();
 752   return !gc.upgraded_to_full();
 753 }
 754 
 755 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
 756   ShenandoahHeap* heap = ShenandoahHeap::heap();
 757 
 758   // Determine if there is work to do. This avoids taking the heap lock if there
 759   // is no work available, avoids spamming the log with superfluous messages,
 760   // and minimizes the amount of work done while the lock is held.
 761 
 762   if (heap->committed() <= shrink_until) return;
 763 
 764   bool has_work = false;
 765   for (size_t i = 0; i < heap->num_regions(); i++) {
 766     ShenandoahHeapRegion *r = heap->get_region(i);
 767     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 768       has_work = true;
 769       break;
 770     }
 771   }
 772 
 773   if (has_work) {
 774     heap->entry_uncommit(shrink_before, shrink_until);
 775   }
 776 }
 777 
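     // "Explicit" requests are those coming from the user (e.g. System.gc()) or from
     // serviceability tooling; every other cause, except the heuristic-driven
     // _shenandoah_concurrent_gc, counts as an implicit request.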
 778 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
 779   return GCCause::is_user_requested_gc(cause) ||
 780          GCCause::is_serviceability_requested_gc(cause);
 781 }
 782 
 783 bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
 784   return !is_explicit_gc(cause) && cause != GCCause::_shenandoah_concurrent_gc;
 785 }
 786 
 787 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
 788   assert(GCCause::is_user_requested_gc(cause) ||
 789          GCCause::is_serviceability_requested_gc(cause) ||
 790          cause == GCCause::_metadata_GC_clear_soft_refs ||
 791          cause == GCCause::_codecache_GC_aggressive ||
 792          cause == GCCause::_codecache_GC_threshold ||
 793          cause == GCCause::_full_gc_alot ||
 794          cause == GCCause::_wb_young_gc ||
 795          cause == GCCause::_wb_full_gc ||
 796          cause == GCCause::_wb_breakpoint ||
 797          cause == GCCause::_scavenge_alot,
 798          "only requested GCs here: %s", GCCause::to_string(cause));
 799 
 800   if (is_explicit_gc(cause)) {
 801     if (!DisableExplicitGC) {
 802       handle_requested_gc(cause);
 803     }
 804   } else {
 805     handle_requested_gc(cause);
 806   }
 807 }
 808 
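     // Hands a heuristic request for a concurrent cycle of 'generation' to the control thread
     // and blocks on _regulator_lock until the control thread acknowledges it by changing its
     // gc mode (see set_gc_mode()). Returns false without blocking if another request or a
     // cancellation is already pending, or if the requested cycle cannot preempt what the
     // control thread is currently doing.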
 809 bool ShenandoahControlThread::request_concurrent_gc(GenerationMode generation) {
 810   if (_preemption_requested.is_set() || _gc_requested.is_set() || ShenandoahHeap::heap()->cancelled_gc()) {
 811     // ignore subsequent requests from the heuristics
 812     return false;
 813   }
 814 
 815   if (_mode == none) {
 816     _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
 817     _requested_generation = generation;
 818     notify_control_thread();
 819     MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
 820     ml.wait();
 821     return true;
 822   }
 823 
 824   if (preempt_old_marking(generation)) {
 825     log_info(gc)("Preempting old generation mark to allow %s GC.", generation_name(generation));
 826     _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
 827     _requested_generation = generation;
 828     _preemption_requested.set();
 829     ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
 830     notify_control_thread();
 831 
 832     MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
 833     ml.wait();
 834     return true;
 835   }
 836 
 837   return false;
 838 }
 839 
 840 void ShenandoahControlThread::notify_control_thread() {
 841   MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
 842   _control_lock.notify();
 843 }
 844 
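     // Only a young collection may preempt old marking, and only while _allow_old_preemption
     // is set; try_unset() consumes that permission atomically, so at most one requester wins.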
 845 bool ShenandoahControlThread::preempt_old_marking(GenerationMode generation) {
 846   return generation == YOUNG && _allow_old_preemption.try_unset();
 847 }
 848 
 849 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
 850   // Make sure we have at least one complete GC cycle before unblocking
 851   // from the explicit GC request.
 852   //
 853   // This is especially important for the weak reference cleanup and/or native
 854   // resource (e.g. DirectByteBuffers) machinery: when an explicit GC request
 855   // comes very late into an already running cycle, that cycle would miss lots
 856   // of new cleanup opportunities that became available before the caller
 857   // requested the GC.
 858 
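       // The loop below publishes the request and waits until the gc id has been bumped at
       // least once past the value observed here, i.e. until a complete cycle has run after
       // this request was made. Requests with the _wb_breakpoint cause do not block on the
       // monitor.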
 859   MonitorLocker ml(&_gc_waiters_lock);
 860   size_t current_gc_id = get_gc_id();
 861   size_t required_gc_id = current_gc_id + 1;
 862   while (current_gc_id < required_gc_id) {
 863     // Although the gc request is set under _gc_waiters_lock, the read side (run_service())
 864     // does not take the lock. We need to enforce the following order, so that the read side
 865     // sees the latest requested gc cause when the flag is set.
 866     _requested_gc_cause = cause;
 867     _gc_requested.set();
 868     notify_control_thread();
 869     if (cause != GCCause::_wb_breakpoint) {
 870       ml.wait();
 871     }
 872     current_gc_id = get_gc_id();
 873   }
 874 }
 875 
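     // Called by a Java thread whose allocation failed. Only the first failure is reported
     // and schedules the alloc-failure GC; it also cancels whatever the control thread is
     // currently doing so the degenerated path can start promptly.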
 876 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
 877   ShenandoahHeap* heap = ShenandoahHeap::heap();
 878 
 879   assert(current()->is_Java_thread(), "expect Java thread here");
 880 
 881   if (try_set_alloc_failure_gc()) {
 882     // Only report the first allocation failure
 883     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
 884                  req.type_string(),
 885                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
 886 
 887     // Now that alloc failure GC is scheduled, we can abort everything else
 888     heap->cancel_gc(GCCause::_allocation_failure);

 932     _do_counters_update.unset();
 933     ShenandoahHeap::heap()->monitoring_support()->update_counters();
 934   }
 935 }
 936 
 937 void ShenandoahControlThread::handle_force_counters_update() {
 938   if (_force_counters_update.is_set()) {
 939     _do_counters_update.unset(); // reset these too, we do update now!
 940     ShenandoahHeap::heap()->monitoring_support()->update_counters();
 941   }
 942 }
 943 
 944 void ShenandoahControlThread::notify_heap_changed() {
 945   // This is called from the allocation path, and thus should be fast.
 946 
 947   // Update monitoring counters when we take a new region. This amortizes the
 948   // update costs on the slow path.
 949   if (_do_counters_update.is_unset()) {
 950     _do_counters_update.set();
 951   }




 952 }
 953 
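     // Accumulates allocated words for the pacer; relaxed ordering is sufficient because the
     // counter is only consumed as a pacing heuristic input and orders no other memory.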
 954 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
 955   assert(ShenandoahPacing, "should only call when pacing is enabled");
 956   Atomic::add(&_allocs_seen, words, memory_order_relaxed);
 957 }
 958 
 959 void ShenandoahControlThread::set_forced_counters_update(bool value) {
 960   _force_counters_update.set_cond(value);
 961 }
 962 
 963 void ShenandoahControlThread::reset_gc_id() {
 964   Atomic::store(&_gc_id, (size_t)0);
 965 }
 966 
 967 void ShenandoahControlThread::update_gc_id() {
 968   Atomic::inc(&_gc_id);
 969 }
 970 
 971 size_t ShenandoahControlThread::get_gc_id() {
 972   return Atomic::load(&_gc_id);
 973 }
 974 
 975 void ShenandoahControlThread::start() {
 976   create_and_start();
 977 }
 978 
 979 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
 980   _graceful_shutdown.set();
 981 }
 982 
 983 bool ShenandoahControlThread::in_graceful_shutdown() {
 984   return _graceful_shutdown.is_set();
 985 }
 986 
 987 const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
 988   switch (mode) {
 989     case none:              return "idle";
 990     case concurrent_normal: return "normal";
 991     case stw_degenerated:   return "degenerated";
 992     case stw_full:          return "full";
 993     case servicing_old:     return "old";
 994     default:                return "unknown";
 995   }
 996 }
 997 
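     // Mode transitions are logged, and notifying _regulator_lock here is what wakes up a
     // requester blocked in request_concurrent_gc() waiting for the control thread to pick up
     // its request.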
 998 void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
 999   if (_mode != new_mode) {
1000     log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
1001     _mode = new_mode;
1002     MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
1003     ml.notify_all();
1004   }
1005 }