/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _allocs_seen(0) {
  set_name("Shenandoah Control Thread");
  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

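// The periodic tasks below run off the VM's periodic task machinery: one
// publishes pending monitoring counter updates, the other periodically wakes
// up threads waiting on the pacer. This keeps that work off the allocation
// fast path.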
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x lower than the delay would mean we hit the
  // shrinking with lag of less than 1/10-th of true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
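    // Precedence: allocation failure first, then explicit and implicit GC
    // requests, and only then a heuristics-initiated concurrent cycle.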
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear all soft references on this cycle if we are handling an allocation
    // failure, an explicit or implicit GC request, or if we are asked to do so
    // unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
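      // Forced updates are picked up by ShenandoahPeriodicTask::task() via
      // handle_force_counters_update().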
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocations seen during this control loop iteration to the pacer
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

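    // Whether or not a cycle ran, consider uncommitting empty regions, then
    // decide how long to sleep before re-evaluating the heuristics.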
    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                              current :
                              current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
  // heuristics say there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc;
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent();
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahFullGC gc;
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahDegenGC gc(point);
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.
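  // The region scan below is only a pre-check done without the heap lock; the
  // actual uncommit work happens in entry_uncommit().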

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_codecache_GC_aggressive ||
         cause == GCCause::_codecache_GC_threshold ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_young_gc ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here: %s", GCCause::to_string(cause));

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although setting the gc request is done under _gc_waiters_lock, the read side
    // (run_service()) does not take the lock. We need to enforce the following order,
    // so that the read side sees the latest requested gc cause when the flag is set.
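    // Write the cause before setting the flag: run_service() reads the flag
    // first and only then consumes _requested_gc_cause.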
    _requested_gc_cause = cause;
    _gc_requested.set();

    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
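  // run_service() polls this flag to cut its sleep short and let heuristics
  // re-evaluate the new heap state sooner.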
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}