/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _allocs_seen(0) {
  set_name("Shenandoah Control Thread");
  reset_gc_id();
  create_and_start();
}
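// The service loop below is the heart of the control thread. Each iteration
// checks for pending allocation failures and explicit GC requests, asks the
// heuristics whether a concurrent cycle should start, runs the selected cycle
// (concurrent, degenerated, or full), optionally uncommits unused memory, and
// then sleeps, backing off exponentially while the heap stays idle.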
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  const GCMode default_mode = concurrent_normal;
  const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10-th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
  ShenandoahHeuristics* const heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
    const bool is_gc_requested = _gc_requested.is_set();
    const GCCause::Cause requested_gc_cause = _requested_gc_cause;

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    const bool soft_max_changed = heap->check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (is_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: GC request (%s)", GCCause::to_string(cause));
      heuristics->record_requested_gc();

      if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
        mode = stw_full;
      } else {
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow away all soft references on this cycle, if we are handling an allocation
    // failure, an implicit or explicit GC request, or we are asked to do so unconditionally.
    if (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    const bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      heap->set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
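      // Dispatch to the driver for the chosen mode. Each service_* routine below
      // runs one complete cycle and returns once it finishes or is aborted.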
      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (is_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        heap->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        heap->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      heap->handle_force_counters_update();
      heap->set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know we have seen this much allocation
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    const double current = os::elapsedTime();

    if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

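      // Assumed semantics, from the names and the ShenandoahUncommitDelay math below:
      // "shrink_before" is the idle-time cutoff (only regions untouched since before
      // this timestamp are uncommit candidates), and "shrink_until" is the capacity
      // floor we do not shrink past.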
      double shrink_before = (is_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      heap->maybe_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (heap->has_changed()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during the Degenerated GC cycle (for example, when
  // GC tries to evac something and no memory is available), the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
  // heuristics say there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc;
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }
}
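// Checks whether the current cycle has been cancelled. For an allocation-failure
// cancellation, this also records how far the cycle progressed (the degen point),
// so that the follow-up degenerated cycle can pick up from that phase instead of
// starting over from scratch.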
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahFullGC gc;
  gc.collect(cause);
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahDegenGC gc(point);
  gc.collect(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // cleanup opportunities that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although the GC request is set under _gc_waiters_lock, the read side
    // (run_service()) does not take the lock. We need to enforce the following
    // store order, so that the read side observes the latest requested GC cause
    // when it sees the flag set.
    _requested_gc_cause = cause;
    _gc_requested.set();

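    // A WhiteBox breakpoint request (used by GC tests) does not block here; the
    // requester is expected to synchronize with the GC through the concurrent
    // GC breakpoint mechanism instead.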
    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req, bool block) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  if (block) {
    MonitorLocker ml(&_alloc_failure_waiters_lock);
    while (is_alloc_failure_gc()) {
      ml.wait();
    }
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}