/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _allocs_seen(0) {
  set_name("Shenandoah Control Thread");
  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

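// Periodic sampling task, run by the VM's periodic task machinery (the
// WatcherThread): it pushes fresh values into the monitoring counters even
// while the control thread itself is busy or idle.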
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
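  // For example, ShenandoahUncommitDelay=5000 (5s) yields a shrink_period
  // of 0.5s between polls for uncommittable regions.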

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask the heuristics if they want to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow away all soft references on this cycle, if handling an allocation failure,
    // either an implicit or explicit GC request, or if we are asked to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to the pacer how much allocation we have seen this iteration
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If an allocation happened during this wait,
    // we exit sooner, to let the heuristics re-evaluate the new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a Degenerated
  // GC and completes there. If a second allocation failure happens during the Degenerated
  // GC cycle (for example, when the GC tries to evacuate something and no memory is
  // available), the cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut,
  // taken when the heuristics say there are no regions to compact, and all the collected
  // garbage comes from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc;
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent();
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }
}

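// If the GC was cancelled, remember at which point the cycle was interrupted, so
// that a subsequent degenerated cycle can continue from there. Returns true if the
// caller should abandon the current cycle.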
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahFullGC gc;
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahDegenGC gc(point);
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_codecache_GC_aggressive ||
         cause == GCCause::_codecache_GC_threshold ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_young_gc ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here: %s", GCCause::to_string(cause));

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
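  // Block until the GC id has advanced past the value observed above, i.e. until
  // at least one full cycle has started and run to its notification point after
  // this request was posted.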
  while (current_gc_id < required_gc_id) {
    // Although setting the gc request is under _gc_waiters_lock, the read side (run_service())
    // does not take the lock. We need to enforce the following order, so that the read side
    // sees the latest requested gc cause when the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();

    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req, bool block) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  if (block) {
    MonitorLocker ml(&_alloc_failure_waiters_lock);
    while (is_alloc_failure_gc()) {
      ml.wait();
    }
  }
}

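// Unlike handle_alloc_failure(), this is called when evacuation fails to allocate
// memory. It never blocks the caller; it only schedules the alloc failure GC and
// forcefully cancels the cycle that is in progress.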
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

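// The control thread's GC id is bumped at the start of every cycle. It is read by
// handle_requested_gc() to detect that new cycles have run after a GC request.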
void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}
/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint - 2, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint - 2, "ShenandoahRequestedGC_lock", true),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _requested_generation(select_global_generation()),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _allocs_seen(0),
  _mode(none) {
  set_name("Shenandoah Control Thread");
  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  ShenandoahGenerationType generation = select_global_generation();
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;
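  // age_period counts down the cycles between aging cycles: every
  // ShenandoahAgingCyclePeriod-th concurrent or full cycle is run as an
  // aging cycle (see the gc_mode() switch below).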

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  // Heuristics are notified here of allocation failures and other outcomes
  // of the cycle. They are also used here to decide whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && is_implicit_gc(requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->mode()->is_generational() ?
                            heap->young_generation() : heap->global_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->type();
      bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();

      // Do not bother with a degenerated cycle if old generation evacuation failed
      // or if a humongous allocation failed
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
          !old_gen_evacuation_failed && !humongous_alloc_failure_pending) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        // TODO: if humongous_alloc_failure_pending, there might be value in trying a "compacting" degen before
        // going all the way to full. But it's a lot of work to implement this, and it may not provide value.
        // A compacting degen can move young regions around without doing full old-gen mark (relying upon the
        // remembered set scan), so it might be faster than a full gc.
        //
        // Longer term, think about how to defragment humongous memory concurrently.

        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = select_global_generation();
        set_gc_mode(stw_full);
      }
    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      generation = select_global_generation();
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        set_gc_mode(stw_full);
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      generation = select_global_generation();
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        set_gc_mode(default_mode);

        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        set_gc_mode(stw_full);
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
          generation = YOUNG;
        } else {
          generation = _requested_generation;
        }
        // Preemption was requested, or this is a regular cycle
        cause = GCCause::_shenandoah_concurrent_gc;
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == select_global_generation()) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }

        // We don't want to spin in this loop and start a cycle every time, so
        // clear the requested gc cause. This creates a race with callers of the
        // blocking 'request_gc' method, but there the loop sets '_requested_gc_cause'
        // again until a full cycle has completed.
        _requested_gc_cause = GCCause::_no_gc;
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress",
                     heap->is_concurrent_old_mark_in_progress() ? "" : " NOT",
                     heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT");

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
        heap->set_unload_classes(false);
      }
    }

    // Blow away all soft references on this cycle, if handling an allocation failure,
    // either an implicit or explicit GC request, or if we are asked to do so unconditionally.
    if (generation == select_global_generation() && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (gc_mode() != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
      // In case this is a degenerated cycle, remember whether the original cycle was aging.
      bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);

      switch (gc_mode()) {
        case concurrent_normal: {
          // At this point:
          // if (generation == YOUNG), this is a normal YOUNG cycle
          // if (generation == OLD), this is a bootstrap OLD cycle
          // if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
          // In all three cases, we want to age old objects if this is an aging cycle
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_concurrent_normal_cycle(heap, generation, cause);
          break;
        }
        case stw_degenerated: {
          heap->set_aging_cycle(was_aging_cycle);
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        }
        case stw_full: {
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_stw_full_cycle(cause);
          break;
        }
        case servicing_old: {
          assert(generation == OLD, "Expected old generation here");
          GCIdMark gc_id_mark;
          service_concurrent_old_cycle(heap, cause);
          break;
        }
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to the pacer how much allocation we have seen this iteration
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Don't wait around if there was an allocation failure - start the next cycle immediately.
    if (!is_alloc_failure_gc()) {
      // The timed wait is necessary because this thread has a responsibility to report
      // the allocations it has seen to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {
  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
  ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles degenerate to complete the young cycle. Young
// and old degen may upgrade to Full GC. Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                +  +          +        |
//      v     |  |   |                |  |          |        |
//   Global <-+  |   +----------------+  |          |        |
//      +        |                       |          |        |
//      |        v                       v          |        |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahControlThread::service_concurrent_normal_cycle(ShenandoahHeap* heap,
                                                              const ShenandoahGenerationType generation,
                                                              GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGeneration* the_generation = nullptr;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might, or might not, have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (YOUNG)");
      the_generation = heap->young_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (OLD)");
      the_generation = heap->old_generation();
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    case GLOBAL_GEN: {
      log_info(gc, ergo)("Start GC cycle (GLOBAL)");
      the_generation = heap->global_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    case GLOBAL_NON_GEN: {
      log_info(gc, ergo)("Start GC cycle");
      the_generation = heap->global_generation();
      service_concurrent_cycle(the_generation, cause, false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahControlThread::service_concurrent_old_cycle(ShenandoahHeap* heap, GCCause::Cause &cause) {
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::FILLING: {
      _allow_old_preemption.set();
      old_generation->entry_coalesce_and_fill();
      _allow_old_preemption.unset();

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing completed and nothing was cancelled. It is safe to transition from this state.
      old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      return;
    }
    case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
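      // Fallthrough: begin bootstrapping immediately after the transition.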
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal, except that rather than
      // ignore old references, it will mark and enqueue them in the old concurrent
      // task queues, but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // The young generation bootstrap cycle has failed. Concurrent mark for the old
        // generation is going to resume after the degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
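    // Fallthrough: proceed directly into the MARKING case to resume old marking.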
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->mmu_tracker()->record_old_marking_increment(true);
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->mmu_tracker()->record_old_marking_increment(false);
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It is possible that the GC cycle was cancelled after the last time the
    // collection checked for cancellation. In that case, the old GC cycle has
    // still completed, and we have to deal with this cancellation. We set the
    // degeneration point to be outside the cycle, because if this is an allocation
    // failure, that is what must be done (there is no degenerated old cycle). If
    // the cancellation was due to a heuristic wanting to start a young cycle,
    // then we are not actually going to a degenerated cycle, so the degenerated
    // point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a Degenerated
  // GC and completes there. If a second allocation failure happens during the Degenerated
  // GC cycle (for example, when the GC tries to evacuate something and no memory is
  // available), the cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut,
  // taken when the heuristics say there are no regions to compact, and all the collected
  // garbage comes from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                       ShenandoahGeneration* generation,
                                                       GCCause::Cause& cause,
                                                       bool do_old_gc_bootstrap) {
  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
    assert(!generation->is_old(), "Old GC takes a different control path");
    // Concurrent young-gen collection degenerates to young
    // collection. Same for global collections.
    _degen_generation = generation;
  }
  const char* msg;
  if (heap->mode()->is_generational()) {
    ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
    if (generation->is_young()) {
      if (heap->cancelled_gc()) {
        msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" :
                                      "At end of Interrupted Concurrent Young GC";
      } else {
        // We only record GC results if GC was successful
        msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" :
                                      "At end of Concurrent Young GC";
        if (heap->collection_set()->has_old_regions()) {
          mmu_tracker->record_mixed(get_gc_id());
        } else if (do_old_gc_bootstrap) {
          mmu_tracker->record_bootstrap(get_gc_id());
        } else {
          mmu_tracker->record_young(get_gc_id());
        }
      }
    } else {
      assert(generation->is_global(), "If not young, must be GLOBAL");
      assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
      if (heap->cancelled_gc()) {
        msg = "At end of Interrupted Concurrent GLOBAL GC";
      } else {
        // We only record GC results if GC was successful
        msg = "At end of Concurrent Global GC";
        mmu_tracker->record_global(get_gc_id());
      }
    }
  } else {
    msg = heap->cancelled_gc() ? "At end of cancelled GC" :
                                 "At end of GC";
  }
  heap->log_heap_status(msg);
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    _preemption_requested.unset();
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If the old generation was cancelled for an allocation failure, we wouldn't
    // make it to this case. The calling code is responsible for forcing a
    // cancellation due to allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}
772
773 void ShenandoahControlThread::stop_service() {
774 // Nothing to do here.
775 }
776
777 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
778 ShenandoahHeap* const heap = ShenandoahHeap::heap();
779
780 GCIdMark gc_id_mark;
781 ShenandoahGCSession session(cause, heap->global_generation());
782
783 ShenandoahFullGC gc;
784 gc.collect(cause);
785 }
786
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause,
                                                            ShenandoahGC::ShenandoahDegenPoint point) {
  assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->is_global()) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = heap->old_generation();
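    // If the degenerated young cycle interrupted a bootstrap cycle, it still
    // completes the bootstrap work, so advance the old generation to its
    // concurrent marking state.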
    if (old->state() == ShenandoahOldGeneration::BOOTSTRAPPING) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking the heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

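// Explicit GCs are requested by the user (e.g. System.gc()) or by serviceability
// tools; implicit GCs cover every other external cause, excluding the heuristics'
// own concurrent cycles and the no-op cause.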
bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
  return !is_explicit_gc(cause)
      && cause != GCCause::_shenandoah_concurrent_gc
      && cause != GCCause::_no_gc;
}

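// Accept a synchronous GC request from outside the control thread. Explicit
// requests are dropped when -XX:+DisableExplicitGC is set; implicit requests
// are always honored.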
void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_codecache_GC_aggressive ||
         cause == GCCause::_codecache_GC_threshold ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_young_gc ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here: %s", GCCause::to_string(cause));

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

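// Request a concurrent cycle for the given generation, typically on behalf of
// the heuristics. Returns true once the control thread has actually started the
// cycle (or the preemption of old marking); returns false if the request is
// rejected because another request or cancellation is already in flight.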
bool ShenandoahControlThread::request_concurrent_gc(ShenandoahGenerationType generation) {
  if (_preemption_requested.is_set() || _gc_requested.is_set() || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s",
                          BOOL_TO_STR(_preemption_requested.is_set()),
                          BOOL_TO_STR(_gc_requested.is_set()),
                          BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc()));
    return false;
  }

  if (gc_mode() == none) {
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == none) {
      ml.wait();
    }
    return true;
  }

  if (preempt_old_marking(generation)) {
    log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation));
    assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode()));
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == servicing_old) {
      ml.wait();
    }
    return true;
  }

  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

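// Wake the control thread from its wait on _control_lock.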
void ShenandoahControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

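// Old-generation marking may only be preempted by a young collection, and only
// while _allow_old_preemption is set. try_unset() claims the flag atomically,
// so at most one requester wins the preemption.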
bool ShenandoahControlThread::preempt_old_marking(ShenandoahGenerationType generation) {
  return (generation == YOUNG) && _allow_old_preemption.try_unset();
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak reference cleanup and/or native
  // resource (e.g. DirectByteBuffer) reclamation: when an explicit GC request
  // arrives very late in an already running cycle, that cycle would miss many
  // of the cleanup opportunities that became available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // Although the GC request is set under _gc_waiters_lock, the read side
    // (run_service()) does not take that lock. Write the requested cause
    // before setting the flag, so that the read side sees the latest
    // requested cause once it observes the flag set.
    _requested_gc_cause = cause;
    _gc_requested.set();
    notify_control_thread();
    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

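// Called by a Java thread whose allocation failed: schedule an allocation
// failure GC, cancel any cycle already in progress, and, if requested, block
// the caller until the allocation failure GC has run.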
void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req, bool block) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");
  bool is_humongous = req.size() > ShenandoahHeapRegion::region_size_words();

  if (try_set_alloc_failure_gc(is_humongous)) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  if (block) {
    MonitorLocker ml(&_alloc_failure_waiters_lock);
    while (is_alloc_failure_gc()) {
      ml.wait();
    }
  }
}

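// Called when an allocation fails during evacuation. Unlike
// handle_alloc_failure(), this never blocks the caller: the cycle is cancelled
// unconditionally so the collector can recover, typically via a degenerated
// cycle.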
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  bool is_humongous = (words > ShenandoahHeapRegion::region_size_words());

  if (try_set_alloc_failure_gc(is_humongous)) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

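// The allocation failure GC has completed: clear the failure flags and release
// any Java threads blocked in handle_alloc_failure().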
void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  _humongous_alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

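// Record that an allocation failure GC is needed. Returns true only for the
// first caller to set the flag; the humongous flag is tracked separately, so
// a humongous failure is remembered even while a regular failure GC is
// already pending.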
bool ShenandoahControlThread::try_set_alloc_failure_gc(bool is_humongous) {
  if (is_humongous) {
    _humongous_alloc_failure_gc.try_set();
  }
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

bool ShenandoahControlThread::is_humongous_alloc_failure_gc() {
  return _humongous_alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

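// Periodic task hook: refresh the monitoring counters if the allocation path
// has flagged a change via notify_heap_changed().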
void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

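// While forced updates are enabled, refresh the counters on every periodic
// tick regardless of the _do_counters_update flag, which is cleared here so
// the regular path does not repeat the work.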
void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset this too, we do the update now
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

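// The GC id counts completed cycles. handle_requested_gc() relies on it to
// detect that at least one full cycle has run after a request was posted.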
void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}

const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    default:                return "unknown";
  }
}

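// Switch the control thread to a new mode, logging the transition and waking
// any thread blocked on _regulator_lock waiting for the mode to change
// (see request_concurrent_gc()).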
void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    _mode = new_mode;
    ml.notify_all();
  }
}

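// Global collections use a different generation type depending on whether the
// heap runs in generational mode.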
ShenandoahGenerationType ShenandoahControlThread::select_global_generation() {
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    return GLOBAL_GEN;
  } else {
    return GLOBAL_NON_GEN;
  }
}