/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022, Tencent. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"

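// The control thread drives Shenandoah's collection cycles: it watches for
// allocation failures and explicit GC requests, polls the heuristics for
// concurrent cycle triggers, and runs the chosen cycle (concurrent,
// degenerated, or full) to completion.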
ShenandoahControlThread::ShenandoahControlThread() :
  ShenandoahController(),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle) {
  set_name("Shenandoah Control Thread");
  create_and_start();
}

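// The main control loop: each iteration picks at most one GC mode to run,
// executes it, publishes statistics, and then sleeps with exponential
// back-off while the heap stays quiescent.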
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const GCMode default_mode = concurrent_normal;
  const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_sleep_adjust_time = os::elapsedTime();

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
  ShenandoahHeuristics* const heuristics = heap->heuristics();
  while (!should_terminate()) {
    const GCCause::Cause cancelled_cause = heap->cancelled_cause();
    if (cancelled_cause == GCCause::_shenandoah_stop_vm) {
      break;
    }

    // Figure out if we have pending requests.
    const bool alloc_failure_pending = ShenandoahCollectorPolicy::is_allocation_failure(cancelled_cause);
    const bool is_gc_requested = _gc_requested.is_set();
    const GCCause::Cause requested_gc_cause = _requested_gc_cause;

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      heuristics->log_trigger("Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and re-seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }
    } else if (is_gc_requested) {
      cause = requested_gc_cause;
      heuristics->log_trigger("GC request (%s)", GCCause::to_string(cause));
      heuristics->record_requested_gc();

      if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
        mode = stw_full;
      } else {
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask heuristics if this cycle should unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow away all soft references on this cycle if we are handling an allocation
    // failure, an implicit or explicit GC request, or if we are asked to do so
    // unconditionally.
    if (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    const bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // Cannot uncommit bitmap slices during concurrent reset
      ShenandoahNoUncommitMark forbid_region_uncommit(heap);

      // GC is starting, bump the internal ID
      update_gc_id();

      GCIdMark gc_id_mark;

      heuristics->cancel_trigger_request();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      heap->set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      heap->free_set()->log_status_under_lock();

      heap->print_before_gc();
      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }
      heap->print_after_gc();

      // If this was the requested GC cycle, notify waiters about it
      if (is_gc_requested) {
        notify_gc_waiters();
      }

      // If this cycle completed without being cancelled, notify waiters about it
      if (!heap->cancelled_gc()) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether it
      // completed normally or was aborted.
      heap->free_set()->log_status_under_lock();

      {
        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        ShenandoahHeapLocker locker(heap->lock());
        heap->update_capacity_and_used_at_gc();
      }

      // Signal that we have completed a visit to all live objects.
      heap->record_whole_heap_examined_timestamp();

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      heap->handle_force_counters_update();
      heap->set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    // Check if we have seen a new target for the soft max heap size, or if a GC
    // was requested. Either condition triggers an attempt to uncommit regions.
    if (ShenandoahUncommit) {
      if (heap->check_soft_max_changed()) {
        heap->notify_soft_max_changed();
      } else if (is_gc_requested) {
        heap->notify_explicit_gc_requested();
      }
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    const double current = os::elapsedTime();
    if (heap->has_changed()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a
  // Degenerated GC and completes the collection there. If a second allocation failure
  // happens during the Degenerated GC cycle (for example, when the GC tries to evacuate
  // something and no memory is available), the cycle degrades further to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut,
  // taken when the heuristics find no regions to compact, and all the collectable
  // garbage comes from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                  Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
    log_info(gc)("Cancelled");
    return;
  }

  ShenandoahGCSession session(cause, heap->global_generation());

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc(heap->global_generation(), false);
  if (gc.collect(cause)) {
    // Cycle is complete. There were no failed allocation requests and no degeneration, so count this as good progress.
    heap->notify_gc_progress();
    heap->global_generation()->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent(false, gc.abbreviated());
    heap->log_heap_status("At end of GC");
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
    heap->log_heap_status("At end of cancelled GC");
  }
}

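// Returns true when the current cycle should unwind: either the VM is stopping,
// or an allocation failure demands degeneration. In the latter case, the point
// at which the concurrent cycle was interrupted is recorded in _degen_point,
// so that the follow-up degenerated cycle can resume from there.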
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    if (heap->cancelled_cause() == GCCause::_shenandoah_stop_vm) {
      return true;
    }

    if (ShenandoahCollectorPolicy::is_allocation_failure(heap->cancelled_cause())) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
      return true;
    }

    fatal("Unexpected reason for cancellation: %s", GCCause::to_string(heap->cancelled_cause()));
  }
  return false;
}

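// Called on VM shutdown: cancel any in-flight GC with the stop-VM cause, which
// run_service() observes and uses to exit its loop.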
void ShenandoahControlThread::stop_service() {
  ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_stop_vm);
}

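// Run a stop-the-world Full GC over the global generation.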
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);
}

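// Run a stop-the-world Degenerated GC, continuing the collection from the point
// at which the concurrent cycle was interrupted by the allocation failure.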
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahDegenGC gc(point, heap->global_generation());
  gc.collect(cause);
}

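// Entry point for requested GCs (e.g. System.gc() and Whitebox-induced
// collections); the collector policy decides which causes are handled here.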
void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  if (should_terminate()) {
    log_info(gc)("Control thread is terminating, no more GCs");
    return;
  }

  // For normal requested GCs (System.gc) we want to block the caller. However,
  // for a whitebox requested GC, we want to initiate the GC and return immediately.
  // The whitebox caller thread will arrange for itself to wait until the GC notifies
  // it that it has reached the requested breakpoint (a phase in the GC).
  if (cause == GCCause::_wb_breakpoint) {
    _requested_gc_cause = cause;
    _gc_requested.set();
    return;
  }

  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for the weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, that cycle would miss many of
  // the cleanup opportunities that became available before the caller requested
  // the GC.

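  // The gc id is bumped when a cycle starts, so waiting until it passes the value
  // captured here guarantees that at least one complete cycle ran after the request.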
  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id && !should_terminate()) {
    // Although the gc request is set under _gc_waiters_lock, the read side
    // (run_service()) does not take the lock. We need to enforce the following
    // store order, so that the read side sees the latest requested gc cause
    // when it observes the flag set.
    _requested_gc_cause = cause;
    _gc_requested.set();

    ml.wait();
    current_gc_id = get_gc_id();
  }
}

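// Called after a requested cycle completes: clear the request flag and wake up
// all threads blocked in handle_requested_gc().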
void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}