/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ShenandoahController(),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle) {
  set_name("Shenandoah Control Thread");
  create_and_start();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  const GCMode default_mode = concurrent_normal;
  const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we hit the shrinking
  // point with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
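  // For example, assuming the default ShenandoahUncommitDelay of 5 minutes
  // (300000 msecs), the shrink period works out to 30 seconds between polls.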
  const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
  ShenandoahHeuristics* const heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
    const bool is_gc_requested = _gc_requested.is_set();
    const GCCause::Cause requested_gc_cause = _requested_gc_cause;

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Check if we have seen a new target for soft max heap size.
    const bool soft_max_changed = heap->check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
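    // GCCause::_last_gc_cause doubles as the "not yet chosen" sentinel here;
    // the assert below checks that a real cause is set whenever a mode is.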

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      heuristics->log_trigger("Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and re-seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }
    } else if (is_gc_requested) {
      cause = requested_gc_cause;
      heuristics->log_trigger("GC request (%s)", GCCause::to_string(cause));
      heuristics->record_requested_gc();

      if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
        mode = stw_full;
      } else {
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow all soft references on this cycle if we are handling an allocation
    // failure, an implicit or explicit GC request, or if we are asked to do
    // so unconditionally.
    if (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    const bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
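      // The metaspace counters snapshotted above feed the delta that is
      // reported via MetaspaceUtils::print_metaspace_change() after the cycle.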

      // If a GC was requested, we sample the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      heap->set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      heap->free_set()->log_status_under_lock();

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (is_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it completed normally or was aborted.
      heap->free_set()->log_status_under_lock();

      {
        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        ShenandoahHeapLocker locker(heap->lock());
        heap->update_capacity_and_used_at_gc();
      }

      // Signal that we have completed a visit to all live objects.
      heap->record_whole_heap_examined_timestamp();

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      heap->handle_force_counters_update();
      heap->set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    const double current = os::elapsedTime();

    if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (is_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      heap->maybe_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
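    // For example, assuming the defaults ShenandoahControlIntervalMin = 1 and
    // ShenandoahControlIntervalMax = 10 (msecs), the sleep doubles through
    // 1 -> 2 -> 4 -> 8 -> 10 and stays capped there until allocations reset it.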
    if (heap->has_changed()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a
  // Degenerated GC, which completes the GC there. If a second allocation failure
  // happens during the Degenerated GC cycle (for example, when the GC tries to
  // evacuate something and no memory is available), the cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut,
  // taken when the heuristics say there are no regions to compact, and all the
  // collected garbage comes from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc(heap->global_generation(), false);
  if (gc.collect(cause)) {
    // Cycle is complete.  There were no failed allocation requests and no degeneration, so count this as good progress.
    heap->notify_gc_progress();
    heap->global_generation()->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent(false, gc.abbreviated());
    heap->log_heap_status("At end of GC");
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
    heap->log_heap_status("At end of cancelled GC");
  }
}

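// Returns true if the GC has been cancelled, either for an allocation failure
// GC or for a graceful shutdown. On an allocation failure, records the point at
// which the cycle degenerated, so the follow-up degenerated cycle can resume there.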
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahDegenGC gc(point, heap->global_generation());
  gc.collect(cause);
}

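// Entry point for explicit GC requests. A minimal sketch of a typical caller,
// assuming the request is routed through the heap's collect() path (the exact
// call site is an assumption, not something this file prescribes):
//
//   void ShenandoahHeap::collect(GCCause::Cause cause) {
//     control_thread()->request_gc(cause);
//   }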
void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // For normal requested GCs (System.gc()) we want to block the caller. However,
  // for a whitebox-requested GC, we want to initiate the GC and return immediately.
  // The whitebox caller thread will arrange for itself to wait until the GC notifies
  // it that it has reached the requested breakpoint (phase in the GC).
  if (cause == GCCause::_wb_breakpoint) {
    _requested_gc_cause = cause;
    _gc_requested.set();
    return;
  }

  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for the weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // cleanup opportunities that appeared before the caller requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
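  // The gc id is bumped when a cycle starts (see update_gc_id() in run_service()),
  // so waiting for current_gc_id + 1 ensures we observe a cycle that began after
  // this request was filed.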
  while (current_gc_id < required_gc_id) {
    // Although the GC request is set under _gc_waiters_lock, the read side
    // (run_service()) does not take the lock. We need to enforce the following
    // order, so that the read side sees the latest requested GC cause when
    // the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();
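
    // Re-arm the request on every loop iteration: the wakeup may be spurious,
    // or may belong to a cycle that had already started before this request;
    // setting the flag again guarantees a fresh cycle runs.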

    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}