/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "runtime/atomic.hpp"
#include "utilities/events.hpp"

ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahGCRequest_lock", true),
  _requested_gc_cause(GCCause::_no_gc),
  _requested_generation(nullptr),
  _gc_mode(none),
  _degen_point(ShenandoahGC::_degenerated_unset),
  _heap(ShenandoahGenerationalHeap::heap()),
  _age_period(0) {
  shenandoah_assert_generational();
  set_name("Shenandoah Control Thread");
  create_and_start();
}

void ShenandoahGenerationalControlThread::run_service() {

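  // With pacing enabled, wake up periodically even without a request so allocations seen
  // by this loop can be reported to the pacer; a zero timeout waits indefinitely for a notify.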
  const int64_t wait_ms = ShenandoahPacing ? ShenandoahControlIntervalMin : 0;
  ShenandoahGCRequest request;
  while (!should_terminate()) {

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Figure out if we have pending requests.
    check_for_request(request);

    if (request.cause == GCCause::_shenandoah_stop_vm) {
      break;
    }

    if (request.cause != GCCause::_no_gc) {
      run_gc_cycle(request);
    } else {
      // Report to pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        _heap->pacer()->report_alloc(allocs_seen);
      }
    }

    // If the cycle was cancelled, continue to the next iteration to deal with it. Otherwise,
    // if no other cycle was requested, clean up and wait for the next request.
    if (!_heap->cancelled_gc()) {
      MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
      if (_requested_gc_cause == GCCause::_no_gc) {
        set_gc_mode(ml, none);
        ml.wait(wait_ms);
      }
    }
  }

  // In case any threads are waiting for a cycle to happen, notify them so they observe the shutdown.
  notify_gc_waiters();
  notify_alloc_failure_waiters();
  set_gc_mode(stopped);
}

void ShenandoahGenerationalControlThread::stop_service() {
  log_debug(gc, thread)("Stopping control thread");
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  _heap->cancel_gc(GCCause::_shenandoah_stop_vm);
  _requested_gc_cause = GCCause::_shenandoah_stop_vm;
  notify_cancellation(ml, GCCause::_shenandoah_stop_vm);
  // We can't wait here because it may interfere with the active cycle's ability
  // to reach a safepoint (this runs on a Java thread).
}

void ShenandoahGenerationalControlThread::check_for_request(ShenandoahGCRequest& request) {
  // Hold the lock while we read request cause and generation
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  if (_heap->cancelled_gc()) {
    // The previous request was cancelled. Either it was cancelled for an allocation
    // failure (degenerated cycle), or old marking was cancelled to run a young collection.
    // In either case, the correct generation for the next cycle can be determined by
    // the cancellation cause.
    request.cause = _heap->cancelled_cause();
    if (request.cause == GCCause::_shenandoah_concurrent_gc) {
      request.generation = _heap->young_generation();
      _heap->clear_cancelled_gc(false);
    }
  } else {
    request.cause = _requested_gc_cause;
    request.generation = _requested_generation;

    // Only clear these when we have consumed them for a request. In the case of a
    // cancelled gc, we do not want to inadvertently lose a pending request.
    _requested_gc_cause = GCCause::_no_gc;
    _requested_generation = nullptr;
  }

  if (request.cause == GCCause::_no_gc || request.cause == GCCause::_shenandoah_stop_vm) {
    return;
  }

  GCMode mode;
  if (ShenandoahCollectorPolicy::is_allocation_failure(request.cause)) {
    mode = prepare_for_allocation_failure_gc(request);
  } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) {
    mode = prepare_for_explicit_gc(request);
  } else {
    mode = prepare_for_concurrent_gc(request);
  }
  set_gc_mode(ml, mode);
}

ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_allocation_failure_gc(ShenandoahGCRequest &request) {

  if (_degen_point == ShenandoahGC::_degenerated_unset) {
    _degen_point = ShenandoahGC::_degenerated_outside_cycle;
    request.generation = _heap->young_generation();
  } else if (request.generation->is_old()) {
    // This means we degenerated during the young bootstrap for the old generation
    // cycle. The following degenerated cycle should therefore also be young.
    request.generation = _heap->young_generation();
  }

  ShenandoahHeuristics* heuristics = request.generation->heuristics();
  bool old_gen_evacuation_failed = _heap->old_generation()->clear_failed_evacuation();

  heuristics->log_trigger("Handle Allocation Failure");

  // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed
  if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
      !old_gen_evacuation_failed && request.cause != GCCause::_shenandoah_humongous_allocation_failure) {
    heuristics->record_allocation_failure_gc();
    _heap->shenandoah_policy()->record_alloc_failure_to_degenerated(_degen_point);
    return stw_degenerated;
  } else {
    heuristics->record_allocation_failure_gc();
    _heap->shenandoah_policy()->record_alloc_failure_to_full();
    request.generation = _heap->global_generation();
    return stw_full;
  }
}

ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_explicit_gc(ShenandoahGCRequest &request) const {
  ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics();
  request.generation = _heap->global_generation();
  global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(request.cause));
  global_heuristics->record_requested_gc();

  if (ShenandoahCollectorPolicy::should_run_full_gc(request.cause)) {
    return stw_full;
  } else {
    // Unload and clean up everything. Note that this is an _explicit_ request and so does not use
    // the same `should_unload_classes` call as the regulator's concurrent gc request.
    _heap->set_unload_classes(global_heuristics->can_unload_classes());
    return concurrent_normal;
  }
}

ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_concurrent_gc(const ShenandoahGCRequest &request) const {
  assert(!(request.generation->is_old() && _heap->old_generation()->is_doing_mixed_evacuations()),
             "Old heuristic should not request cycles while it waits for mixed evacuations");

  if (request.generation->is_global()) {
    ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics();
    _heap->set_unload_classes(global_heuristics->should_unload_classes());
  } else {
    _heap->set_unload_classes(false);
  }

  // preemption was requested or this is a regular cycle
  return request.generation->is_old() ? servicing_old : concurrent_normal;
}

void ShenandoahGenerationalControlThread::maybe_set_aging_cycle() {
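  // Run an aging cycle once every ShenandoahAgingCyclePeriod cycles; _age_period
  // counts down the non-aging cycles in between.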
  if (_age_period-- == 0) {
    _heap->set_aging_cycle(true);
    _age_period = ShenandoahAgingCyclePeriod - 1;
  } else {
    _heap->set_aging_cycle(false);
  }
}

void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest& request) {

  log_debug(gc, thread)("Starting GC (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name());
  assert(gc_mode() != none, "GC mode cannot be none here");

  // Blow away all soft references on this cycle if we are handling an allocation
  // failure, an implicit or explicit GC request, or if we are asked to do so unconditionally.
  if (request.generation->is_global() && (ShenandoahCollectorPolicy::is_allocation_failure(request.cause) || ShenandoahCollectorPolicy::is_explicit_gc(request.cause) || ShenandoahAlwaysClearSoftRefs)) {
    _heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
  }

  // GC is starting, bump the internal ID
  update_gc_id();

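  // Scope the current GC id so subsequent log messages are tagged with this cycle.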
  GCIdMark gc_id_mark;

  _heap->reset_bytes_allocated_since_gc_start();

  MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

  // If GC was requested, we are sampling the counters even without actual triggers
  // from allocation machinery. This captures GC phases more accurately.
  _heap->set_forced_counters_update(true);

  // If GC was requested, we better dump freeset data for performance debugging
  _heap->free_set()->log_status_under_lock();

  {
    // Cannot uncommit bitmap slices during concurrent reset
    ShenandoahNoUncommitMark forbid_region_uncommit(_heap);

    _heap->print_before_gc();
    switch (gc_mode()) {
      case concurrent_normal: {
        service_concurrent_normal_cycle(request);
        break;
      }
      case stw_degenerated: {
        service_stw_degenerated_cycle(request);
        break;
      }
      case stw_full: {
        service_stw_full_cycle(request.cause);
        break;
      }
      case servicing_old: {
        assert(request.generation->is_old(), "Expected old generation here");
        service_concurrent_old_cycle(request);
        break;
      }
      default:
        ShouldNotReachHere();
    }
    _heap->print_after_gc();
  }

  // If this cycle completed successfully, notify threads waiting for gc
  if (!_heap->cancelled_gc()) {
    notify_gc_waiters();
    notify_alloc_failure_waiters();
  }

  // Report current free set state at the end of cycle, whether
  // it is a normal completion, or the abort.
  _heap->free_set()->log_status_under_lock();

  // Notify Universe about new heap usage. This has implications for
  // global soft refs policy, and we better report it every time heap
  // usage goes down.
  _heap->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  _heap->record_whole_heap_examined_timestamp();

  // Disable forced counters update, and update counters one more time
  // to capture the state at the end of GC session.
  _heap->handle_force_counters_update();
  _heap->set_forced_counters_update(false);

  // Retract forceful part of soft refs policy
  _heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

  // Clear metaspace oom flag, if current cycle unloaded classes
  if (_heap->unload_classes()) {
    _heap->global_generation()->heuristics()->clear_metaspace_oom();
  }

  process_phase_timings();

  // Print Metaspace change following GC (if logging is enabled).
  MetaspaceUtils::print_metaspace_change(meta_sizes);

  // GC is over, we are at idle now
  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_idle();
  }

  // Check if we have seen a new target for soft max heap size or if a gc was requested.
  // Either of these conditions will attempt to uncommit regions.
  if (ShenandoahUncommit) {
    if (_heap->check_soft_max_changed()) {
      _heap->notify_soft_max_changed();
    } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) {
      _heap->notify_explicit_gc_requested();
    }
  }

  log_debug(gc, thread)("Completed GC (%s): %s, %s, cancelled: %s",
    gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name(), GCCause::to_string(_heap->cancelled_cause()));
}

void ShenandoahGenerationalControlThread::process_phase_timings() const {
  // Commit worker statistics to cycle data
  _heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    _heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = _heap->evac_tracker();
  ShenandoahCycleStats         evac_stats   = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      _heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        _heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  _heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles are degenerated to complete the young cycle.  Young
// and old degen may upgrade to Full GC.  Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(const ShenandoahGCRequest& request) {
  log_info(gc, ergo)("Start GC cycle (%s)", request.generation->name());
  if (request.generation->is_old()) {
    service_concurrent_old_cycle(request);
  } else {
    service_concurrent_cycle(request.generation, request.cause, false);
  }
}

void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(const ShenandoahGCRequest& request) {
  ShenandoahOldGeneration* old_generation = _heap->old_generation();
  ShenandoahYoungGeneration* young_generation = _heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::FILLING: {
      ShenandoahGCSession session(request.cause, old_generation);
      assert(gc_mode() == servicing_old, "Filling should be servicing old");
      _allow_old_preemption.set();
      old_generation->entry_coalesce_and_fill();
      _allow_old_preemption.unset();

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc, thread)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing is complete and nothing was cancelled; it is safe to transition from this state.
      old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      return;
    }
    case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
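      // Intentional fall through into BOOTSTRAPPING: the bootstrap young cycle starts immediately.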
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal except that rather than
      // ignore old references it will mark and enqueue them in the old concurrent
      // task queues but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      service_concurrent_cycle(young_generation, request.cause, true);
      process_phase_timings();
      if (_heap->cancelled_gc()) {
        // Young generation bootstrap cycle has failed. Concurrent mark for old generation
        // is going to resume after degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      assert(_degen_point == ShenandoahGC::_degenerated_unset, "Degen point should not be set if gc wasn't cancelled");

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
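    // Intentional fall through into MARKING: resume the old mark right after a successful bootstrap.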
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(request.cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, request.cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          _heap->mmu_tracker()->record_old_marking_increment(true);
          _heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        _heap->mmu_tracker()->record_old_marking_increment(false);
        _heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) {
  assert(_heap->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    _heap->notify_gc_progress();
    generation->record_success_concurrent(false);
  }

  if (_heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time the collection checked for
    // cancellation, in which case the old gc cycle is still complete and we have to deal with
    // the cancellation here. We set the degeneration point to be outside the cycle because if
    // this is an allocation failure, that is what must be done (there is no degenerated old
    // cycle). If the cancellation was due to a heuristic wanting to start a young cycle, then
    // we are not actually going to a degenerated cycle, so don't set the degeneration point here.
    if (ShenandoahCollectorPolicy::is_allocation_failure(cause)) {
      check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    } else if (cause == GCCause::_shenandoah_concurrent_gc) {
      _heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

// Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
// any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
// If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
// tries to evac something and no memory is available), the cycle degrades to Full GC.
//
// There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
// the heuristics say there are no regions to compact, and all the collected garbage comes
// from immediately reclaimable regions.
//
// ................................................................................................
//
//                                    (immediate garbage shortcut)                Concurrent GC
//                             /-------------------------------------------\
//                             |                                           |
//                             |                                           |
//                             |                                           |
//                             |                                           v
// [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
//                   |                    |                 |              ^
//                   | (af)               | (af)            | (af)         |
// ..................|....................|.................|..............|.......................
//                   |                    |                 |              |
//                   |                    |                 |              |      Degenerated GC
//                   v                    v                 v              |
//               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
//                   |                    |                 |              ^
//                   | (af)               | (af)            | (af)         |
// ..................|....................|.................|..............|.......................
//                   |                    |                 |              |
//                   |                    v                 |              |      Full GC
//                   \------------------->o<----------------/              |
//                                        |                                |
//                                        v                                |
//                                      Full GC  --------------------------/
//
void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation,
                                                                   GCCause::Cause cause,
                                                                   bool do_old_gc_bootstrap) {
  // At this point:
  //  if (generation == YOUNG), this is a normal young cycle or a bootstrap cycle
  //  if (generation == GLOBAL), this is a GLOBAL cycle
  // In either case, we want to age old objects if this is an aging cycle
  maybe_set_aging_cycle();

  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters());

  assert(!generation->is_old(), "Old GC takes a different control path");

  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    _heap->notify_gc_progress();
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(_heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }

  const char* msg;
  ShenandoahMmuTracker* mmu_tracker = _heap->mmu_tracker();
  if (generation->is_young()) {
    if (_heap->cancelled_gc()) {
      msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" :
            "At end of Interrupted Concurrent Young GC";
    } else {
      // We only record GC results if GC was successful
      msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" :
            "At end of Concurrent Young GC";
      if (_heap->collection_set()->has_old_regions()) {
        mmu_tracker->record_mixed(get_gc_id());
      } else if (do_old_gc_bootstrap) {
        mmu_tracker->record_bootstrap(get_gc_id());
      } else {
        mmu_tracker->record_young(get_gc_id());
      }
    }
  } else {
    assert(generation->is_global(), "If not young, must be GLOBAL");
    assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
    if (_heap->cancelled_gc()) {
      msg = "At end of Interrupted Concurrent GLOBAL GC";
    } else {
      // We only record GC results if GC was successful
      msg = "At end of Concurrent Global GC";
      mmu_tracker->record_global(get_gc_id());
    }
  }
  _heap->log_heap_status(msg);
}

bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  if (!_heap->cancelled_gc()) {
    return false;
  }

  if (_heap->cancelled_cause() == GCCause::_shenandoah_stop_vm
    || _heap->cancelled_cause() == GCCause::_shenandoah_concurrent_gc) {
    log_debug(gc, thread)("Cancellation detected, reason: %s", GCCause::to_string(_heap->cancelled_cause()));
    return true;
  }

  if (ShenandoahCollectorPolicy::is_allocation_failure(_heap->cancelled_cause())) {
    assert(_degen_point == ShenandoahGC::_degenerated_unset,
           "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
    _degen_point = point;
608     log_debug(gc, thread)("Cancellation detected:, reason: %s, degen point: %s",
                          GCCause::to_string(_heap->cancelled_cause()),
                          ShenandoahGC::degen_point_to_string(_degen_point));
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}

void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahGCSession session(cause, _heap->global_generation());
  maybe_set_aging_cycle();
  ShenandoahFullGC gc;
  gc.collect(cause);
  _degen_point = ShenandoahGC::_degenerated_unset;
}

void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(const ShenandoahGCRequest& request) {
  assert(_degen_point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  ShenandoahGCSession session(request.cause, request.generation);

  ShenandoahDegenGC gc(_degen_point, request.generation);
  gc.collect(request.cause);
  _degen_point = ShenandoahGC::_degenerated_unset;

  assert(_heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (request.generation->is_global()) {
    assert(_heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(_heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(request.generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = _heap->old_generation();
    if (old->is_bootstrapping()) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::is_allocation_failure(cause)) {
    // GC should already be cancelled. Here we are just notifying the control thread to
    // wake up and handle the cancellation request, so we don't need to set _requested_gc_cause.
    notify_cancellation(cause);
  } else if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGeneration* generation) {
  if (_heap->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: gc_requested: %s, gc_cancelled: %s",
                          GCCause::to_string(_requested_gc_cause),
                          BOOL_TO_STR(_heap->cancelled_gc()));
    return false;
  }

  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  if (gc_mode() == servicing_old) {
    if (!preempt_old_marking(generation)) {
      log_debug(gc, thread)("Cannot start young, old collection is not preemptible");
      return false;
    }

    // Cancel the old GC and wait for the control thread to start servicing the new request.
    log_info(gc)("Preempting old generation mark to allow %s GC", generation->name());
    while (gc_mode() == servicing_old) {
      ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
      notify_cancellation(ml, GCCause::_shenandoah_concurrent_gc);
      ml.wait();
    }
    return true;
  }

  if (gc_mode() == none) {
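    // Loop until the control thread either starts a cycle (the gc id advances) or leaves
    // the idle mode; re-checking the condition guards against spurious wakeups.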
    const size_t current_gc_id = get_gc_id();
    while (gc_mode() == none && current_gc_id == get_gc_id()) {
      if (_requested_gc_cause != GCCause::_no_gc) {
        log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(_requested_gc_cause));
        return false;
      }

      notify_control_thread(ml, GCCause::_shenandoah_concurrent_gc, generation);
      ml.wait();
    }
    return true;
  }
  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

void ShenandoahGenerationalControlThread::notify_control_thread(GCCause::Cause cause, ShenandoahGeneration* generation) {
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  notify_control_thread(ml, cause, generation);
}

void ShenandoahGenerationalControlThread::notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation) {
  assert(_control_lock.is_locked(), "Request lock must be held here");
  log_debug(gc, thread)("Notify control (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(cause), generation->name());
  _requested_gc_cause = cause;
  _requested_generation = generation;
  ml.notify();
}

void ShenandoahGenerationalControlThread::notify_cancellation(GCCause::Cause cause) {
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  notify_cancellation(ml, cause);
}

void ShenandoahGenerationalControlThread::notify_cancellation(MonitorLocker& ml, GCCause::Cause cause) {
  assert(_heap->cancelled_gc(), "GC should already be cancelled");
  log_debug(gc, thread)("Notify control (%s): %s", gc_mode_name(gc_mode()), GCCause::to_string(cause));
  ml.notify();
}

bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGeneration* generation) {
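  // Only a young collection may preempt old marking, and only while the old cycle has marked
  // itself preemptible; try_unset atomically claims the permission so at most one request wins.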
  return generation->is_young() && _allow_old_preemption.try_unset();
}

void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
  // For normal requested GCs (System.gc) we want to block the caller. However,
  // for whitebox requested GC, we want to initiate the GC and return immediately.
  // The whitebox caller thread will arrange for itself to wait until the GC notifies
  // it that it has reached the requested breakpoint (phase in the GC).
  if (cause == GCCause::_wb_breakpoint) {
    notify_control_thread(cause, ShenandoahHeap::heap()->global_generation());
    return;
  }

  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  const size_t required_gc_id = current_gc_id + 1;
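  // Block until the gc id advances past the value observed at request time, i.e. until at
  // least one full cycle completes after this request; should_terminate() unblocks on shutdown.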
  while (current_gc_id < required_gc_id && !should_terminate()) {
    // Make requests to run a global cycle until at least one is completed
    notify_control_thread(cause, ShenandoahHeap::heap()->global_generation());
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahGenerationalControlThread::notify_gc_waiters() {
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

const char* ShenandoahGenerationalControlThread::gc_mode_name(GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    case stopped:           return "stopped";
    default:                return "unknown";
  }
}

void ShenandoahGenerationalControlThread::set_gc_mode(GCMode new_mode) {
  MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
  set_gc_mode(ml, new_mode);
}

void ShenandoahGenerationalControlThread::set_gc_mode(MonitorLocker& ml, GCMode new_mode) {
  if (_gc_mode != new_mode) {
    log_debug(gc, thread)("Transition from: %s to: %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode));
    EventMark event("Control thread transition from: %s, to %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode));
    _gc_mode = new_mode;
    ml.notify_all();
  }
}