/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "runtime/atomic.hpp"

ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
  ShenandoahController(),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _requested_gc_cause(GCCause::_no_gc),
  _requested_generation(GLOBAL),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _mode(none) {
  shenandoah_assert_generational();
  set_name("Shenandoah Control Thread");
  create_and_start();
}

void ShenandoahGenerationalControlThread::run_service() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  const GCMode default_mode = concurrent_normal;
  ShenandoahGenerationType generation = GLOBAL;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we react to a shrinking
  // opportunity with a lag of at most 1/10-th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();

  // Heuristics are notified here of allocation failures and other outcomes
  // of the cycle. They are also used here to decide whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
    const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set();

    GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc);

    const bool is_gc_requested = ShenandoahCollectorPolicy::is_requested_gc(cause);

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Check if we have seen a new target for soft max heap size.
    const bool soft_max_changed = heap->check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->young_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->type();
      bool old_gen_evacuation_failed = heap->old_generation()->clear_failed_evacuation();

      // Do not bother with a degenerated cycle if old generation evacuation failed or if a humongous allocation failed
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
          !old_gen_evacuation_failed && !humongous_alloc_failure_pending) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        // TODO: if humongous_alloc_failure_pending, there might be value in trying a "compacting" degen before
        // going all the way to full.  But it's a lot of work to implement this, and it may not provide value.
        // A compacting degen can move young regions around without doing a full old-gen mark (relying upon the
        // remembered set scan), so it might be faster than a full GC.
        //
        // Longer term, think about how to defragment humongous memory concurrently.

        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = GLOBAL;
        set_gc_mode(stw_full);
      }
    } else if (is_gc_requested) {
      generation = GLOBAL;
      log_info(gc)("Trigger: GC request (%s)", GCCause::to_string(cause));
      global_heuristics->record_requested_gc();

      if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
        set_gc_mode(stw_full);
      } else {
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->old_generation()->is_doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
          generation = YOUNG;
        } else {
          generation = _requested_generation;
        }

        // Preemption was requested, or this is a regular cycle
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == GLOBAL) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress",
                     heap->is_concurrent_old_mark_in_progress() ? "" : " NOT",
                     heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT");

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
        heap->set_unload_classes(false);
      }
    }

    const bool gc_requested = (gc_mode() != none);
    assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set");

    if (gc_requested) {
      // Blow away all soft references on this cycle if we are handling an allocation failure,
      // an implicit or explicit GC request, or if we are asked to do so unconditionally.
      if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
        heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      }

      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      heap->set_forced_counters_update(true);

      // If GC was requested, we should dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
      // In case this is a degenerated cycle, remember whether the original cycle was aging.
      const bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);

      switch (gc_mode()) {
        case concurrent_normal: {
          // At this point:
          //  if (generation == YOUNG), this is a normal YOUNG cycle
          //  if (generation == OLD), this is a bootstrap OLD cycle
          //  if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
          // In all three cases, we want to age old objects if this is an aging cycle
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_concurrent_normal_cycle(heap, generation, cause);
          break;
        }
        case stw_degenerated: {
          heap->set_aging_cycle(was_aging_cycle);
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        }
        case stw_full: {
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_stw_full_cycle(cause);
          break;
        }
        case servicing_old: {
          assert(generation == OLD, "Expected old generation here");
          GCIdMark gc_id_mark;
          service_concurrent_old_cycle(heap, cause);
          break;
        }
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (is_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for the
        // global soft refs policy, and we should report it every time heap
        // usage goes down.
        heap->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        heap->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of the GC session.
      heap->handle_force_counters_update();
      heap->set_forced_counters_update(false);

      // Retract the forceful part of the soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear the metaspace OOM flag, if the current cycle unloaded classes
      if (heap->unload_classes()) {
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to the pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    const double current = os::elapsedTime();

    if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (is_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      heap->maybe_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle.
    if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) {
      // The timed wait is necessary because this thread has a responsibility to send
      // 'alloc_words' to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahGenerationalControlThread::process_phase_timings(const ShenandoahHeap* heap) {
  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
  ShenandoahCycleStats         evac_stats   = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles are degenerated to complete the young cycle.  Young
// and old degen may upgrade to Full GC.  Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(ShenandoahHeap* heap,
                                                                          const ShenandoahGenerationType generation,
                                                                          GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (YOUNG)");
      service_concurrent_cycle(heap->young_generation(), cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (OLD)");
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    case GLOBAL: {
      log_info(gc, ergo)("Start GC cycle (GLOBAL)");
      service_concurrent_cycle(heap->global_generation(), cause, false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(ShenandoahHeap* heap, GCCause::Cause &cause) {
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::FILLING: {
      _allow_old_preemption.set();
      old_generation->entry_coalesce_and_fill();
      _allow_old_preemption.unset();

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing threads completed and nothing was cancelled. It is safe to transition from this state.
      old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      return;
    }
    case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal, except that rather than
      // ignoring old references it will mark and enqueue them in the old concurrent
      // task queues, but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // The young generation bootstrap cycle has failed. Concurrent mark for the old
        // generation will resume after the degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->mmu_tracker()->record_old_marking_increment(true);
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->mmu_tracker()->record_old_marking_increment(false);
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

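// Runs or resumes the concurrent old mark using the old generation's task queues. Returns true if
// old marking ran to completion without the GC being cancelled; returns false if a cancellation
// was observed (for example, an allocation failure or a preempting young collection).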
bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) {
  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation. In that case, the
    // old gc cycle is still complete, and we have to deal with this
    // cancellation. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens
  // during any of the concurrent phases, the cycle first degrades to a Degenerated GC and
  // completes the collection there. If a second allocation failure happens during the
  // Degenerated GC cycle (for example, when the GC tries to evacuate something and no memory
  // is available), the cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken
  // when the heuristics say there are no regions to compact, and all the collected garbage
  // comes from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                                   ShenandoahGeneration* generation,
                                                                   GCCause::Cause& cause,
                                                                   bool do_old_gc_bootstrap) {
  assert(!generation->is_old(), "Old GC takes a different control path");

  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());

    // A concurrent young collection degenerates to a young
    // collection. The same holds for global collections.
    _degen_generation = generation;
  }
  const char* msg;
  ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
  if (generation->is_young()) {
    if (heap->cancelled_gc()) {
      msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" :
            "At end of Interrupted Concurrent Young GC";
    } else {
      // We only record GC results if the GC was successful
      msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" :
            "At end of Concurrent Young GC";
      if (heap->collection_set()->has_old_regions()) {
        mmu_tracker->record_mixed(get_gc_id());
      } else if (do_old_gc_bootstrap) {
        mmu_tracker->record_bootstrap(get_gc_id());
      } else {
        mmu_tracker->record_young(get_gc_id());
      }
    }
  } else {
    assert(generation->is_global(), "If not young, must be GLOBAL");
    assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
    if (heap->cancelled_gc()) {
      msg = "At end of Interrupted Concurrent GLOBAL GC";
    } else {
      // We only record GC results if the GC was successful
      msg = "At end of Concurrent Global GC";
      mmu_tracker->record_global(get_gc_id());
    }
  }
  heap->log_heap_status(msg);
}

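// Returns false if the GC has not been cancelled. Otherwise returns true, after recording the
// degeneration point for an allocation failure, acknowledging a preemption request from the
// regulator, or observing a graceful shutdown.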
bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    _preemption_requested.unset();
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If the old generation was cancelled for an allocation failure, we wouldn't
    // make it to this case. The calling code is responsible for forcing a
    // cancellation due to an allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}

void ShenandoahGenerationalControlThread::stop_service() {
  // Nothing to do here.
}

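// Runs a stop-the-world full collection of the entire heap (the global generation).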
void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);
}

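// Runs a stop-the-world degenerated cycle for the generation that was being collected when the
// cancellation occurred, resuming from the point at which the concurrent cycle was abandoned.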
void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(GCCause::Cause cause,
                                                                        ShenandoahGC::ShenandoahDegenPoint point) {
  assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->is_global()) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = heap->old_generation();
    if (old->is_bootstrapping()) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

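// Called by the regulator thread to request a concurrent cycle for the given generation. Returns
// true after the control thread has picked up the request (it waits for the GC mode to change);
// returns false if the request was dropped, for example because another request is already pending
// or because old marking cannot be preempted.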
bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenerationType generation) {
  if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s",
                          BOOL_TO_STR(_preemption_requested.is_set()),
                          GCCause::to_string(_requested_gc_cause),
                          BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc()));
    return false;
  }

  if (gc_mode() == none) {
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    _requested_generation = generation;
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == none) {
      ml.wait();
    }
    return true;
  }

  if (preempt_old_marking(generation)) {
    assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode()));
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation));
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == servicing_old) {
      ml.wait();
    }
    return true;
  }

  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

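// Wakes the control thread so it re-evaluates pending requests without waiting out the
// ShenandoahControlIntervalMax poll interval.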
void ShenandoahGenerationalControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

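// Old marking may only be preempted by a young collection, and only at points where the old cycle
// has set the _allow_old_preemption permit; try_unset consumes that permit.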
bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGenerationType generation) {
  return (generation == YOUNG) && _allow_old_preemption.try_unset();
}

void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // cleanup opportunities that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // This races with the regulator thread to start a concurrent gc and the
    // control thread to clear it at the start of a cycle. Threads here are
    // allowed to escalate a heuristic's request for concurrent gc.
    GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing));
    }

    notify_control_thread();
    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahGenerationalControlThread::notify_gc_waiters() {
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

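// Human-readable GC mode names, used in log messages.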
const char* ShenandoahGenerationalControlThread::gc_mode_name(ShenandoahGenerationalControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    default:                return "unknown";
  }
}

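// Publishes the new GC mode and notifies the regulator thread, which may be blocked in
// request_concurrent_gc() waiting for the mode to change.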
void ShenandoahGenerationalControlThread::set_gc_mode(ShenandoahGenerationalControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    _mode = new_mode;
    ml.notify_all();
  }
}