/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "runtime/atomic.hpp"

ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
  ShenandoahController(),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _requested_gc_cause(GCCause::_no_gc),
  _requested_generation(GLOBAL),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _mode(none) {
  shenandoah_assert_generational();
  set_name("Shenandoah Control Thread");
  create_and_start();
}

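// The main service loop: each iteration consumes pending allocation-failure and requested-GC
// state, chooses a GC mode (concurrent, degenerated, full, or old-servicing), runs the
// corresponding cycle, notifies waiters, optionally uncommits memory, and then waits until
// notified or until ShenandoahControlIntervalMax elapses.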
void ShenandoahGenerationalControlThread::run_service() {
  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();

  const GCMode default_mode = concurrent_normal;
  ShenandoahGenerationType generation = GLOBAL;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;

  // The shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we notice an expired
  // delay with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
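  // For example (illustrative value, not necessarily the default): with
  // -XX:ShenandoahUncommitDelay=1000, shrink_period is 0.1 seconds, so an
  // expired delay is noticed within roughly 100 ms.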

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();

  // Heuristics are notified here of allocation failures and other outcomes
  // of the cycle. They are also used here to decide whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
    const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set();

    GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc);

    const bool is_gc_requested = ShenandoahCollectorPolicy::is_requested_gc(cause);

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Check if we have seen a new target for soft max heap size.
    const bool soft_max_changed = heap->check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->young_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->type();
      bool old_gen_evacuation_failed = heap->old_generation()->clear_failed_evacuation();

      heuristics->log_trigger("Handle Allocation Failure");

      // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
          !old_gen_evacuation_failed && !humongous_alloc_failure_pending) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = GLOBAL;
        set_gc_mode(stw_full);
      }
    } else if (is_gc_requested) {
      generation = GLOBAL;
      global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(cause));
      global_heuristics->record_requested_gc();

      if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
        set_gc_mode(stw_full);
      } else {
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->old_generation()->is_doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
          generation = YOUNG;
        } else {
          generation = _requested_generation;
        }

        // Preemption was requested, or this is a regular cycle
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == GLOBAL) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress",
                     heap->is_concurrent_old_mark_in_progress() ? "" : " NOT",
                     heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT");

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
        heap->set_unload_classes(false);
      }
    }

    const bool gc_requested = (gc_mode() != none);
    assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set");

    if (gc_requested) {
      // Blow away all soft references on this cycle if we are handling an allocation
      // failure, an implicit or explicit GC request, or if we are asked to do so unconditionally.
      if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
        heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      }

      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      heap->set_forced_counters_update(true);

      // If GC was requested, we better dump the free set data for performance debugging
      heap->free_set()->log_status_under_lock();

      // In case this is a degenerated cycle, remember whether the original cycle was aging.
      const bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);

      switch (gc_mode()) {
        case concurrent_normal: {
          // At this point:
          //  if (generation == YOUNG), this is a normal YOUNG cycle
          //  if (generation == OLD), this is a bootstrap OLD cycle
          //  if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
          // In all three cases, we want to age old objects if this is an aging cycle
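          // (An aging cycle happens on every ShenandoahAgingCyclePeriod-th cycle:
          // age_period counts down and is re-armed once it reaches zero.)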
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_concurrent_normal_cycle(heap, generation, cause);
          break;
        }
        case stw_degenerated: {
          heap->set_aging_cycle(was_aging_cycle);
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        }
        case stw_full: {
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_stw_full_cycle(cause);
          break;
        }
        case servicing_old: {
          assert(generation == OLD, "Expected old generation here");
          GCIdMark gc_id_mark;
          service_concurrent_old_cycle(heap, cause);
          break;
        }
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (is_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it completed normally or was aborted.
      heap->free_set()->log_status_under_lock();

      {
        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        ShenandoahHeapLocker locker(heap->lock());
        heap->update_capacity_and_used_at_gc();
      }

      // Signal that we have completed a visit to all live objects.
      heap->record_whole_heap_examined_timestamp();

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      heap->handle_force_counters_update();
      heap->set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    const double current = os::elapsedTime();

    if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (is_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      heap->maybe_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle.
    if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) {
      // The timed wait is necessary because this thread has a responsibility to send
      // 'alloc_words' to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  set_gc_mode(stopped);

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahGenerationalControlThread::process_phase_timings(const ShenandoahGenerationalHeap* heap) {
  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
  ShenandoahCycleStats         evac_stats   = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles are degenerated to complete the young cycle.  Young
// and old degen may upgrade to Full GC.  Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(ShenandoahGenerationalHeap* heap,
                                                                          const ShenandoahGenerationType generation,
                                                                          GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (Young)");
      service_concurrent_cycle(heap->young_generation(), cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (Old)");
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    case GLOBAL: {
      log_info(gc, ergo)("Start GC cycle (Global)");
      service_concurrent_cycle(heap->global_generation(), cause, false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(ShenandoahGenerationalHeap* heap, GCCause::Cause &cause) {
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::FILLING: {
      ShenandoahGCSession session(cause, old_generation);
      _allow_old_preemption.set();
      old_generation->entry_coalesce_and_fill();
      _allow_old_preemption.unset();

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing threads completed and nothing was cancelled. It is safe to transition from this state.
      old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      return;
    }
    case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
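      // Intentional fall-through: once ready to bootstrap, start the bootstrapping
      // young cycle right away in the case below.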
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal except that rather than
      // ignore old references it will mark and enqueue them in the old concurrent
      // task queues but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // Young generation bootstrap cycle has failed. Concurrent mark for old generation
        // is going to resume after degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
    }
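    // Intentional fall-through into MARKING: immediately begin servicing the
    // old mark that the bootstrap cycle has just started.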
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->mmu_tracker()->record_old_marking_increment(true);
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->mmu_tracker()->record_old_marking_increment(false);
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) {
  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    heap->notify_gc_progress();
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation. In that case, the
    // old gc cycle still completed, and we have to deal with this
    // cancellation. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // A normal cycle goes via all concurrent phases. If an allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
  // heuristics say there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                       ShenandoahGeneration* generation,
                                                       GCCause::Cause& cause,
                                                       bool do_old_gc_bootstrap) {
  assert(!generation->is_old(), "Old GC takes a different control path");

  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->notify_gc_progress();
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());

    // Concurrent young-gen collection degenerates to young
    // collection.  Same for global collections.
    _degen_generation = generation;
  }
  const char* msg;
  ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
  if (generation->is_young()) {
    if (heap->cancelled_gc()) {
      msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" :
            "At end of Interrupted Concurrent Young GC";
    } else {
      // We only record GC results if GC was successful
      msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" :
            "At end of Concurrent Young GC";
      if (heap->collection_set()->has_old_regions()) {
        mmu_tracker->record_mixed(get_gc_id());
      } else if (do_old_gc_bootstrap) {
        mmu_tracker->record_bootstrap(get_gc_id());
      } else {
        mmu_tracker->record_young(get_gc_id());
      }
    }
  } else {
    assert(generation->is_global(), "If not young, must be GLOBAL");
    assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
    if (heap->cancelled_gc()) {
      msg = "At end of Interrupted Concurrent GLOBAL GC";
    } else {
      // We only record GC results if GC was successful
      msg = "At end of Concurrent Global GC";
      mmu_tracker->record_global(get_gc_id());
    }
  }
  heap->log_heap_status(msg);
}

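// Returns false if the GC has not been cancelled. Otherwise returns true after translating the
// cancellation into either a degenerated cycle request (allocation failure), an acknowledged
// old-mark preemption, or a graceful shutdown. Any other cancellation reason is a fatal error.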
bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    _preemption_requested.unset();
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If old generation was cancelled for an allocation failure, we wouldn't
    // make it to this case. The calling code is responsible for forcing a
    // cancellation due to allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}

void ShenandoahGenerationalControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);
}

void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(GCCause::Cause cause,
                                                            ShenandoahGC::ShenandoahDegenPoint point) {
  assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->is_global()) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = heap->old_generation();
    if (old->is_bootstrapping()) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

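// Called by the regulator thread to request a concurrent cycle for the given generation.
// Returns true once the control thread has picked up the request, either by starting a new
// cycle or by preempting old-generation marking. Returns false if the request is dropped,
// e.g. because another request is pending, a GC has been cancelled, or old marking may not be preempted.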
bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenerationType generation) {
  if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s",
                          BOOL_TO_STR(_preemption_requested.is_set()),
                          GCCause::to_string(_requested_gc_cause),
                          BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc()));
    return false;
  }

  if (gc_mode() == none) {
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    _requested_generation = generation;
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == none) {
      ml.wait();
    }
    return true;
  }

  if (preempt_old_marking(generation)) {
    assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode()));
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation));
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == servicing_old) {
      ml.wait();
    }
    return true;
  }

  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

void ShenandoahGenerationalControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

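// Only a request for a young collection may preempt old-generation marking, and only while the
// old cycle currently allows preemption. try_unset() atomically consumes that permission, so at
// most one requester can win.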
bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGenerationType generation) {
  return (generation == YOUNG) && _allow_old_preemption.try_unset();
}

void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
  // For normal requested GCs (System.gc) we want to block the caller. However,
  // for whitebox requested GC, we want to initiate the GC and return immediately.
  // The whitebox caller thread will arrange for itself to wait until the GC notifies
  // it that it has reached the requested breakpoint (phase in the GC).
  if (cause == GCCause::_wb_breakpoint) {
    Atomic::xchg(&_requested_gc_cause, cause);
    notify_control_thread();
    return;
  }

  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    // This races with the regulator thread to start a concurrent gc and the
    // control thread to clear it at the start of a cycle. Threads here are
    // allowed to escalate a heuristic's request for concurrent gc.
    GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing));
    }

    notify_control_thread();
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahGenerationalControlThread::notify_gc_waiters() {
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

const char* ShenandoahGenerationalControlThread::gc_mode_name(ShenandoahGenerationalControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    case stopped:           return "stopped";
    default:                return "unknown";
  }
}

void ShenandoahGenerationalControlThread::set_gc_mode(ShenandoahGenerationalControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_debug(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    _mode = new_mode;
    ml.notify_all();
  }
}