/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "runtime/atomic.hpp"

ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
  ShenandoahController(),
  _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true),
  _requested_gc_cause(GCCause::_no_gc),
  _requested_generation(GLOBAL),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(nullptr),
  _mode(none) {
  shenandoah_assert_generational();
  set_name("Shenandoah Control Thread");
  create_and_start();
}

void ShenandoahGenerationalControlThread::run_service() {
  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();

  const GCMode default_mode = concurrent_normal;
  ShenandoahGenerationType generation = GLOBAL;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
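  // For example, an illustrative ShenandoahUncommitDelay of 300000 msecs (five minutes)
  // yields a shrink_period of 30 seconds between uncommit polls.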

  ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();

  // Heuristics are notified of allocation failures here and other outcomes
  // of the cycle. They're also used here to control whether the Nth consecutive
  // degenerated cycle should be 'promoted' to a full cycle. The decision to
  // trigger a cycle or not is evaluated on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
    const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set();

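    // Atomically consume any pending GC request: reset the shared cause to _no_gc and
    // act on whatever cause a requester installed since the last iteration.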
    GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc);

    const bool is_gc_requested = ShenandoahCollectorPolicy::is_requested_gc(cause);

    // This control loop iteration has seen this much allocation.
    const size_t allocs_seen = reset_allocs_seen();

    // Check if we have seen a new target for soft max heap size.
    const bool soft_max_changed = heap->check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->young_generation();
      } else {
        assert(_degen_generation != nullptr, "Need to know which generation to resume");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->type();
      bool old_gen_evacuation_failed = heap->old_generation()->clear_failed_evacuation();

      // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() &&
          !old_gen_evacuation_failed && !humongous_alloc_failure_pending) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        // TODO: if humongous_alloc_failure_pending, there might be value in trying a "compacting" degen before
        // going all the way to full.  But it's a lot of work to implement this, and it may not provide value.
        // A compacting degen can move young regions around without doing full old-gen mark (relying upon the
        // remembered set scan), so it might be faster than a full gc.
        //
        // Longer term, think about how to defragment humongous memory concurrently.

        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = GLOBAL;
        set_gc_mode(stw_full);
      }
    } else if (is_gc_requested) {
      generation = GLOBAL;
      log_info(gc)("Trigger: GC request (%s)", GCCause::to_string(cause));
      global_heuristics->record_requested_gc();

      if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
        set_gc_mode(stw_full);
      } else {
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (cause == GCCause::_shenandoah_concurrent_gc) {
        if (_requested_generation == OLD && heap->old_generation()->is_doing_mixed_evacuations()) {
          // If a request to start an old cycle arrived while an old cycle was running, but _before_
          // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
          // the heuristic to run a young collection so that we can evacuate some old regions.
          assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
          generation = YOUNG;
        } else {
          generation = _requested_generation;
        }

        // preemption was requested or this is a regular cycle
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(servicing_old);
        }

        if (generation == GLOBAL) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
        // mixed evacuation in progress, so resume working on that.
        log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress",
                     heap->is_concurrent_old_mark_in_progress() ? "" : " NOT",
                     heap->is_prepare_for_old_mark_in_progress() ? "" : " NOT");

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(servicing_old);
        heap->set_unload_classes(false);
      }
    }

    const bool gc_requested = (gc_mode() != none);
    assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set");

    if (gc_requested) {
      // Blow away all soft references on this cycle if we are handling an allocation failure,
      // an implicit or explicit GC request, or we are requested to do so unconditionally.
      if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
        heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
      }

      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      heap->set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
      // In case this is a degenerated cycle, remember whether the original cycle was aging.
      const bool was_aging_cycle = heap->is_aging_cycle();
      heap->set_aging_cycle(false);

      switch (gc_mode()) {
        case concurrent_normal: {
          // At this point:
          //  if (generation == YOUNG), this is a normal YOUNG cycle
          //  if (generation == OLD), this is a bootstrap OLD cycle
          //  if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc()
          // In all three cases, we want to age old objects if this is an aging cycle
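          // age_period counts down the normal/full cycles in between aging cycles; when it
          // reaches zero this becomes an aging cycle and the counter is re-armed to
          // ShenandoahAgingCyclePeriod - 1.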
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_concurrent_normal_cycle(heap, generation, cause);
          break;
        }
        case stw_degenerated: {
          heap->set_aging_cycle(was_aging_cycle);
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        }
        case stw_full: {
          if (age_period-- == 0) {
            heap->set_aging_cycle(true);
            age_period = ShenandoahAgingCyclePeriod - 1;
          }
          service_stw_full_cycle(cause);
          break;
        }
        case servicing_old: {
          assert(generation == OLD, "Expected old generation here");
          GCIdMark gc_id_mark;
          service_concurrent_old_cycle(heap, cause);
          break;
        }
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (is_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        heap->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        heap->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      heap->handle_force_counters_update();
      heap->set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to pacer that we have seen this many words allocated
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    const double current = os::elapsedTime();

    if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (is_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      heap->maybe_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle.
    if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) {
      // The timed wait is necessary because this thread has a responsibility to send
      // 'alloc_words' to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahGenerationalControlThread::process_phase_timings(const ShenandoahHeap* heap) {
  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker();
  ShenandoahCycleStats         evac_stats   = evac_tracker->flush_cycle_to_global();

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
                                              &evac_stats.mutators);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). Old cycles always degenerate to a global cycle.
// Young cycles are degenerated to complete the young cycle.  Young
// and old degen may upgrade to Full GC.  Full GC may also be
// triggered directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +   ^                            +  +       |
//      v     |  |   |                            |  |       |
//   Global <-+  |   +----------------------------+  |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(ShenandoahHeap* heap,
                                                              const ShenandoahGenerationType generation,
                                                              GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case. Promoted objects should be above the TAMS in the old regions
      // they end up in, but we have to be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (YOUNG)");
      service_concurrent_cycle(heap->young_generation(), cause, false);
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (OLD)");
      service_concurrent_old_cycle(heap, cause);
      break;
    }
    case GLOBAL: {
      log_info(gc, ergo)("Start GC cycle (GLOBAL)");
      service_concurrent_cycle(heap->global_generation(), cause, false);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(ShenandoahHeap* heap, GCCause::Cause &cause) {
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  ShenandoahOldGeneration::State original_state = old_generation->state();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  switch (original_state) {
    case ShenandoahOldGeneration::FILLING: {
      ShenandoahGCSession session(cause, old_generation);
      _allow_old_preemption.set();
      old_generation->entry_coalesce_and_fill();
      _allow_old_preemption.unset();

      // Before bootstrapping begins, we must acknowledge any cancellation request.
      // If the gc has not been cancelled, this does nothing. If it has been cancelled,
      // this will clear the cancellation request and exit before starting the bootstrap
      // phase. This will allow the young GC cycle to proceed normally. If we do not
      // acknowledge the cancellation request, the subsequent young cycle will observe
      // the request and essentially cancel itself.
      if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) {
        log_info(gc)("Preparation for old generation cycle was cancelled");
        return;
      }

      // Coalescing threads completed and nothing was cancelled. It is safe to transition from this state.
      old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      return;
    }
    case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
      old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
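      // Intentional fall-through: once the old generation is ready to bootstrap,
      // start the bootstrapping young cycle in this same pass.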
    case ShenandoahOldGeneration::BOOTSTRAPPING: {
      // Configure the young generation's concurrent mark to put objects in
      // old regions into the concurrent mark queues associated with the old
      // generation. The young cycle will run as normal except that rather than
      // ignore old references it will mark and enqueue them in the old concurrent
      // task queues but it will not traverse them.
      set_gc_mode(bootstrapping_old);
      young_generation->set_old_gen_task_queues(old_generation->task_queues());
      ShenandoahGCSession session(cause, young_generation);
      service_concurrent_cycle(heap, young_generation, cause, true);
      process_phase_timings(heap);
      if (heap->cancelled_gc()) {
        // Young generation bootstrap cycle has failed. Concurrent mark for old generation
        // is going to resume after degenerated bootstrap cycle completes.
        log_info(gc)("Bootstrap cycle for old generation was cancelled");
        return;
      }

      // Reset the degenerated point. Normally this would happen at the top
      // of the control loop, but here we have just completed a young cycle
      // which has bootstrapped the old concurrent marking.
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      // From here we will 'resume' the old concurrent mark. This will skip reset
      // and init mark for the concurrent mark. All of that work will have been
      // done by the bootstrapping young cycle.
      set_gc_mode(servicing_old);
      old_generation->transition_to(ShenandoahOldGeneration::MARKING);
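      // Intentional fall-through: the bootstrap cycle completed, so resume old
      // marking immediately rather than returning to the control loop first.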
    }
    case ShenandoahOldGeneration::MARKING: {
      ShenandoahGCSession session(cause, old_generation);
      bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
      if (marking_complete) {
        assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
        if (original_state == ShenandoahOldGeneration::MARKING) {
          heap->mmu_tracker()->record_old_marking_increment(true);
          heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
        }
      } else if (original_state == ShenandoahOldGeneration::MARKING) {
        heap->mmu_tracker()->record_old_marking_increment(false);
        heap->log_heap_status("At end of Concurrent Old Marking increment");
      }
      break;
    }
    default:
      fatal("Unexpected state for old GC: %s", ShenandoahOldGeneration::state_name(old_generation->state()));
  }
}

bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) {
  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    generation->record_success_concurrent(false);
  }

  if (heap->cancelled_gc()) {
    // It's possible the gc cycle was cancelled after the last time
    // the collection checked for cancellation, in which case the
    // old gc cycle is still completed, and we have to deal with this
    // cancellation. We set the degeneration point to be outside
    // the cycle because if this is an allocation failure, that is
    // what must be done (there is no degenerated old cycle). If the
    // cancellation was due to a heuristic wanting to start a young
    // cycle, then we are not actually going to a degenerated cycle,
    // so the degenerated point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
    return false;
  }
  return true;
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // A normal cycle goes through all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
  // heuristics say there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................

  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause, generation);
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
}

void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                       ShenandoahGeneration* generation,
                                                       GCCause::Cause& cause,
                                                       bool do_old_gc_bootstrap) {
  assert(!generation->is_old(), "Old GC takes a different control path");

  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    generation->record_success_concurrent(gc.abbreviated());
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());

    // Concurrent young-gen collection degenerates to young
    // collection.  Same for global collections.
    _degen_generation = generation;
  }
  const char* msg;
  ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
  if (generation->is_young()) {
    if (heap->cancelled_gc()) {
      msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" :
            "At end of Interrupted Concurrent Young GC";
    } else {
      // We only record GC results if GC was successful
      msg = (do_old_gc_bootstrap) ? "At end of Concurrent Bootstrap GC" :
            "At end of Concurrent Young GC";
      if (heap->collection_set()->has_old_regions()) {
        mmu_tracker->record_mixed(get_gc_id());
      } else if (do_old_gc_bootstrap) {
        mmu_tracker->record_bootstrap(get_gc_id());
      } else {
        mmu_tracker->record_young(get_gc_id());
      }
    }
  } else {
    assert(generation->is_global(), "If not young, must be GLOBAL");
    assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
    if (heap->cancelled_gc()) {
      msg = "At end of Interrupted Concurrent GLOBAL GC";
    } else {
      // We only record GC results if GC was successful
      msg = "At end of Concurrent Global GC";
      mmu_tracker->record_global(get_gc_id());
    }
  }
  heap->log_heap_status(msg);
}

bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    _preemption_requested.unset();
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If old generation was cancelled for an allocation failure, we wouldn't
    // make it to this case. The calling code is responsible for forcing a
    // cancellation due to allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking");
  return false;
}

void ShenandoahGenerationalControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);
}

void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(GCCause::Cause cause,
                                                            ShenandoahGC::ShenandoahDegenPoint point) {
  assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->is_global()) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  } else {
    assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global.");
    ShenandoahOldGeneration* old = heap->old_generation();
    if (old->is_bootstrapping()) {
      old->transition_to(ShenandoahOldGeneration::MARKING);
    }
  }
}

void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) {
  if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
    handle_requested_gc(cause);
  }
}

bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenerationType generation) {
  if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s",
                          BOOL_TO_STR(_preemption_requested.is_set()),
                          GCCause::to_string(_requested_gc_cause),
                          BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc()));
    return false;
  }

  if (gc_mode() == none) {
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    _requested_generation = generation;
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == none) {
      ml.wait();
    }
    return true;
  }

  if (preempt_old_marking(generation)) {
    assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode()));
    GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing));
      return false;
    }

    log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation));
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    while (gc_mode() == servicing_old) {
      ml.wait();
    }
    return true;
  }

  log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s",
                        gc_mode_name(gc_mode()),
                        BOOL_TO_STR(_allow_old_preemption.is_set()));
  return false;
}

void ShenandoahGenerationalControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

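// Only a YOUNG request may preempt old marking, and only while the old cycle is in a
// preemptible phase (indicated by _allow_old_preemption being set). try_unset() returns
// true only if the flag was still set, so at most one requester claims the preemption.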
bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGenerationType generation) {
  return (generation == YOUNG) && _allow_old_preemption.try_unset();
}

void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
  // For normal requested GCs (System.gc) we want to block the caller. However,
  // for whitebox requested GC, we want to initiate the GC and return immediately.
  // The whitebox caller thread will arrange for itself to wait until the GC notifies
  // it that it has reached the requested breakpoint (phase in the GC).
  if (cause == GCCause::_wb_breakpoint) {
    Atomic::xchg(&_requested_gc_cause, cause);
    notify_control_thread();
    return;
  }

  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
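  // get_gc_id() is bumped by the control thread when it starts a cycle and gc waiters
  // are notified when a cycle finishes, so keep re-requesting until the gc id advances.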
  while (current_gc_id < required_gc_id) {
    // This races with the regulator thread to start a concurrent gc and the
    // control thread to clear it at the start of a cycle. Threads here are
    // allowed to escalate a heuristic's request for concurrent gc.
    GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause);
    if (existing != GCCause::_no_gc) {
      log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing));
    }

    notify_control_thread();
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahGenerationalControlThread::notify_gc_waiters() {
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

const char* ShenandoahGenerationalControlThread::gc_mode_name(ShenandoahGenerationalControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case servicing_old:     return "old";
    case bootstrapping_old: return "bootstrap";
    default:                return "unknown";
  }
}

void ShenandoahGenerationalControlThread::set_gc_mode(ShenandoahGenerationalControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    _mode = new_mode;
    ml.notify_all();
  }
}