/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::safepoint - 1, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::safepoint - 1, "ShenandoahRequestedGC_lock", true),
  _control_lock(Mutex::nosafepoint - 1, "ShenandoahControlGC_lock", true),
  _regulator_lock(Mutex::nosafepoint - 1, "ShenandoahRegulatorGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _requested_generation(GenerationMode::GLOBAL),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _degen_generation(NULL),
  _allocs_seen(0),
  _mode(none) {

  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GenerationMode generation = GLOBAL;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;

  double last_shrink_time = os::elapsedTime();
  uint age_period = 0;
  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we detect regions that
  // became eligible for shrinking with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
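  // For example, if ShenandoahUncommitDelay were 300000 (5 minutes), the math
  // above gives 300000 / 1000 / 10 = 30, i.e. an uncommit poll every 30 seconds.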

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  // Heuristics are notified here of allocation failures and other cycle outcomes.
  // They are also consulted here to decide whether the Nth consecutive degenerated
  // cycle should be 'promoted' to a full cycle. The decision whether to trigger
  // a cycle at all is made on the regulator thread.
  ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool is_gc_requested = _gc_requested.is_set();
    GCCause::Cause requested_gc_cause = _requested_gc_cause;
    bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
    bool implicit_gc_requested = is_gc_requested && is_implicit_gc(requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
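    // _allocs_seen is published by pacing_notify_alloc() on the allocation paths;
    // the atomic exchange reads and resets it in a single step, so allocations
    // reported between loop iterations are never lost.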

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    set_gc_mode(none);
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we must deal with it first.
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and reseed it with the default value.
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
        _degen_generation = heap->mode()->is_generational() ? heap->young_generation() : heap->global_generation();
      } else {
        assert(_degen_generation != NULL, "Need to know which generation to resume.");
      }

      ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
      generation = _degen_generation->generation_mode();
      bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();

      // Do not bother with a degenerated cycle if old generation evacuation failed.
      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && !old_gen_evacuation_failed) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        set_gc_mode(stw_degenerated);
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        generation = GLOBAL;
        set_gc_mode(stw_full);
      }
    } else if (explicit_gc_requested) {
      cause = requested_gc_cause;
      generation = GLOBAL;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        set_gc_mode(default_mode);
        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        set_gc_mode(stw_full);
      }
    } else if (implicit_gc_requested) {
      cause = requested_gc_cause;
      generation = GLOBAL;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      global_heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        set_gc_mode(default_mode);

        // Unload and clean up everything
        heap->set_unload_classes(global_heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        set_gc_mode(stw_full);
      }
    } else {
      // We should only be here if the regulator requested a cycle or if
      // there is an old generation mark in progress.
      if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
        // Preemption was requested, or this is a regular cycle.
        cause = GCCause::_shenandoah_concurrent_gc;
        generation = _requested_generation;
        set_gc_mode(default_mode);

        // Don't start a new old marking if there is one already in progress.
        if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
          set_gc_mode(marking_old);
        }

        if (generation == GLOBAL) {
          heap->set_unload_classes(global_heuristics->should_unload_classes());
        } else {
          heap->set_unload_classes(false);
        }

        // We don't want to spin in this loop and start a cycle every time, so
        // clear the requested GC cause. This creates a race with callers of the
        // blocking 'request_gc' method, but that method loops, re-setting
        // '_requested_gc_cause' until a full cycle has completed.
        _requested_gc_cause = GCCause::_no_gc;
      } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_concurrent_prep_for_mixed_evacuation_in_progress()) {
        // Nobody asked us to do anything, but we have an old-generation mark or
        // old-generation preparation for mixed evacuation in progress, so resume
        // working on that.
        log_info(gc)("Resume old gc: marking=%s, preparing=%s",
                     BOOL_TO_STR(heap->is_concurrent_old_mark_in_progress()),
                     BOOL_TO_STR(heap->is_concurrent_prep_for_mixed_evacuation_in_progress()));

        cause = GCCause::_shenandoah_concurrent_gc;
        generation = OLD;
        set_gc_mode(marking_old);
      }
    }

    // Clear all soft references on this cycle if we are handling an allocation
    // failure, an implicit or explicit GC request, or if we are asked to do so
    // unconditionally.
    if (generation == GLOBAL && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (_mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // Since a GC was requested, sample the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // Since a GC was requested, dump the free set data for performance debugging.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      heap->set_aging_cycle(false);
      {
        switch (_mode) {
          case concurrent_normal: {
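            // Roughly every ShenandoahAgingCyclePeriod-th young cycle runs as an
            // aging cycle; age_period counts down the young cycles in between.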
            if ((generation == YOUNG) && (age_period-- == 0)) {
              heap->set_aging_cycle(true);
              age_period = ShenandoahAgingCyclePeriod - 1;
            }
            service_concurrent_normal_cycle(heap, generation, cause);
            break;
          }
          case stw_degenerated: {
            if (!service_stw_degenerated_cycle(cause, degen_point)) {
              // The degenerated GC was upgraded to a Full GC
              generation = GLOBAL;
            }
            break;
          }
          case stw_full: {
            service_stw_full_cycle(cause);
            break;
          }
          case marking_old: {
            assert(generation == OLD, "Expected old generation here");
            resume_concurrent_old_cycle(heap->old_generation(), cause);
            break;
          }
          default: {
            ShouldNotReachHere();
          }
        }
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether it
      // completed normally or was aborted.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, so we should report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        assert(generation == GLOBAL, "Only unload classes during GLOBAL cycle");
        global_heuristics->clear_metaspace_oom();
      }

      process_phase_timings(heap);

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // No GC was run this iteration; report the allocations we have seen to the pacer.
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Don't wait around if there was an allocation failure - start the next cycle immediately.
    if (!is_alloc_failure_gc()) {
      // The timed wait is necessary because this thread has a responsibility to send
      // 'alloc_words' to the pacer when it does not perform a GC.
      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
      lock.wait(ShenandoahControlIntervalMax);
    }
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {

  // Commit worker statistics to cycle data
  heap->phase_timings()->flush_par_workers_to_cycle();
  if (ShenandoahPacing) {
    heap->pacer()->flush_stats_to_cycle();
  }

  // Print GC stats for current cycle
  {
    LogTarget(Info, gc, stats) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      heap->phase_timings()->print_cycle_on(&ls);
      if (ShenandoahPacing) {
        heap->pacer()->print_cycle_on(&ls);
      }
    }
  }

  // Commit statistics to globals
  heap->phase_timings()->flush_cycle_to_global();
}

// Young and old concurrent cycles are initiated by the regulator. Implicit
// and explicit GC requests are handled by the controller thread and always
// run a global cycle (which is concurrent by default, but may be overridden
// by command line options). A degenerated old cycle always runs as a global
// cycle; a degenerated young cycle completes only the young collection. Either
// degenerated cycle may upgrade to a Full GC. A Full GC may also be triggered
// directly by a System.gc() invocation.
//
//
//      +-----+ Idle +-----+-----------+---------------------+
//      |         +        |           |                     |
//      |         |        |           |                     |
//      |         |        v           |                     |
//      |         |  Bootstrap Old +-- | ------------+       |
//      |         |   +                |             |       |
//      |         |   |                |             |       |
//      |         v   v                v             v       |
//      |    Resume Old <----------+ Young +--> Young Degen  |
//      |     +  +                                   +       |
//      v     |  |                                   |       |
//   Global <-+  |                                   |       |
//      +        |                                   |       |
//      |        v                                   v       |
//      +--->  Global Degen +--------------------> Full <----+
//
void ShenandoahControlThread::service_concurrent_normal_cycle(
  const ShenandoahHeap* heap, const GenerationMode generation, GCCause::Cause cause) {

  switch (generation) {
    case YOUNG: {
      // Run a young cycle. This might or might not have interrupted an ongoing
      // concurrent mark in the old generation. We need to think about promotions
      // in this case: promoted objects should be above the TAMS in the old regions
      // they end up in, but we must be sure we don't promote into any regions
      // that are in the cset.
      log_info(gc, ergo)("Start GC cycle (YOUNG)");
      service_concurrent_cycle(heap->young_generation(), cause, false);
      heap->young_generation()->log_status();
      break;
    }
    case GLOBAL: {
      log_info(gc, ergo)("Start GC cycle (GLOBAL)");
      service_concurrent_cycle(heap->global_generation(), cause, false);
      heap->global_generation()->log_status();
      break;
    }
    case OLD: {
      log_info(gc, ergo)("Start GC cycle (OLD)");
      service_concurrent_old_cycle(heap, cause);
      heap->old_generation()->log_status();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap* heap, GCCause::Cause &cause) {
  // Configure the young generation's concurrent mark to put objects in
  // old regions into the concurrent mark queues associated with the old
  // generation. The young cycle will run as normal, except that rather than
  // ignoring old references, it will mark and enqueue them on the old
  // generation's mark queues without traversing them.
  ShenandoahGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();

  assert(!heap->is_concurrent_old_mark_in_progress(), "Old already in progress.");
  assert(old_generation->task_queues()->is_empty(), "Old mark queues should be empty.");

  young_generation->set_old_gen_task_queues(old_generation->task_queues());
  young_generation->set_mark_incomplete();
  old_generation->set_mark_incomplete();
  service_concurrent_cycle(young_generation, cause, true);

  process_phase_timings(heap);

  if (heap->cancelled_gc()) {
    // Young generation bootstrap cycle has failed. Concurrent mark for old generation
    // is not going to resume after degenerated young cycle completes.
    log_info(gc)("Bootstrap cycle for old generation was cancelled.");
  } else {
    // Reset the degenerated point. Normally this would happen at the top
    // of the control loop, but here we have just completed a young cycle
    // which has bootstrapped the old concurrent marking.
    _degen_point = ShenandoahGC::_degenerated_outside_cycle;

    // From here we will 'resume' the old concurrent mark. This will skip reset
    // and init mark for the concurrent mark. All of that work will have been
    // done by the bootstrapping young cycle. In order to simplify the debugging
    // effort, the old cycle will ONLY complete the mark phase. No actual
    // collection of the old generation is happening here.
    set_gc_mode(marking_old);
    resume_concurrent_old_cycle(old_generation, cause);
  }
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
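    // The clamping above keeps the new value within [min_capacity, max_capacity];
    // if clamping lands back on the current soft max, there is nothing to update.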
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {

  assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress() ||
         ShenandoahHeap::heap()->is_concurrent_prep_for_mixed_evacuation_in_progress(),
         "Old mark or mixed-evac prep should be in progress");
  log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued.", generation->task_queues()->tasks());

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, generation);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
  // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
  // is allowed to cancel a GC.
  ShenandoahOldGC gc(generation, _allow_old_preemption);
  if (gc.collect(cause)) {
    generation->heuristics()->record_success_concurrent(false);
    heap->shenandoah_policy()->record_success_old();
  }

  if (heap->cancelled_gc()) {
    // It's possible the GC cycle was cancelled after the last time the
    // collection checked for cancellation; in that case the old GC cycle
    // still completed, and we have to deal with the cancellation here. We set
    // the degeneration point to be outside the cycle because, if this is an
    // allocation failure, that is what must be done (there is no degenerated
    // old cycle). If the cancellation was due to a heuristic wanting to start
    // a young cycle, then we are not actually going to a degenerated cycle,
    // so the degeneration point doesn't matter here.
    check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
    if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
      heap->shenandoah_policy()->record_interrupted_old();
    }
  }
}

void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a
  // Degenerated GC and completes the GC there. If a second allocation failure
  // happens during the Degenerated GC cycle (for example, when the GC tries to
  // evacuate something and no memory is available), the cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage
  // shortcut, taken when the heuristics say there are no regions to compact and
  // all of the collection comes from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, generation);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
  if (gc.collect(cause)) {
    // Cycle is complete
    generation->heuristics()->record_success_concurrent(gc.abbreviated());
    heap->shenandoah_policy()->record_success_concurrent();
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
    assert(generation->generation_mode() != OLD, "Old GC takes a different control path");
    // Concurrent young-gen collection degenerates to young
    // collection. Same for global collections.
    _degen_generation = generation;
  }
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    return false;
  }

  if (in_graceful_shutdown()) {
    return true;
  }

  assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
         "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));

  if (is_alloc_failure_gc()) {
    _degen_point = point;
    return true;
  }

  if (_preemption_requested.is_set()) {
    assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
    _preemption_requested.unset();

    // Old generation marking is only cancellable during concurrent marking.
    // Once final mark is complete, the code does not check again for cancellation.
    // If the old generation cycle was cancelled for an allocation failure, we
    // would not make it to this case: the calling code is responsible for forcing
    // a cancellation due to allocation failure into a degenerated cycle.
    _degen_point = point;
    heap->clear_cancelled_gc(false /* clear oom handler */);
    return true;
  }

  fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking.");
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, heap->global_generation());

  ShenandoahFullGC gc;
  gc.collect(cause);

  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

bool ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause, _degen_generation);

  ShenandoahDegenGC gc(point, _degen_generation);
  gc.collect(cause);

  assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
  if (_degen_generation->generation_mode() == GLOBAL) {
    assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
    assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
  }

  _degen_generation->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
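  // 'true' means the degenerated cycle completed on its own; 'false' means it
  // was upgraded to a Full GC, and the caller then treats the cycle as GLOBAL.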
  return !gc.upgraded_to_full();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

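// Any cause that is neither explicit (user- or serviceability-requested) nor the
// regulator's own _shenandoah_concurrent_gc cause counts as implicit; this covers,
// for example, the _metadata_GC_clear_soft_refs and _wb_full_gc causes accepted
// by request_gc() below.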
bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
  return !is_explicit_gc(cause) && cause != GCCause::_shenandoah_concurrent_gc;
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

bool ShenandoahControlThread::request_concurrent_gc(GenerationMode generation) {
  if (_preemption_requested.is_set() || _gc_requested.is_set() || ShenandoahHeap::heap()->cancelled_gc()) {
    // Ignore subsequent requests from the heuristics
    return false;
  }

  if (_mode == none) {
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    notify_control_thread();
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.wait();
    return true;
  }

  if (preempt_old_marking(generation)) {
    log_info(gc)("Preempting old generation mark to allow %s GC.", generation_name(generation));
    _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
    _requested_generation = generation;
    _preemption_requested.set();
    ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
    notify_control_thread();

    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.wait();
    return true;
  }

  return false;
}

void ShenandoahControlThread::notify_control_thread() {
  MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
  _control_lock.notify();
}

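// Old-generation marking may only be preempted while _allow_old_preemption is set,
// and only by a YOUNG request. The flag is handed to ShenandoahOldGC (see
// resume_concurrent_old_cycle) to bracket exactly where the regulator is allowed
// to cancel the old cycle; try_unset() claims the permission atomically.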
bool ShenandoahControlThread::preempt_old_marking(GenerationMode generation) {
  return generation == YOUNG && _allow_old_preemption.try_unset();
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // arrives very late in an already running cycle, that cycle would miss lots
  // of cleanup opportunities that became available before the caller requested
  // the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
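  // The GC id is bumped when a cycle starts (update_gc_id() in run_service()),
  // so waiting until it exceeds the id observed here guarantees that a cycle
  // which began after this request has run; a cycle already in flight on entry
  // does not satisfy the request.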
  while (current_gc_id < required_gc_id) {
    // Although the GC request is set under _gc_waiters_lock, the read side
    // (run_service()) does not take the lock. We need to enforce the following
    // store order, so that the read side sees the latest requested GC cause
    // when the flag is set.
    _requested_gc_cause = cause;
    _gc_requested.set();
    notify_control_thread();
    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}

const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
  switch (mode) {
    case none:              return "idle";
    case concurrent_normal: return "normal";
    case stw_degenerated:   return "degenerated";
    case stw_full:          return "full";
    case marking_old:       return "old mark";
    default:                return "unknown";
  }
}

void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
  if (_mode != new_mode) {
    log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
    _mode = new_mode;
    MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
    ml.notify_all();
  }
}