
src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp


 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 27 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 28 #include "gc/shenandoah/shenandoahControlThread.hpp"
 29 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 31 #include "gc/shenandoah/shenandoahFullGC.hpp"
 32 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 33 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 34 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 35 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 36 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 37 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 38 #include "gc/shenandoah/shenandoahUtils.hpp"
 39 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 40 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 41 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
 42 #include "memory/iterator.hpp"
 43 #include "memory/metaspaceUtils.hpp"
 44 #include "memory/metaspaceStats.hpp"
 45 #include "memory/universe.hpp"
 46 #include "runtime/atomic.hpp"
 47 
 48 ShenandoahControlThread::ShenandoahControlThread() :
 49   ConcurrentGCThread(),
 50   _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", Monitor::_safepoint_check_always, true),
 51   _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", Monitor::_safepoint_check_always, true),
 52   _periodic_task(this),
 53   _requested_gc_cause(GCCause::_no_cause_specified),
 54   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
 55   _allocs_seen(0) {
 56 
 57   reset_gc_id();
 58   create_and_start();
 59   _periodic_task.enroll();
 60   if (ShenandoahPacing) {
 61     _periodic_pacer_notify_task.enroll();
 62   }
 63 }
 64 
 65 ShenandoahControlThread::~ShenandoahControlThread() {
 66   // This is here so that super is called.
 67 }
 68 
 69 void ShenandoahPeriodicTask::task() {
 70   _thread->handle_force_counters_update();
 71   _thread->handle_counters_update();
 72 }
 73 
 74 void ShenandoahPeriodicPacerNotify::task() {
 75   assert(ShenandoahPacing, "Should not be here otherwise");
 76   ShenandoahHeap::heap()->pacer()->notify_waiters();
 77 }
 78 
 79 void ShenandoahControlThread::run_service() {
 80   ShenandoahHeap* heap = ShenandoahHeap::heap();
 81 
 82   GCMode default_mode = concurrent_normal;
 83   GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
 84   int sleep = ShenandoahControlIntervalMin;
 85 
 86   double last_shrink_time = os::elapsedTime();
 87   double last_sleep_adjust_time = os::elapsedTime();
 88 
 89   // Shrink period avoids constantly polling regions for shrinking.
 90   // Having a period 10x shorter than the delay means we catch regions eligible
 91   // for shrinking with a lag of less than 1/10th of the true delay.
 92   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
 93   double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
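  // Illustrative arithmetic (not in the original source): with a delay of, say, 300000 ms,
  // shrink_period = 300000 / 1000 / 10 = 30 seconds, so a region that becomes eligible
  // for uncommit is noticed at most ~30 s late, i.e. within 1/10th of the configured delay.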
 94 
 95   ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
 96   ShenandoahHeuristics* heuristics = heap->heuristics();
 97   while (!in_graceful_shutdown() && !should_terminate()) {
 98     // Figure out if we have pending requests.
 99     bool alloc_failure_pending = _alloc_failure_gc.is_set();
100     bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
101     bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);
102 
103     // This control loop iteration has seen this many allocations.
104     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
105 
106     // Check if we have seen a new target for soft max heap size.
107     bool soft_max_changed = check_soft_max_changed();
108 
109     // Choose which GC mode to run in. The block below should select a single mode.
110     GCMode mode = none;
111     GCCause::Cause cause = GCCause::_last_gc_cause;
112     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
113 
114     if (alloc_failure_pending) {
115       // Allocation failure takes precedence: we have to deal with it first thing
116       log_info(gc)("Trigger: Handle Allocation Failure");
117 
118       cause = GCCause::_allocation_failure;
119 
120       // Consume the degen point, and seed it with default value
121       degen_point = _degen_point;
122       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
123 
124       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
125         heuristics->record_allocation_failure_gc();
126         policy->record_alloc_failure_to_degenerated(degen_point);
127         mode = stw_degenerated;
128       } else {
129         heuristics->record_allocation_failure_gc();
130         policy->record_alloc_failure_to_full();
131         mode = stw_full;
132       }
133 
134     } else if (explicit_gc_requested) {
135       cause = _requested_gc_cause;
136       log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
137 
138       heuristics->record_requested_gc();
139 
140       if (ExplicitGCInvokesConcurrent) {
141         policy->record_explicit_to_concurrent();
142         mode = default_mode;
143         // Unload and clean up everything
144         heap->set_unload_classes(heuristics->can_unload_classes());
145       } else {
146         policy->record_explicit_to_full();
147         mode = stw_full;
148       }
149     } else if (implicit_gc_requested) {
150       cause = _requested_gc_cause;
151       log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
152 
153       heuristics->record_requested_gc();
154 
155       if (ShenandoahImplicitGCInvokesConcurrent) {
156         policy->record_implicit_to_concurrent();
157         mode = default_mode;
158 
159         // Unload and clean up everything
160         heap->set_unload_classes(heuristics->can_unload_classes());
161       } else {
162         policy->record_implicit_to_full();
163         mode = stw_full;
164       }
165     } else {
166       // Potential normal cycle: ask heuristics if it wants to act
167       if (heuristics->should_start_gc()) {
168         mode = default_mode;
169         cause = default_cause;
170       }
171 
172       // Ask policy if this cycle wants to process references or unload classes
173       heap->set_unload_classes(heuristics->should_unload_classes());
174     }
175 
176     // Blow away all soft references on this cycle if we are handling an allocation failure,
177     // an implicit or explicit GC request, or if we are asked to do so unconditionally.
178     if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
179       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
180     }
181 
182     bool gc_requested = (mode != none);
183     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
184 
185     if (gc_requested) {
186       // GC is starting, bump the internal ID
187       update_gc_id();
188 
189       heap->reset_bytes_allocated_since_gc_start();
190 
191       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
192 
193       // If GC was requested, we are sampling the counters even without actual triggers
194       // from allocation machinery. This captures GC phases more accurately.
195       set_forced_counters_update(true);
196 
197       // If GC was requested, we better dump freeset data for performance debugging
198       {
199         ShenandoahHeapLocker locker(heap->lock());
200         heap->free_set()->log_status();
201       }
202 
203       switch (mode) {
204         case concurrent_normal:
205           service_concurrent_normal_cycle(cause);
206           break;
207         case stw_degenerated:
208           service_stw_degenerated_cycle(cause, degen_point);
209           break;
210         case stw_full:
211           service_stw_full_cycle(cause);
212           break;
213         default:
214           ShouldNotReachHere();
215       }
216 
217       // If this was the requested GC cycle, notify waiters about it
218       if (explicit_gc_requested || implicit_gc_requested) {
219         notify_gc_waiters();
220       }
221 
222       // If this was the allocation failure GC cycle, notify waiters about it
223       if (alloc_failure_pending) {
224         notify_alloc_failure_waiters();
225       }
226 
227       // Report the current free set state at the end of the cycle, whether
228       // it completed normally or was aborted.
229       {
230         ShenandoahHeapLocker locker(heap->lock());
231         heap->free_set()->log_status();
232 
233         // Notify Universe about new heap usage. This has implications for
234         // global soft refs policy, and we better report it every time heap
235         // usage goes down.
236         Universe::heap()->update_capacity_and_used_at_gc();
237 
238         // Signal that we have completed a visit to all live objects.
239         Universe::heap()->record_whole_heap_examined_timestamp();
240       }
241 
242       // Disable forced counters update, and update counters one more time
243       // to capture the state at the end of GC session.
244       handle_force_counters_update();
245       set_forced_counters_update(false);
246 
247       // Retract forceful part of soft refs policy
248       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
249 
250       // Clear metaspace oom flag, if current cycle unloaded classes
251       if (heap->unload_classes()) {
252         heuristics->clear_metaspace_oom();
253       }
254 
255       // Commit worker statistics to cycle data
256       heap->phase_timings()->flush_par_workers_to_cycle();
257       if (ShenandoahPacing) {
258         heap->pacer()->flush_stats_to_cycle();
259       }
260 
261       // Print GC stats for current cycle
262       {
263         LogTarget(Info, gc, stats) lt;
264         if (lt.is_enabled()) {
265           ResourceMark rm;
266           LogStream ls(lt);
267           heap->phase_timings()->print_cycle_on(&ls);
268           if (ShenandoahPacing) {
269             heap->pacer()->print_cycle_on(&ls);
270           }
271         }
272       }

291     double current = os::elapsedTime();
292 
293     if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
294       // Explicit GC tries to uncommit everything down to min capacity.
295       // Soft max change tries to uncommit everything down to target capacity.
296       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
297 
298       double shrink_before = (explicit_gc_requested || soft_max_changed) ?
299                              current :
300                              current - (ShenandoahUncommitDelay / 1000.0);
301 
302       size_t shrink_until = soft_max_changed ?
303                              heap->soft_max_capacity() :
304                              heap->min_capacity();
305 
306       service_uncommit(shrink_before, shrink_until);
307       heap->phase_timings()->flush_cycle_to_global();
308       last_shrink_time = current;
309     }
310 
311     // Wait before performing the next action. If allocation happened during this wait,
312     // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
313     // back off exponentially.
314     if (_heap_changed.try_unset()) {
315       sleep = ShenandoahControlIntervalMin;
316     } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
317       sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
318       last_sleep_adjust_time = current;
319     }
320     os::naked_short_sleep(sleep);
321   }
322 
323   // Wait for the actual stop(), can't leave run_service() earlier.
324   while (!should_terminate()) {
325     os::naked_short_sleep(ShenandoahControlIntervalMin);
326   }
327 }
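
The wait at the bottom of the loop above (lines 311-320) is a simple exponential back-off: the interval resets to ShenandoahControlIntervalMin whenever the heap changed, and otherwise doubles once per ShenandoahControlIntervalAdjustPeriod, capped at ShenandoahControlIntervalMax. Below is a minimal, self-contained sketch of that policy; the constants and the next_sleep_ms helper are illustrative stand-ins, not HotSpot code.

#include <algorithm>
#include <cstdio>

// Hypothetical stand-ins for ShenandoahControlIntervalMin/Max/AdjustPeriod.
static const int kIntervalMinMs  = 1;
static const int kIntervalMaxMs  = 10;
static const int kAdjustPeriodMs = 1000;

// Next sleep interval: reset to the minimum when the heap changed, otherwise
// double (capped) at most once per adjustment period.
static int next_sleep_ms(bool heap_changed, int current_sleep_ms,
                         double now_s, double* last_adjust_s) {
  if (heap_changed) {
    return kIntervalMinMs;
  }
  if ((now_s - *last_adjust_s) * 1000 > kAdjustPeriodMs) {
    *last_adjust_s = now_s;
    return std::min(kIntervalMaxMs, std::max(1, current_sleep_ms * 2));
  }
  return current_sleep_ms;
}

int main() {
  double last_adjust = 0.0;
  int sleep_ms = kIntervalMinMs;
  // Idle: the interval doubles each adjustment period until it hits the cap.
  for (double t = 1.5; t < 10.0; t += 1.5) {
    sleep_ms = next_sleep_ms(/*heap_changed=*/false, sleep_ms, t, &last_adjust);
    std::printf("t=%.1fs sleep=%dms\n", t, sleep_ms);
  }
  // An allocation-driven heap change snaps the interval back to the minimum.
  sleep_ms = next_sleep_ms(/*heap_changed=*/true, sleep_ms, 11.0, &last_adjust);
  std::printf("after heap change: sleep=%dms\n", sleep_ms);
  return 0;
}

Running the sketch shows the interval doubling from the 1 ms minimum up to the 10 ms cap while idle, then snapping back to 1 ms after the simulated heap change.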
328 
329 bool ShenandoahControlThread::check_soft_max_changed() const {
330   ShenandoahHeap* heap = ShenandoahHeap::heap();
331   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
332   size_t old_soft_max = heap->soft_max_capacity();
333   if (new_soft_max != old_soft_max) {
334     new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
335     new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
336     if (new_soft_max != old_soft_max) {
337       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
338                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
339                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
340       );
341       heap->set_soft_max_capacity(new_soft_max);
342       return true;
343     }
344   }
345   return false;
346 }
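
check_soft_max_changed() above clamps the new SoftMaxHeapSize value into [min_capacity(), max_capacity()] and reports a change only when the clamped value differs from the current soft max. The sketch below isolates that clamping rule, with plain size_t parameters standing in for the ShenandoahHeap accessors; the function name and signature are hypothetical.

#include <algorithm>
#include <cstddef>

// Clamp a requested soft max into [min_capacity, max_capacity] and report whether
// the effective soft max actually changes.
static bool soft_max_changed(size_t requested, size_t current,
                             size_t min_capacity, size_t max_capacity,
                             size_t* new_soft_max) {
  size_t clamped = std::min(max_capacity, std::max(min_capacity, requested));
  if (clamped != current) {
    *new_soft_max = clamped;
    return true;
  }
  return false;
}

For example, with min = 64 MB, max = 1 GB and a current soft max of 1 GB, a request of 16 MB clamps to 64 MB and is reported as a change.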
347 
348 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
349   // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens
350   // during any of the concurrent phases, the cycle first degrades to a Degenerated GC and completes there.
351   // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
352   // tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
353   //
354   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
355   // heuristics say there are no regions to compact, and all the collection comes from immediately
356   // reclaimable regions.
357   //
358   // ................................................................................................
359   //
360   //                                    (immediate garbage shortcut)                Concurrent GC
361   //                             /-------------------------------------------\
362   //                             |                                           |
363   //                             |                                           |
364   //                             |                                           |
365   //                             |                                           v
366   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
367   //                   |                    |                 |              ^
368   //                   | (af)               | (af)            | (af)         |
369   // ..................|....................|.................|..............|.......................
370   //                   |                    |                 |              |
371   //                   |                    |                 |              |      Degenerated GC
372   //                   v                    v                 v              |
373   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
374   //                   |                    |                 |              ^
375   //                   | (af)               | (af)            | (af)         |
376   // ..................|....................|.................|..............|.......................
377   //                   |                    |                 |              |
378   //                   |                    v                 |              |      Full GC
379   //                   \------------------->o<----------------/              |
380   //                                        |                                |
381   //                                        v                                |
382   //                                      Full GC  --------------------------/
383   //
384   ShenandoahHeap* heap = ShenandoahHeap::heap();
385   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
386 
387   GCIdMark gc_id_mark;
388   ShenandoahGCSession session(cause);
389 
390   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
391 
392   ShenandoahConcurrentGC gc;
393   if (gc.collect(cause)) {
394     // Cycle is complete
395     heap->heuristics()->record_success_concurrent();
396     heap->shenandoah_policy()->record_success_concurrent();
397   } else {
398     assert(heap->cancelled_gc(), "Must have been cancelled");
399     check_cancellation_or_degen(gc.degen_point());
400   }
401 }
402 
403 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
404   ShenandoahHeap* heap = ShenandoahHeap::heap();
405   if (heap->cancelled_gc()) {
406     assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
407     if (!in_graceful_shutdown()) {
408       assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
409               "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
410       _degen_point = point;
411     }
412     return true;
413   }
414   return false;
415 }
416 
417 void ShenandoahControlThread::stop_service() {
418   // Nothing to do here.
419 }
420 
421 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
422   GCIdMark gc_id_mark;
423   ShenandoahGCSession session(cause);
424 
425   ShenandoahFullGC gc;
426   gc.collect(cause);
427 
428   ShenandoahHeap* const heap = ShenandoahHeap::heap();
429   heap->heuristics()->record_success_full();
430   heap->shenandoah_policy()->record_success_full();
431 }
432 
433 void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
434   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
435 
436   GCIdMark gc_id_mark;
437   ShenandoahGCSession session(cause);
438 
439   ShenandoahDegenGC gc(point);
440   gc.collect(cause);
441 
442   ShenandoahHeap* const heap = ShenandoahHeap::heap();
443   heap->heuristics()->record_success_degenerated();
444   heap->shenandoah_policy()->record_success_degenerated();
445 }
446 
447 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
448   ShenandoahHeap* heap = ShenandoahHeap::heap();
449 
450   // Determine if there is work to do. This avoids taking heap lock if there is
451   // no work available, avoids spamming logs with superfluous logging messages,
452   // and minimises the amount of work while locks are taken.
453 
454   if (heap->committed() <= shrink_until) return;
455 
456   bool has_work = false;
457   for (size_t i = 0; i < heap->num_regions(); i++) {
458     ShenandoahHeapRegion *r = heap->get_region(i);
459     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
460       has_work = true;
461       break;
462     }
463   }
464 
465   if (has_work) {
466     heap->entry_uncommit(shrink_before, shrink_until);
467   }
468 }
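
service_uncommit() above scans the regions without holding the heap lock and only enters the (potentially expensive) uncommit path when at least one empty committed region has been empty for long enough. Here is a small sketch of that "cheap unlocked check, then lock only if there is work" shape; the Region struct, the std::mutex and the helper names are hypothetical stand-ins for the HotSpot types.

#include <mutex>
#include <vector>

struct Region {
  bool   empty_committed;
  double empty_time;  // seconds since some epoch
};

// Locked, potentially expensive uncommit work (stubbed out for illustration).
static void uncommit_regions_locked(std::vector<Region>& regions, double shrink_before) {
  for (Region& r : regions) {
    if (r.empty_committed && r.empty_time < shrink_before) {
      r.empty_committed = false;  // pretend the region was uncommitted
    }
  }
}

static void service_uncommit(std::vector<Region>& regions, std::mutex& heap_lock,
                             double shrink_before) {
  // Cheap unlocked scan (racy by design, as in the original): bail out early
  // if there is nothing to do, without touching the lock or the logs.
  bool has_work = false;
  for (const Region& r : regions) {
    if (r.empty_committed && r.empty_time < shrink_before) {
      has_work = true;
      break;
    }
  }
  if (!has_work) return;

  std::lock_guard<std::mutex> lock(heap_lock);
  uncommit_regions_locked(regions, shrink_before);
}

As in the original, the unlocked scan is only a filter; the locked path re-examines the regions before doing any work.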
469 
470 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
471   return GCCause::is_user_requested_gc(cause) ||
472          GCCause::is_serviceability_requested_gc(cause);
473 }
474 
475 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
476   assert(GCCause::is_user_requested_gc(cause) ||
477          GCCause::is_serviceability_requested_gc(cause) ||
478          cause == GCCause::_metadata_GC_clear_soft_refs ||
479          cause == GCCause::_full_gc_alot ||
480          cause == GCCause::_wb_full_gc ||
481          cause == GCCause::_wb_breakpoint ||
482          cause == GCCause::_scavenge_alot,
483          "only requested GCs here");
484 
485   if (is_explicit_gc(cause)) {
486     if (!DisableExplicitGC) {
487       handle_requested_gc(cause);
488     }
489   } else {
490     handle_requested_gc(cause);
491   }
492 }
493 
494 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
495   // Make sure we have at least one complete GC cycle before unblocking
496   // from the explicit GC request.
497   //
498   // This is especially important for the weak references cleanup and/or native
499   // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
500   // comes very late in an already running cycle, that cycle would miss lots of
501   // new cleanup opportunities that became available before the caller
502   // requested the GC.
503 
504   MonitorLocker ml(&_gc_waiters_lock);
505   size_t current_gc_id = get_gc_id();
506   size_t required_gc_id = current_gc_id + 1;
507   while (current_gc_id < required_gc_id) {
508     _gc_requested.set();
509     _requested_gc_cause = cause;
510 
511     if (cause != GCCause::_wb_breakpoint) {
512       ml.wait();
513     }
514     current_gc_id = get_gc_id();
515   }
516 }
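
handle_requested_gc() above parks the requesting thread on _gc_waiters_lock until the GC id has advanced past the value observed at request time, i.e. until at least one complete cycle has run after the request. Below is a standalone sketch of that handshake using std::mutex and std::condition_variable in place of the HotSpot MonitorLocker; class and member names are illustrative.

#include <condition_variable>
#include <cstddef>
#include <mutex>

// Requesters block until at least one full cycle has completed after their request.
class CycleWaiter {
 public:
  // Control loop: called once per completed GC cycle.
  void complete_cycle() {
    std::lock_guard<std::mutex> lock(_lock);
    ++_gc_id;
    _gc_requested = false;
    _cv.notify_all();
  }

  // Requesting thread: record the request and wait until the GC id passes the
  // value observed when the request was made.
  void request_and_wait() {
    std::unique_lock<std::mutex> lock(_lock);
    const size_t required_gc_id = _gc_id + 1;
    _gc_requested = true;
    _cv.wait(lock, [&] { return _gc_id >= required_gc_id; });
  }

  bool gc_requested() {
    std::lock_guard<std::mutex> lock(_lock);
    return _gc_requested;
  }

 private:
  std::mutex              _lock;
  std::condition_variable _cv;
  size_t                  _gc_id = 0;
  bool                    _gc_requested = false;
};

The real code also re-arms the request flag and cause on every wakeup and skips the wait for the _wb_breakpoint cause; those details are left out of the sketch.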
517 
518 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
519   ShenandoahHeap* heap = ShenandoahHeap::heap();
520 
521   assert(current()->is_Java_thread(), "expect Java thread here");
522 
523   if (try_set_alloc_failure_gc()) {
524     // Only report the first allocation failure
525     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
526                  req.type_string(),
527                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
528 
529     // Now that alloc failure GC is scheduled, we can abort everything else
530     heap->cancel_gc(GCCause::_allocation_failure);

574     _do_counters_update.unset();
575     ShenandoahHeap::heap()->monitoring_support()->update_counters();
576   }
577 }
578 
579 void ShenandoahControlThread::handle_force_counters_update() {
580   if (_force_counters_update.is_set()) {
581     _do_counters_update.unset(); // reset these too, we do update now!
582     ShenandoahHeap::heap()->monitoring_support()->update_counters();
583   }
584 }
585 
586 void ShenandoahControlThread::notify_heap_changed() {
587   // This is called from the allocation path, and thus should be fast.
588 
589   // Update monitoring counters when we take a new region. This amortizes the
590   // update costs on the slow path.
591   if (_do_counters_update.is_unset()) {
592     _do_counters_update.set();
593   }
594   // Notify that something had changed.
595   if (_heap_changed.is_unset()) {
596     _heap_changed.set();
597   }
598 }
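
notify_heap_changed() above runs on the allocation path, so it checks each flag with is_unset() before calling set(): when the flag is already set, only a read is performed and the shared location is not written again. A minimal sketch of that set-if-unset pattern, using std::atomic purely to keep the example self-contained (this is not the ShenandoahSharedFlag implementation):

#include <atomic>

// Set-if-unset: when the flag is already set this is a relaxed load only, avoiding
// repeated writes (and the associated cache-line traffic) from the hot path.
static inline void set_if_unset(std::atomic<bool>& flag) {
  if (!flag.load(std::memory_order_relaxed)) {
    flag.store(true, std::memory_order_release);
  }
}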
599 
600 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
601   assert(ShenandoahPacing, "should only call when pacing is enabled");
602   Atomic::add(&_allocs_seen, words, memory_order_relaxed);
603 }
604 
605 void ShenandoahControlThread::set_forced_counters_update(bool value) {
606   _force_counters_update.set_cond(value);
607 }
608 
609 void ShenandoahControlThread::reset_gc_id() {
610   Atomic::store(&_gc_id, (size_t)0);
611 }
612 
613 void ShenandoahControlThread::update_gc_id() {
614   Atomic::inc(&_gc_id);
615 }
616 
617 size_t ShenandoahControlThread::get_gc_id() {

622   print_on(tty);
623 }
624 
625 void ShenandoahControlThread::print_on(outputStream* st) const {
626   st->print("Shenandoah Concurrent Thread");
627   Thread::print_on(st);
628   st->cr();
629 }
630 
631 void ShenandoahControlThread::start() {
632   create_and_start();
633 }
634 
635 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
636   _graceful_shutdown.set();
637 }
638 
639 bool ShenandoahControlThread::in_graceful_shutdown() {
640   return _graceful_shutdown.is_set();
641 }

 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 27 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 28 #include "gc/shenandoah/shenandoahControlThread.hpp"
 29 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 31 #include "gc/shenandoah/shenandoahFullGC.hpp"
 32 #include "gc/shenandoah/shenandoahGeneration.hpp"
 33 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 34 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 35 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 36 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 38 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 39 #include "gc/shenandoah/shenandoahOldGC.hpp"
 40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 41 #include "gc/shenandoah/shenandoahUtils.hpp"
 42 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 43 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 44 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
 45 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 46 #include "memory/iterator.hpp"
 47 #include "memory/metaspaceUtils.hpp"
 48 #include "memory/metaspaceStats.hpp"
 49 #include "memory/universe.hpp"
 50 #include "runtime/atomic.hpp"
 51 
 52 ShenandoahControlThread::ShenandoahControlThread() :
 53   ConcurrentGCThread(),
 54   _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", Monitor::_safepoint_check_always, true),
 55   _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", Monitor::_safepoint_check_always, true),
 56   _control_lock(Mutex::leaf - 1, "ShenandoahControlGC_lock", Monitor::_safepoint_check_never, true),
 57   _periodic_task(this),
 58   _requested_gc_cause(GCCause::_no_cause_specified),
 59   _requested_generation(GenerationMode::GLOBAL),
 60   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
 61   _degen_generation(NULL),
 62   _allocs_seen(0),
 63   _mode(none) {
 64 
 65   reset_gc_id();
 66   create_and_start();
 67   _periodic_task.enroll();
 68   if (ShenandoahPacing) {
 69     _periodic_pacer_notify_task.enroll();
 70   }
 71 }
 72 
 73 ShenandoahControlThread::~ShenandoahControlThread() {
 74   // This is here so that super is called.
 75 }
 76 
 77 void ShenandoahPeriodicTask::task() {
 78   _thread->handle_force_counters_update();
 79   _thread->handle_counters_update();
 80 }
 81 
 82 void ShenandoahPeriodicPacerNotify::task() {
 83   assert(ShenandoahPacing, "Should not be here otherwise");
 84   ShenandoahHeap::heap()->pacer()->notify_waiters();
 85 }
 86 
 87 void ShenandoahControlThread::run_service() {
 88   ShenandoahHeap* heap = ShenandoahHeap::heap();
 89 
 90   GCMode default_mode = concurrent_normal;
 91   GenerationMode generation = GLOBAL;
 92   GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
 93 
 94   double last_shrink_time = os::elapsedTime();
 95   uint age_period = 0;
 96 
 97   // Shrink period avoids constantly polling regions for shrinking.
 98   // Having a period 10x shorter than the delay means we catch regions eligible
 99   // for shrinking with a lag of less than 1/10th of the true delay.
100   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
101   double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
102 
103   ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
104 
105   // Heuristics are notified here of allocation failures and of the other
106   // outcomes of the cycle. They're also used here to control whether the Nth consecutive
107   // degenerated cycle should be 'promoted' to a full cycle. The decision to
108   // trigger a cycle or not is evaluated on the regulator thread.
109   ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
110   while (!in_graceful_shutdown() && !should_terminate()) {
111     // Figure out if we have pending requests.
112     bool alloc_failure_pending = _alloc_failure_gc.is_set();
113     bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
114     bool implicit_gc_requested = _gc_requested.is_set() && is_implicit_gc(_requested_gc_cause);
115 
116     // This control loop iteration has seen this many allocations.
117     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
118 
119     // Check if we have seen a new target for soft max heap size.
120     bool soft_max_changed = check_soft_max_changed();
121 
122     // Choose which GC mode to run in. The block below should select a single mode.
123     set_gc_mode(none);
124     GCCause::Cause cause = GCCause::_last_gc_cause;
125     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
126 
127     if (alloc_failure_pending) {
128       // Allocation failure takes precedence: we have to deal with it first thing
129       log_info(gc)("Trigger: Handle Allocation Failure");
130 
131       cause = GCCause::_allocation_failure;
132 
133       // Consume the degen point, and seed it with default value
134       degen_point = _degen_point;
135       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
136 
137       if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
138         _degen_generation = heap->mode()->is_generational() ? heap->young_generation() : heap->global_generation();
139       } else {
140         assert(_degen_generation != NULL, "Need to know which generation to resume.");
141       }
142 
143       ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
144       generation = _degen_generation->generation_mode();
145       bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();
146 
147       // Do not bother with degenerated cycle if old generation evacuation failed.
148       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && !old_gen_evacuation_failed) {
149         heuristics->record_allocation_failure_gc();
150         policy->record_alloc_failure_to_degenerated(degen_point);
151         set_gc_mode(stw_degenerated);
152       } else {
153         heuristics->record_allocation_failure_gc();
154         policy->record_alloc_failure_to_full();
155         generation = GLOBAL;
156         set_gc_mode(stw_full);
157       }
158     } else if (explicit_gc_requested) {
159       generation = GLOBAL;
160       cause = _requested_gc_cause;
161       log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
162 
163       global_heuristics->record_requested_gc();
164 
165       if (ExplicitGCInvokesConcurrent) {
166         policy->record_explicit_to_concurrent();
167         set_gc_mode(default_mode);
168         // Unload and clean up everything
169         heap->set_unload_classes(global_heuristics->can_unload_classes());
170       } else {
171         policy->record_explicit_to_full();
172         set_gc_mode(stw_full);
173       }
174     } else if (implicit_gc_requested) {
175       generation = GLOBAL;
176       cause = _requested_gc_cause;
177       log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
178 
179       global_heuristics->record_requested_gc();
180 
181       if (ShenandoahImplicitGCInvokesConcurrent) {
182         policy->record_implicit_to_concurrent();
183         set_gc_mode(default_mode);
184 
185         // Unload and clean up everything
186         heap->set_unload_classes(global_heuristics->can_unload_classes());
187       } else {
188         policy->record_implicit_to_full();
189         set_gc_mode(stw_full);
190       }
191     } else {
192       // We should only be here if the regulator requested a cycle or if
193       // there is an old generation mark in progress.
194       if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
195         // preemption was requested or this is a regular cycle
196         cause = GCCause::_shenandoah_concurrent_gc;
197         generation = _requested_generation;
198         set_gc_mode(default_mode);
199 
200         // Don't start a new old marking if there is one already in progress.
201         if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
202           set_gc_mode(marking_old);
203         }
204 
205         if (generation == GLOBAL) {
206           heap->set_unload_classes(global_heuristics->should_unload_classes());
207         } else {
208           heap->set_unload_classes(false);
209         }
210       } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_concurrent_prep_for_mixed_evacuation_in_progress()) {
211         // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
212         // mixed evacuation in progress, so resume working on that.
213         cause = GCCause::_shenandoah_concurrent_gc;
214         generation = OLD;
215         set_gc_mode(marking_old);
216       }
217 
218       // We don't want to spin in this loop and start a cycle on every iteration, so
219       // clear the requested gc cause. This creates a race with callers of the
220       // blocking 'request_gc' method, but that method loops, re-setting
221       // '_requested_gc_cause', until a full cycle has completed.
222       _requested_gc_cause = GCCause::_no_gc;
223     }
224 
225     // Blow away all soft references on this cycle if we are handling an allocation failure,
226     // an implicit or explicit GC request, or if we are asked to do so unconditionally.
227     if (generation == GLOBAL && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
228       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
229     }
230 
231     bool gc_requested = (_mode != none);
232     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
233 
234     if (gc_requested) {
235       // GC is starting, bump the internal ID
236       update_gc_id();
237 
238       heap->reset_bytes_allocated_since_gc_start();
239 
240       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
241 
242       // If GC was requested, we are sampling the counters even without actual triggers
243       // from allocation machinery. This captures GC phases more accurately.
244       set_forced_counters_update(true);
245 
246       // If GC was requested, we better dump freeset data for performance debugging
247       {
248         ShenandoahHeapLocker locker(heap->lock());
249         heap->free_set()->log_status();
250       }
251 
252       heap->set_aging_cycle(false);
253       {
254         switch (_mode) {
255           case concurrent_normal: {
256             if ((generation == YOUNG) && (age_period-- == 0)) {
257               heap->set_aging_cycle(true);
258               age_period = ShenandoahAgingCyclePeriod - 1;
259             }
260             service_concurrent_normal_cycle(heap, generation, cause);
261             break;
262           }
263           case stw_degenerated: {
264             if (!service_stw_degenerated_cycle(cause, degen_point)) {
265               // The degenerated GC was upgraded to a Full GC
266               generation = GLOBAL;
267             }
268             break;
269           }
270           case stw_full: {
271             service_stw_full_cycle(cause);
272             break;
273           }
274           case marking_old: {
275             assert(generation == OLD, "Expected old generation here");
276             resume_concurrent_old_cycle(heap->old_generation(), cause);
277             break;
278           }
279           default: {
280             ShouldNotReachHere();
281           }
282         }
283       }
284 
285       // If this was the requested GC cycle, notify waiters about it
286       if (explicit_gc_requested || implicit_gc_requested) {
287         notify_gc_waiters();
288       }
289 
290       // If this was the allocation failure GC cycle, notify waiters about it
291       if (alloc_failure_pending) {
292         notify_alloc_failure_waiters();
293       }
294 
295       // Report the current free set state at the end of the cycle, whether
296       // it completed normally or was aborted.
297       {
298         ShenandoahHeapLocker locker(heap->lock());
299         heap->free_set()->log_status();
300 
301         // Notify Universe about new heap usage. This has implications for
302         // global soft refs policy, and we better report it every time heap
303         // usage goes down.
304         Universe::heap()->update_capacity_and_used_at_gc();
305 
306         // Signal that we have completed a visit to all live objects.
307         Universe::heap()->record_whole_heap_examined_timestamp();
308       }
309 
310       // Disable forced counters update, and update counters one more time
311       // to capture the state at the end of GC session.
312       handle_force_counters_update();
313       set_forced_counters_update(false);
314 
315       // Retract forceful part of soft refs policy
316       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
317 
318       // Clear metaspace oom flag, if current cycle unloaded classes
319       if (heap->unload_classes()) {
320         assert(generation == GLOBAL, "Only unload classes during GLOBAL cycle");
321         global_heuristics->clear_metaspace_oom();
322       }
323 
324       // Commit worker statistics to cycle data
325       heap->phase_timings()->flush_par_workers_to_cycle();
326       if (ShenandoahPacing) {
327         heap->pacer()->flush_stats_to_cycle();
328       }
329 
330       // Print GC stats for current cycle
331       {
332         LogTarget(Info, gc, stats) lt;
333         if (lt.is_enabled()) {
334           ResourceMark rm;
335           LogStream ls(lt);
336           heap->phase_timings()->print_cycle_on(&ls);
337           if (ShenandoahPacing) {
338             heap->pacer()->print_cycle_on(&ls);
339           }
340         }
341       }

360     double current = os::elapsedTime();
361 
362     if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
363       // Explicit GC tries to uncommit everything down to min capacity.
364       // Soft max change tries to uncommit everything down to target capacity.
365       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
366 
367       double shrink_before = (explicit_gc_requested || soft_max_changed) ?
368                              current :
369                              current - (ShenandoahUncommitDelay / 1000.0);
370 
371       size_t shrink_until = soft_max_changed ?
372                              heap->soft_max_capacity() :
373                              heap->min_capacity();
374 
375       service_uncommit(shrink_before, shrink_until);
376       heap->phase_timings()->flush_cycle_to_global();
377       last_shrink_time = current;
378     }
379 
380     // Don't wait around if there was an allocation failure - start the next cycle immediately.
381     if (!is_alloc_failure_gc()) {
382       // The timed wait is necessary because this thread has a responsibility to send
383       // 'alloc_words' to the pacer when it does not perform a GC.
384       MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
385       lock.wait(ShenandoahControlIntervalMax);
386     }
387   }
388 
389   // Wait for the actual stop(), can't leave run_service() earlier.
390   while (!should_terminate()) {
391     os::naked_short_sleep(ShenandoahControlIntervalMin);
392   }
393 }
394 
395 // Young and old concurrent cycles are initiated by the regulator. Implicit
396 // and explicit GC requests are handled by the controller thread and always
397 // run a global cycle (which is concurrent by default, but may be overridden
398 // by command line options). Old cycles always degenerate to a global cycle.
399 // Young cycles are degenerated to complete the young cycle.  Young
400 // and old degen may upgrade to Full GC.  Full GC may also be
401 // triggered directly by a System.gc() invocation.
402 //
403 //
404 //      +-----+ Idle +-----+-----------+---------------------+
405 //      |         +        |           |                     |
406 //      |         |        |           |                     |
407 //      |         |        v           |                     |
408 //      |         |  Bootstrap Old +-- | ------------+       |
409 //      |         |   +                |             |       |
410 //      |         |   |                |             |       |
411 //      |         v   v                v             v       |
412 //      |    Resume Old <----------+ Young +--> Young Degen  |
413 //      |     +  +                                   +       |
414 //      v     |  |                                   |       |
415 //   Global <-+  |                                   |       |
416 //      +        |                                   |       |
417 //      |        v                                   v       |
418 //      +--->  Global Degen +--------------------> Full <----+
419 //
420 void ShenandoahControlThread::service_concurrent_normal_cycle(
421   const ShenandoahHeap* heap, const GenerationMode generation, GCCause::Cause cause) {
422 
423   switch (generation) {
424     case YOUNG: {
425       // Run a young cycle. This may or may not have interrupted an ongoing
426       // concurrent mark in the old generation. We need to think about promotions
427       // in this case. Promoted objects should be above the TAMS in the old regions
428       // they end up in, but we have to be sure we don't promote into any regions
429       // that are in the cset.
430       log_info(gc, ergo)("Start GC cycle (YOUNG)");
431       service_concurrent_cycle(heap->young_generation(), cause, false);
432       heap->young_generation()->log_status();
433       break;
434     }
435     case GLOBAL: {
436       log_info(gc, ergo)("Start GC cycle (GLOBAL)");
437       service_concurrent_cycle(heap->global_generation(), cause, false);
438       heap->global_generation()->log_status();
439       break;
440     }
441     case OLD: {
442       log_info(gc, ergo)("Start GC cycle (OLD)");
443       service_concurrent_old_cycle(heap, cause);
444       heap->old_generation()->log_status();
445       break;
446     }
447     default:
448       ShouldNotReachHere();
449   }
450 }
451 
452 void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap* heap, GCCause::Cause &cause) {
453   // Configure the young generation's concurrent mark to put objects in
454   // old regions into the concurrent mark queues associated with the old
455   // generation. The young cycle will run as normal, except that rather than
456   // ignoring old references it will mark them and enqueue them on the old
457   // concurrent mark queues, without traversing them.
458   ShenandoahGeneration* old_generation = heap->old_generation();
459   ShenandoahYoungGeneration* young_generation = heap->young_generation();
460 
461   assert(!heap->is_concurrent_old_mark_in_progress(), "Old already in progress.");
462   assert(old_generation->task_queues()->is_empty(), "Old mark queues should be empty.");
463 
464   young_generation->set_old_gen_task_queues(old_generation->task_queues());
465   young_generation->set_mark_incomplete();
466   old_generation->set_mark_incomplete();
467   service_concurrent_cycle(young_generation, cause, true);
468   if (!heap->cancelled_gc()) {
469     // Reset the degenerated point. Normally this would happen at the top
470     // of the control loop, but here we have just completed a young cycle
471     // which has bootstrapped the old concurrent marking.
472     _degen_point = ShenandoahGC::_degenerated_outside_cycle;
473 
474     // TODO: Bit of a hack here to keep the phase timings happy as we transition
475     // to concurrent old marking. We need to revisit this.
476     heap->phase_timings()->flush_par_workers_to_cycle();
477     heap->phase_timings()->flush_cycle_to_global();
478 
479     // From here we will 'resume' the old concurrent mark. This will skip reset
480     // and init mark for the concurrent mark. All of that work will have been
481     // done by the bootstrapping young cycle. In order to simplify the debugging
482     // effort, the old cycle will ONLY complete the mark phase. No actual
483     // collection of the old generation is happening here.
484     set_gc_mode(marking_old);
485     resume_concurrent_old_cycle(old_generation, cause);
486   }
487 }
488 
489 bool ShenandoahControlThread::check_soft_max_changed() const {
490   ShenandoahHeap* heap = ShenandoahHeap::heap();
491   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
492   size_t old_soft_max = heap->soft_max_capacity();
493   if (new_soft_max != old_soft_max) {
494     new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
495     new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
496     if (new_soft_max != old_soft_max) {
497       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
498                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
499                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
500       );
501       heap->set_soft_max_capacity(new_soft_max);
502       return true;
503     }
504   }
505   return false;
506 }
507 
508 void ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
509 
510   assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress() ||
511          ShenandoahHeap::heap()->is_concurrent_prep_for_mixed_evacuation_in_progress(),
512          "Old mark or mixed-evac prep should be in progress");
513   log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued.", generation->task_queues()->tasks());
514 
515   ShenandoahHeap* heap = ShenandoahHeap::heap();
516 
517   GCIdMark gc_id_mark;
518   ShenandoahGCSession session(cause, generation);
519 
520   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
521   // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
522   // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
523   // is allowed to cancel a GC.
524   ShenandoahOldGC gc(generation, _allow_old_preemption);
525   if (gc.collect(cause)) {
526     // Old collection is complete; the young generation no longer needs this
527     // reference to the old concurrent mark, so clean it up.
528     heap->young_generation()->set_old_gen_task_queues(NULL);
529     generation->heuristics()->record_success_concurrent();
530     heap->shenandoah_policy()->record_success_concurrent();
531   }
532 
533   if (heap->cancelled_gc()) {
534     // It's possible the gc cycle was cancelled after the last time
535     // the collection checked for cancellation. In that case, the
536     // old gc cycle is still considered complete, and we have to deal with
537     // this cancellation here. We set the degeneration point to be outside
538     // the cycle because, if this is an allocation failure, that is
539     // what must be done (there is no degenerated old cycle). If the
540     // cancellation was due to a heuristic wanting to start a young
541     // cycle, then we are not actually going into a degenerated cycle,
542     // so the degeneration point doesn't matter here.
543     check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
544   }
545 }
546 
547 void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
548   // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens
549   // during any of the concurrent phases, the cycle first degrades to a Degenerated GC and completes there.
550   // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
551   // tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
552   //
553   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
554   // heuristics say there are no regions to compact, and all the collection comes from immediately
555   // reclaimable regions.
556   //
557   // ................................................................................................
558   //
559   //                                    (immediate garbage shortcut)                Concurrent GC
560   //                             /-------------------------------------------\
561   //                             |                                           |
562   //                             |                                           |
563   //                             |                                           |
564   //                             |                                           v
565   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
566   //                   |                    |                 |              ^
567   //                   | (af)               | (af)            | (af)         |
568   // ..................|....................|.................|..............|.......................
569   //                   |                    |                 |              |
570   //                   |                    |                 |              |      Degenerated GC
571   //                   v                    v                 v              |
572   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
573   //                   |                    |                 |              ^
574   //                   | (af)               | (af)            | (af)         |
575   // ..................|....................|.................|..............|.......................
576   //                   |                    |                 |              |
577   //                   |                    v                 |              |      Full GC
578   //                   \------------------->o<----------------/              |
579   //                                        |                                |
580   //                                        v                                |
581   //                                      Full GC  --------------------------/
582   //
583   ShenandoahHeap* heap = ShenandoahHeap::heap();
584   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
585 
586   GCIdMark gc_id_mark;
587   ShenandoahGCSession session(cause, generation);
588 
589   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
590 
591   ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
592   if (gc.collect(cause)) {
593     // Cycle is complete
594     generation->heuristics()->record_success_concurrent();
595     heap->shenandoah_policy()->record_success_concurrent();
596   } else {
597     assert(heap->cancelled_gc(), "Must have been cancelled");
598     check_cancellation_or_degen(gc.degen_point());
599     assert(generation->generation_mode() != OLD, "Old GC takes a different control path");
600     // Concurrent young-gen collection degenerates to young
601     // collection.  Same for global collections.
602     _degen_generation = generation;
603   }
604 }
605 
606 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
607   ShenandoahHeap* heap = ShenandoahHeap::heap();
608   if (!heap->cancelled_gc()) {
609     return false;
610   }
611 
612   if (in_graceful_shutdown()) {
613     return true;
614   }
615 
616   assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
617          "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
618 
619   if (is_alloc_failure_gc()) {
620     _degen_point = point;
621     return true;
622   }
623 
624   if (_preemption_requested.is_set()) {
625     assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
626     _preemption_requested.unset();
627 
628     // Old generation marking is only cancellable during concurrent marking.
629     // Once final mark is complete, the code does not check again for cancellation.
630     // If old generation was cancelled for an allocation failure, we wouldn't
631     // make it to this case. The calling code is responsible for forcing a
632     // cancellation due to allocation failure into a degenerated cycle.
633     _degen_point = point;
634     heap->clear_cancelled_gc(false /* clear oom handler */);
635     return true;
636   }
637 
638   fatal("Cancel GC either for alloc failure GC, or gracefully exiting, or to pause old generation marking.");
639   return false;
640 }
641 
642 void ShenandoahControlThread::stop_service() {
643   // Nothing to do here.
644 }
645 
646 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
647   ShenandoahHeap* const heap = ShenandoahHeap::heap();
648 
649   GCIdMark gc_id_mark;
650   ShenandoahGCSession session(cause, heap->global_generation());
651 
652   ShenandoahFullGC gc;
653   gc.collect(cause);
654 
655   heap->global_generation()->heuristics()->record_success_full();
656   heap->shenandoah_policy()->record_success_full();
657 }
658 
659 bool ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
660   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
661   ShenandoahHeap* const heap = ShenandoahHeap::heap();
662 
663   GCIdMark gc_id_mark;
664   ShenandoahGCSession session(cause, _degen_generation);
665 
666   ShenandoahDegenGC gc(point, _degen_generation);
667 
668   // Just in case degenerated cycle preempted old-gen marking, clear the old-gen task queues.
669   heap->young_generation()->set_old_gen_task_queues(NULL);
670 
671   gc.collect(cause);
672 
673   assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
674   assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
675   assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
676 
677   _degen_generation->heuristics()->record_success_degenerated();
678   heap->shenandoah_policy()->record_success_degenerated();
679   return !gc.upgraded_to_full();
680 }
681 
682 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
683   ShenandoahHeap* heap = ShenandoahHeap::heap();
684 
685   // Determine if there is work to do. This avoids taking heap lock if there is
686   // no work available, avoids spamming logs with superfluous logging messages,
687   // and minimises the amount of work while locks are taken.
688 
689   if (heap->committed() <= shrink_until) return;
690 
691   bool has_work = false;
692   for (size_t i = 0; i < heap->num_regions(); i++) {
693     ShenandoahHeapRegion *r = heap->get_region(i);
694     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
695       has_work = true;
696       break;
697     }
698   }
699 
700   if (has_work) {
701     heap->entry_uncommit(shrink_before, shrink_until);
702   }
703 }
704 
705 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
706   return GCCause::is_user_requested_gc(cause) ||
707          GCCause::is_serviceability_requested_gc(cause);
708 }
709 
710 bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
711   return !is_explicit_gc(cause) && cause != GCCause::_shenandoah_concurrent_gc;
712 }
713 
714 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
715   assert(GCCause::is_user_requested_gc(cause) ||
716          GCCause::is_serviceability_requested_gc(cause) ||
717          cause == GCCause::_metadata_GC_clear_soft_refs ||
718          cause == GCCause::_full_gc_alot ||
719          cause == GCCause::_wb_full_gc ||
720          cause == GCCause::_wb_breakpoint ||
721          cause == GCCause::_scavenge_alot,
722          "only requested GCs here");
723 
724   if (is_explicit_gc(cause)) {
725     if (!DisableExplicitGC) {
726       handle_requested_gc(cause);
727     }
728   } else {
729     handle_requested_gc(cause);
730   }
731 }
732 
733 bool ShenandoahControlThread::request_concurrent_gc(GenerationMode generation) {
734   if (_preemption_requested.is_set() || _gc_requested.is_set() || ShenandoahHeap::heap()->cancelled_gc()) {
735     // ignore subsequent requests from the heuristics
736     return false;
737   }
738 
739   if (_mode == none) {
740     _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
741     _requested_generation = generation;
742     notify_control_thread();
743     return true;
744   }
745 
746   if (preempt_old_marking(generation)) {
747     log_info(gc)("Preempting old generation mark to allow young GC.");
748     _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
749     _requested_generation = generation;
750     _preemption_requested.set();
751     ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
752     notify_control_thread();
753     return true;
754   }
755 
756   return false;
757 }
758 
759 void ShenandoahControlThread::notify_control_thread() {
760   MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
761   _control_lock.notify();
762 }
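
In this newer version the control loop no longer sleeps for a fixed, exponentially adjusted interval between iterations; it blocks on _control_lock with a timed wait (lines 380-386 above), and notify_control_thread() wakes it as soon as the regulator or a GC requester has work for it. The sketch below shows that wake-up-or-timeout shape with std::condition_variable; the class and its members are illustrative, not the HotSpot Monitor API.

#include <chrono>
#include <condition_variable>
#include <mutex>

class ControlWakeup {
 public:
  // Regulator / GC requesters: wake the control loop immediately.
  void notify_control_thread() {
    std::lock_guard<std::mutex> lock(_lock);
    _pending = true;
    _cv.notify_one();
  }

  // Control loop: wait until notified or until the timeout expires. The timeout
  // matters because the loop still has periodic duties (e.g. forwarding allocation
  // counts to the pacer) even when nobody has asked for a GC.
  void wait_for_request(std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> lock(_lock);
    _cv.wait_for(lock, timeout, [&] { return _pending; });
    _pending = false;
  }

 private:
  std::mutex              _lock;
  std::condition_variable _cv;
  bool                    _pending = false;
};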
763 
764 bool ShenandoahControlThread::preempt_old_marking(GenerationMode generation) {
765   return generation == YOUNG && _allow_old_preemption.try_unset();
766 }
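
preempt_old_marking() above consumes a one-shot permission token: old-generation marking sets _allow_old_preemption while it is at a point where it can safely be interrupted, and a young-generation request claims the token at most once via try_unset(). Here is a sketch of that single-consumer token using compare-exchange; the class is illustrative, not the ShenandoahSharedFlag implementation.

#include <atomic>

class PreemptionToken {
 public:
  // Old marking grants permission while it is safe to be preempted.
  void allow()  { _allowed.store(true, std::memory_order_release); }
  void revoke() { _allowed.store(false, std::memory_order_release); }

  // A young-cycle request claims the token; at most one caller can win.
  bool try_claim() {
    bool expected = true;
    return _allowed.compare_exchange_strong(expected, false,
                                            std::memory_order_acq_rel);
  }

 private:
  std::atomic<bool> _allowed{false};
};

The compare-exchange guarantees that if two young requests race, only one wins the right to preempt the old mark.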
767 
768 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
769   // Make sure we have at least one complete GC cycle before unblocking
770   // from the explicit GC request.
771   //
772   // This is especially important for the weak references cleanup and/or native
773   // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
774   // comes very late in an already running cycle, that cycle would miss lots of
775   // new cleanup opportunities that became available before the caller
776   // requested the GC.
777 
778   MonitorLocker ml(&_gc_waiters_lock);
779   size_t current_gc_id = get_gc_id();
780   size_t required_gc_id = current_gc_id + 1;
781   while (current_gc_id < required_gc_id) {
782     _gc_requested.set();
783     _requested_gc_cause = cause;
784     notify_control_thread();
785     if (cause != GCCause::_wb_breakpoint) {
786       ml.wait();
787     }
788     current_gc_id = get_gc_id();
789   }
790 }
791 
792 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
793   ShenandoahHeap* heap = ShenandoahHeap::heap();
794 
795   assert(current()->is_Java_thread(), "expect Java thread here");
796 
797   if (try_set_alloc_failure_gc()) {
798     // Only report the first allocation failure
799     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
800                  req.type_string(),
801                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
802 
803     // Now that alloc failure GC is scheduled, we can abort everything else
804     heap->cancel_gc(GCCause::_allocation_failure);

848     _do_counters_update.unset();
849     ShenandoahHeap::heap()->monitoring_support()->update_counters();
850   }
851 }
852 
853 void ShenandoahControlThread::handle_force_counters_update() {
854   if (_force_counters_update.is_set()) {
855     _do_counters_update.unset(); // reset these too, we do update now!
856     ShenandoahHeap::heap()->monitoring_support()->update_counters();
857   }
858 }
859 
860 void ShenandoahControlThread::notify_heap_changed() {
861   // This is called from the allocation path, and thus should be fast.
862 
863   // Update monitoring counters when we take a new region. This amortizes the
864   // update costs on the slow path.
865   if (_do_counters_update.is_unset()) {
866     _do_counters_update.set();
867   }
868 }
869 
870 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
871   assert(ShenandoahPacing, "should only call when pacing is enabled");
872   Atomic::add(&_allocs_seen, words, memory_order_relaxed);
873 }
874 
875 void ShenandoahControlThread::set_forced_counters_update(bool value) {
876   _force_counters_update.set_cond(value);
877 }
878 
879 void ShenandoahControlThread::reset_gc_id() {
880   Atomic::store(&_gc_id, (size_t)0);
881 }
882 
883 void ShenandoahControlThread::update_gc_id() {
884   Atomic::inc(&_gc_id);
885 }
886 
887 size_t ShenandoahControlThread::get_gc_id() {

892   print_on(tty);
893 }
894 
895 void ShenandoahControlThread::print_on(outputStream* st) const {
896   st->print("Shenandoah Concurrent Thread");
897   Thread::print_on(st);
898   st->cr();
899 }
900 
901 void ShenandoahControlThread::start() {
902   create_and_start();
903 }
904 
905 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
906   _graceful_shutdown.set();
907 }
908 
909 bool ShenandoahControlThread::in_graceful_shutdown() {
910   return _graceful_shutdown.is_set();
911 }
912 
913 const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
914   switch (mode) {
915     case none:              return "idle";
916     case concurrent_normal: return "normal";
917     case stw_degenerated:   return "degenerated";
918     case stw_full:          return "full";
919     case marking_old:       return "old mark";
920     default:                return "unknown";
921   }
922 }
923 
924 void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
925   if (_mode != new_mode) {
926     log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
927     _mode = new_mode;
928   }
929 }