src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp

 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 29 #include "gc/shenandoah/shenandoahControlThread.hpp"
 30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 32 #include "gc/shenandoah/shenandoahFullGC.hpp"



 33 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"

 38 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 39 #include "gc/shenandoah/shenandoahUtils.hpp"
 40 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 41 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 42 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"

 43 #include "memory/iterator.hpp"
 44 #include "memory/metaspaceUtils.hpp"
 45 #include "memory/metaspaceStats.hpp"
 46 #include "memory/universe.hpp"
 47 #include "runtime/atomic.hpp"
 48 
 49 ShenandoahControlThread::ShenandoahControlThread() :
 50   ConcurrentGCThread(),
 51   _alloc_failure_waiters_lock(Mutex::safepoint-1, "ShenandoahAllocFailureGC_lock", true),
 52   _gc_waiters_lock(Mutex::safepoint-1, "ShenandoahRequestedGC_lock", true),


 53   _periodic_task(this),
 54   _requested_gc_cause(GCCause::_no_cause_specified),

 55   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
 56   _allocs_seen(0) {


 57 
 58   reset_gc_id();
 59   create_and_start();
 60   _periodic_task.enroll();
 61   if (ShenandoahPacing) {
 62     _periodic_pacer_notify_task.enroll();
 63   }
 64 }
 65 
 66 ShenandoahControlThread::~ShenandoahControlThread() {
 67   // This is here so that super is called.
 68 }
 69 
 70 void ShenandoahPeriodicTask::task() {
 71   _thread->handle_force_counters_update();
 72   _thread->handle_counters_update();
 73 }
 74 
 75 void ShenandoahPeriodicPacerNotify::task() {
 76   assert(ShenandoahPacing, "Should not be here otherwise");
 77   ShenandoahHeap::heap()->pacer()->notify_waiters();
 78 }
 79 
 80 void ShenandoahControlThread::run_service() {
 81   ShenandoahHeap* heap = ShenandoahHeap::heap();
 82 
 83   GCMode default_mode = concurrent_normal;

 84   GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
 85   int sleep = ShenandoahControlIntervalMin;
 86 
 87   double last_shrink_time = os::elapsedTime();
 88   double last_sleep_adjust_time = os::elapsedTime();
 89 
 90   // The shrink period avoids constantly polling regions for shrinking.
 91   // Having a period 10x shorter than the delay means we hit the
 92   // shrinking with a lag of less than 1/10th of the true delay.
 93   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
 94   double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
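  // For example, if ShenandoahUncommitDelay were 300000 ms (5 minutes), shrink_period
  // would be 300000 / 1000 / 10 = 30 seconds: empty regions are polled for uncommit
  // roughly every 30 s, so they are uncommitted with at most ~1/10th of the delay in
  // extra lag.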
 95 
 96   ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
 97   ShenandoahHeuristics* heuristics = heap->heuristics();





 98   while (!in_graceful_shutdown() && !should_terminate()) {
 99     // Figure out if we have pending requests.
100     bool alloc_failure_pending = _alloc_failure_gc.is_set();
101     bool is_gc_requested = _gc_requested.is_set();
102     GCCause::Cause requested_gc_cause = _requested_gc_cause;
103     bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
104     bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);
105 
106     // This control loop iteration has seen this much allocation.
107     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
108 
109     // Check if we have seen a new target for soft max heap size.
110     bool soft_max_changed = check_soft_max_changed();
111 
112     // Choose which GC mode to run in. The block below should select a single mode.
113     GCMode mode = none;
114     GCCause::Cause cause = GCCause::_last_gc_cause;
115     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
116 
117     if (alloc_failure_pending) {
118       // Allocation failure takes precedence: we have to deal with it first thing
119       log_info(gc)("Trigger: Handle Allocation Failure");
120 
121       cause = GCCause::_allocation_failure;
122 
123       // Consume the degen point, and seed it with default value
124       degen_point = _degen_point;
125       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
126 
127       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {











128         heuristics->record_allocation_failure_gc();
129         policy->record_alloc_failure_to_degenerated(degen_point);
130         mode = stw_degenerated;
131       } else {
132         heuristics->record_allocation_failure_gc();
133         policy->record_alloc_failure_to_full();
134         mode = stw_full;

135       }
136 
137     } else if (explicit_gc_requested) {
138       cause = requested_gc_cause;

139       log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
140 
141       heuristics->record_requested_gc();
142 
143       if (ExplicitGCInvokesConcurrent) {
144         policy->record_explicit_to_concurrent();
145         mode = default_mode;
146         // Unload and clean up everything
147         heap->set_unload_classes(heuristics->can_unload_classes());
148       } else {
149         policy->record_explicit_to_full();
150         mode = stw_full;
151       }
152     } else if (implicit_gc_requested) {
153       cause = requested_gc_cause;

154       log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
155 
156       heuristics->record_requested_gc();
157 
158       if (ShenandoahImplicitGCInvokesConcurrent) {
159         policy->record_implicit_to_concurrent();
160         mode = default_mode;
161 
162         // Unload and clean up everything
163         heap->set_unload_classes(heuristics->can_unload_classes());
164       } else {
165         policy->record_implicit_to_full();
166         mode = stw_full;
167       }
168     } else {
169       // Potential normal cycle: ask heuristics if it wants to act
170       if (heuristics->should_start_gc()) {
171         mode = default_mode;
172         cause = default_cause;
173       }











174 
175       // Ask policy if this cycle wants to process references or unload classes
176       heap->set_unload_classes(heuristics->should_unload_classes());

177     }
178 
179     // Blow all soft references on this cycle if handling an allocation failure,
180     // an implicit or explicit GC request, or if we are requested to do so unconditionally.
181     if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
182       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
183     }
184 
185     bool gc_requested = (mode != none);
186     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
187 
188     if (gc_requested) {
189       // GC is starting, bump the internal ID
190       update_gc_id();
191 
192       heap->reset_bytes_allocated_since_gc_start();
193 
194       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
195 
196       // If GC was requested, we are sampling the counters even without actual triggers
197       // from allocation machinery. This captures GC phases more accurately.
198       set_forced_counters_update(true);
199 
200       // If GC was requested, we better dump freeset data for performance debugging
201       {
202         ShenandoahHeapLocker locker(heap->lock());
203         heap->free_set()->log_status();
204       }
205 
206       switch (mode) {
207         case concurrent_normal:
208           service_concurrent_normal_cycle(cause);
209           break;
210         case stw_degenerated:
211           service_stw_degenerated_cycle(cause, degen_point);
212           break;
213         case stw_full:
214           service_stw_full_cycle(cause);
215           break;
216         default:
217           ShouldNotReachHere();
218       }
219 
220       // If this was the requested GC cycle, notify waiters about it
221       if (explicit_gc_requested || implicit_gc_requested) {
222         notify_gc_waiters();
223       }
224 
225       // If this was the allocation failure GC cycle, notify waiters about it
226       if (alloc_failure_pending) {
227         notify_alloc_failure_waiters();
228       }
229 
230       // Report the current free set state at the end of the cycle, whether
231       // it was a normal completion or an abort.
232       {
233         ShenandoahHeapLocker locker(heap->lock());
234         heap->free_set()->log_status();
235 
236         // Notify Universe about new heap usage. This has implications for
237         // global soft refs policy, and we better report it every time heap
238         // usage goes down.
239         Universe::heap()->update_capacity_and_used_at_gc();
240 
241         // Signal that we have completed a visit to all live objects.
242         Universe::heap()->record_whole_heap_examined_timestamp();
243       }
244 
245       // Disable forced counters update, and update counters one more time
246       // to capture the state at the end of GC session.
247       handle_force_counters_update();
248       set_forced_counters_update(false);
249 
250       // Retract forceful part of soft refs policy
251       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
252 
253       // Clear metaspace oom flag, if current cycle unloaded classes
254       if (heap->unload_classes()) {
255         heuristics->clear_metaspace_oom();

256       }
257 
258       // Commit worker statistics to cycle data
259       heap->phase_timings()->flush_par_workers_to_cycle();
260       if (ShenandoahPacing) {
261         heap->pacer()->flush_stats_to_cycle();
262       }
263 
264       // Print GC stats for current cycle
265       {
266         LogTarget(Info, gc, stats) lt;
267         if (lt.is_enabled()) {
268           ResourceMark rm;
269           LogStream ls(lt);
270           heap->phase_timings()->print_cycle_on(&ls);
271           if (ShenandoahPacing) {
272             heap->pacer()->print_cycle_on(&ls);
273           }
274         }
275       }
276 
277       // Commit statistics to globals
278       heap->phase_timings()->flush_cycle_to_global();
279 
280       // Print Metaspace change following GC (if logging is enabled).
281       MetaspaceUtils::print_metaspace_change(meta_sizes);
282 
283       // GC is over, we are at idle now
284       if (ShenandoahPacing) {
285         heap->pacer()->setup_for_idle();
286       }
287     } else {
288       // Allow allocators to know we have seen this much allocation
289       if (ShenandoahPacing && (allocs_seen > 0)) {
290         heap->pacer()->report_alloc(allocs_seen);
291       }
292     }
293 
294     double current = os::elapsedTime();
295 
296     if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
297       // Explicit GC tries to uncommit everything down to min capacity.
298       // Soft max change tries to uncommit everything down to target capacity.
299       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
300 
301       double shrink_before = (explicit_gc_requested || soft_max_changed) ?
302                              current :
303                              current - (ShenandoahUncommitDelay / 1000.0);
304 
305       size_t shrink_until = soft_max_changed ?
306                              heap->soft_max_capacity() :
307                              heap->min_capacity();
308 
309       service_uncommit(shrink_before, shrink_until);
310       heap->phase_timings()->flush_cycle_to_global();
311       last_shrink_time = current;
312     }
313 
314     // Wait before performing the next action. If allocation happened during this wait,
315     // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
316     // back off exponentially.
317     if (_heap_changed.try_unset()) {
318       sleep = ShenandoahControlIntervalMin;
319     } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
320       sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
321       last_sleep_adjust_time = current;
322     }
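    // For example, with hypothetical settings ShenandoahControlIntervalMin = 1,
    // ShenandoahControlIntervalMax = 10 and ShenandoahControlIntervalAdjustPeriod = 1000,
    // an idle control thread lengthens its sleep as 1 -> 2 -> 4 -> 8 -> 10 ms (doubling
    // once per adjust period, capped at the max), and snaps back to 1 ms as soon as the
    // heap changes.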
323     os::naked_short_sleep(sleep);
324   }
325 
326   // Wait for the actual stop(), can't leave run_service() earlier.
327   while (!should_terminate()) {
328     os::naked_short_sleep(ShenandoahControlIntervalMin);
329   }
330 }
331 
332 bool ShenandoahControlThread::check_soft_max_changed() const {
333   ShenandoahHeap* heap = ShenandoahHeap::heap();
334   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
335   size_t old_soft_max = heap->soft_max_capacity();
336   if (new_soft_max != old_soft_max) {
337     new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
338     new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
339     if (new_soft_max != old_soft_max) {
340       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
341                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
342                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
343       );
344       heap->set_soft_max_capacity(new_soft_max);
345       return true;
346     }
347   }
348   return false;
349 }
350 
351 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
352   // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens during
353   // any of the concurrent phases, the cycle first degrades to a Degenerated GC and completes there.
354   // If a second allocation failure happens during the Degenerated GC cycle (for example, when the GC
355   // tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
356   //
357   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
358   // heuristics say there are no regions to compact and all the collected garbage comes from immediately
359   // reclaimable regions.
360   //
361   // ................................................................................................
362   //
363   //                                    (immediate garbage shortcut)                Concurrent GC
364   //                             /-------------------------------------------\
365   //                             |                                           |
366   //                             |                                           |
367   //                             |                                           |
368   //                             |                                           v
369   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
370   //                   |                    |                 |              ^
371   //                   | (af)               | (af)            | (af)         |
372   // ..................|....................|.................|..............|.......................
373   //                   |                    |                 |              |
374   //                   |                    |                 |              |      Degenerated GC
375   //                   v                    v                 v              |
376   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
377   //                   |                    |                 |              ^
378   //                   | (af)               | (af)            | (af)         |
379   // ..................|....................|.................|..............|.......................
380   //                   |                    |                 |              |
381   //                   |                    v                 |              |      Full GC
382   //                   \------------------->o<----------------/              |
383   //                                        |                                |
384   //                                        v                                |
385   //                                      Full GC  --------------------------/
386   //
387   ShenandoahHeap* heap = ShenandoahHeap::heap();
388   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
389 

390   GCIdMark gc_id_mark;
391   ShenandoahGCSession session(cause);
392 
393   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
394 
395   ShenandoahConcurrentGC gc;





396   if (gc.collect(cause)) {
397     // Cycle is complete
398     heap->heuristics()->record_success_concurrent();
399     heap->shenandoah_policy()->record_success_concurrent();
400   } else {
401     assert(heap->cancelled_gc(), "Must have been cancelled");
402     check_cancellation_or_degen(gc.degen_point());




403   }
404 }
405 
406 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
407   ShenandoahHeap* heap = ShenandoahHeap::heap();
408   if (heap->cancelled_gc()) {
409     assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
410     if (!in_graceful_shutdown()) {
411       assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
412               "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
413       _degen_point = point;
414     }






415     return true;
416   }
417   return false;
418 }
419 
420 void ShenandoahControlThread::stop_service() {
421   // Nothing to do here.
422 }
423 
424 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {


425   GCIdMark gc_id_mark;
426   ShenandoahGCSession session(cause);
427 
428   ShenandoahFullGC gc;
429   gc.collect(cause);
430 
431   ShenandoahHeap* const heap = ShenandoahHeap::heap();
432   heap->heuristics()->record_success_full();
433   heap->shenandoah_policy()->record_success_full();
434 }
435 
436 void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
437   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

438 
439   GCIdMark gc_id_mark;
440   ShenandoahGCSession session(cause);
441 
442   ShenandoahDegenGC gc(point);
443   gc.collect(cause);
444 
445   ShenandoahHeap* const heap = ShenandoahHeap::heap();
446   heap->heuristics()->record_success_degenerated();











447   heap->shenandoah_policy()->record_success_degenerated();

448 }
449 
450 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
451   ShenandoahHeap* heap = ShenandoahHeap::heap();
452 
453   // Determine if there is work to do. This avoids taking heap lock if there is
454   // no work available, avoids spamming logs with superfluous logging messages,
455   // and minimises the amount of work while locks are taken.
456 
457   if (heap->committed() <= shrink_until) return;
458 
459   bool has_work = false;
460   for (size_t i = 0; i < heap->num_regions(); i++) {
461     ShenandoahHeapRegion *r = heap->get_region(i);
462     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
463       has_work = true;
464       break;
465     }
466   }
467 
468   if (has_work) {
469     heap->entry_uncommit(shrink_before, shrink_until);
470   }
471 }
472 
473 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
474   return GCCause::is_user_requested_gc(cause) ||
475          GCCause::is_serviceability_requested_gc(cause);
476 }
477 




478 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
479   assert(GCCause::is_user_requested_gc(cause) ||
480          GCCause::is_serviceability_requested_gc(cause) ||
481          cause == GCCause::_metadata_GC_clear_soft_refs ||
482          cause == GCCause::_codecache_GC_threshold ||
483          cause == GCCause::_full_gc_alot ||
484          cause == GCCause::_wb_full_gc ||
485          cause == GCCause::_wb_breakpoint ||
486          cause == GCCause::_scavenge_alot,
487          "only requested GCs here");
488 
489   if (is_explicit_gc(cause)) {
490     if (!DisableExplicitGC) {
491       handle_requested_gc(cause);
492     }
493   } else {
494     handle_requested_gc(cause);
495   }
496 }
497 
498 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
499   // Make sure we have at least one complete GC cycle before unblocking
500   // from the explicit GC request.
501   //
502   // This is especially important for weak reference cleanup and/or native
503   // resource (e.g. DirectByteBuffer) machinery: when an explicit GC request
504   // arrives very late in an already running cycle, that cycle would miss many
505   // cleanup opportunities that became available before the caller
506   // requested the GC.
507 
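  // For example, if get_gc_id() is 7 when the request arrives, the caller below keeps the
  // request flag set and waits until get_gc_id() reaches 8 and the waiters are notified,
  // i.e. until at least one full cycle started after the request has completed. The
  // _wb_breakpoint cause is the exception: it polls get_gc_id() without blocking on the monitor.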
508   MonitorLocker ml(&_gc_waiters_lock);
509   size_t current_gc_id = get_gc_id();
510   size_t required_gc_id = current_gc_id + 1;
511   while (current_gc_id < required_gc_id) {
512     // Although setting the gc request happens under _gc_waiters_lock, the read side (run_service())
513     // does not take the lock. We need to enforce the following order, so that the read side sees
514     // the latest requested gc cause when the flag is set.
515     _requested_gc_cause = cause;
516     _gc_requested.set();
517 
518     if (cause != GCCause::_wb_breakpoint) {
519       ml.wait();
520     }
521     current_gc_id = get_gc_id();
522   }
523 }
524 
525 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
526   ShenandoahHeap* heap = ShenandoahHeap::heap();
527 
528   assert(current()->is_Java_thread(), "expect Java thread here");
529 
530   if (try_set_alloc_failure_gc()) {
531     // Only report the first allocation failure
532     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
533                  req.type_string(),
534                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
535 
536     // Now that alloc failure GC is scheduled, we can abort everything else
537     heap->cancel_gc(GCCause::_allocation_failure);

581     _do_counters_update.unset();
582     ShenandoahHeap::heap()->monitoring_support()->update_counters();
583   }
584 }
585 
586 void ShenandoahControlThread::handle_force_counters_update() {
587   if (_force_counters_update.is_set()) {
588     _do_counters_update.unset(); // reset these too, we do update now!
589     ShenandoahHeap::heap()->monitoring_support()->update_counters();
590   }
591 }
592 
593 void ShenandoahControlThread::notify_heap_changed() {
594   // This is called from the allocation path, and thus should be fast.
595 
596   // Update monitoring counters when we take a new region. This amortizes the
597   // update costs on the slow path.
598   if (_do_counters_update.is_unset()) {
599     _do_counters_update.set();
600   }
601   // Notify that something had changed.
602   if (_heap_changed.is_unset()) {
603     _heap_changed.set();
604   }
605 }
606 
607 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
608   assert(ShenandoahPacing, "should only call when pacing is enabled");
609   Atomic::add(&_allocs_seen, words, memory_order_relaxed);
610 }
611 
612 void ShenandoahControlThread::set_forced_counters_update(bool value) {
613   _force_counters_update.set_cond(value);
614 }
615 
616 void ShenandoahControlThread::reset_gc_id() {
617   Atomic::store(&_gc_id, (size_t)0);
618 }
619 
620 void ShenandoahControlThread::update_gc_id() {
621   Atomic::inc(&_gc_id);
622 }
623 
624 size_t ShenandoahControlThread::get_gc_id() {

629   print_on(tty);
630 }
631 
632 void ShenandoahControlThread::print_on(outputStream* st) const {
633   st->print("Shenandoah Concurrent Thread");
634   Thread::print_on(st);
635   st->cr();
636 }
637 
638 void ShenandoahControlThread::start() {
639   create_and_start();
640 }
641 
642 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
643   _graceful_shutdown.set();
644 }
645 
646 bool ShenandoahControlThread::in_graceful_shutdown() {
647   return _graceful_shutdown.is_set();
648 }

 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 29 #include "gc/shenandoah/shenandoahControlThread.hpp"
 30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 32 #include "gc/shenandoah/shenandoahFullGC.hpp"
 33 #include "gc/shenandoah/shenandoahGeneration.hpp"
 34 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 35 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 37 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 38 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 39 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 40 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 41 #include "gc/shenandoah/shenandoahOldGC.hpp"
 42 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 43 #include "gc/shenandoah/shenandoahUtils.hpp"
 44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 45 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 46 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
 47 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 48 #include "memory/iterator.hpp"
 49 #include "memory/metaspaceUtils.hpp"
 50 #include "memory/metaspaceStats.hpp"
 51 #include "memory/universe.hpp"
 52 #include "runtime/atomic.hpp"
 53 
 54 ShenandoahControlThread::ShenandoahControlThread() :
 55   ConcurrentGCThread(),
 56   _alloc_failure_waiters_lock(Mutex::safepoint - 1, "ShenandoahAllocFailureGC_lock", true),
 57   _gc_waiters_lock(Mutex::safepoint - 1, "ShenandoahRequestedGC_lock", true),
 58   _control_lock(Mutex::nosafepoint - 1, "ShenandoahControlGC_lock", true),
 59   _regulator_lock(Mutex::nosafepoint - 1, "ShenandoahRegulatorGC_lock", true),
 60   _periodic_task(this),
 61   _requested_gc_cause(GCCause::_no_cause_specified),
 62   _requested_generation(GenerationMode::GLOBAL),
 63   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
 64   _degen_generation(NULL),
 65   _allocs_seen(0),
 66   _mode(none) {
 67 
 68   reset_gc_id();
 69   create_and_start();
 70   _periodic_task.enroll();
 71   if (ShenandoahPacing) {
 72     _periodic_pacer_notify_task.enroll();
 73   }
 74 }
 75 
 76 ShenandoahControlThread::~ShenandoahControlThread() {
 77   // This is here so that super is called.
 78 }
 79 
 80 void ShenandoahPeriodicTask::task() {
 81   _thread->handle_force_counters_update();
 82   _thread->handle_counters_update();
 83 }
 84 
 85 void ShenandoahPeriodicPacerNotify::task() {
 86   assert(ShenandoahPacing, "Should not be here otherwise");
 87   ShenandoahHeap::heap()->pacer()->notify_waiters();
 88 }
 89 
 90 void ShenandoahControlThread::run_service() {
 91   ShenandoahHeap* heap = ShenandoahHeap::heap();
 92 
 93   GCMode default_mode = concurrent_normal;
 94   GenerationMode generation = GLOBAL;
 95   GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;

 96 
 97   double last_shrink_time = os::elapsedTime();
 98   uint age_period = 0;
 99 
100   // The shrink period avoids constantly polling regions for shrinking.
101   // Having a period 10x shorter than the delay means we hit the
102   // shrinking with a lag of less than 1/10th of the true delay.
103   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
104   double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
105 
106   ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
107 
108   // Heuristics are notified here of allocation failures and other outcomes
109   // of the cycle. They are also used here to control whether the Nth consecutive
110   // degenerated cycle should be 'promoted' to a full cycle. The decision to
111   // trigger a cycle or not is evaluated on the regulator thread.
112   ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
113   while (!in_graceful_shutdown() && !should_terminate()) {
114     // Figure out if we have pending requests.
115     bool alloc_failure_pending = _alloc_failure_gc.is_set();
116     bool is_gc_requested = _gc_requested.is_set();
117     GCCause::Cause requested_gc_cause = _requested_gc_cause;
118     bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
119     bool implicit_gc_requested = is_gc_requested && is_implicit_gc(requested_gc_cause);
120 
121     // This control loop iteration has seen this much allocation.
122     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
123 
124     // Check if we have seen a new target for soft max heap size.
125     bool soft_max_changed = check_soft_max_changed();
126 
127     // Choose which GC mode to run in. The block below should select a single mode.
128     set_gc_mode(none);
129     GCCause::Cause cause = GCCause::_last_gc_cause;
130     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
131 
132     if (alloc_failure_pending) {
133       // Allocation failure takes precedence: we have to deal with it first thing
134       log_info(gc)("Trigger: Handle Allocation Failure");
135 
136       cause = GCCause::_allocation_failure;
137 
138       // Consume the degen point, and seed it with default value
139       degen_point = _degen_point;
140       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
141 
142       if (degen_point == ShenandoahGC::_degenerated_outside_cycle) {
143         _degen_generation = heap->mode()->is_generational() ? heap->young_generation() : heap->global_generation();
144       } else {
145         assert(_degen_generation != NULL, "Need to know which generation to resume.");
146       }
147 
148       ShenandoahHeuristics* heuristics = _degen_generation->heuristics();
149       generation = _degen_generation->generation_mode();
150       bool old_gen_evacuation_failed = heap->clear_old_evacuation_failure();
151 
152       // Do not bother with degenerated cycle if old generation evacuation failed.
153       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && !old_gen_evacuation_failed) {
154         heuristics->record_allocation_failure_gc();
155         policy->record_alloc_failure_to_degenerated(degen_point);
156         set_gc_mode(stw_degenerated);
157       } else {
158         heuristics->record_allocation_failure_gc();
159         policy->record_alloc_failure_to_full();
160         generation = GLOBAL;
161         set_gc_mode(stw_full);
162       }

163     } else if (explicit_gc_requested) {
164       cause = requested_gc_cause;
165       generation = GLOBAL;
166       log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
167 
168       global_heuristics->record_requested_gc();
169 
170       if (ExplicitGCInvokesConcurrent) {
171         policy->record_explicit_to_concurrent();
172         set_gc_mode(default_mode);
173         // Unload and clean up everything
174         heap->set_unload_classes(global_heuristics->can_unload_classes());
175       } else {
176         policy->record_explicit_to_full();
177         set_gc_mode(stw_full);
178       }
179     } else if (implicit_gc_requested) {
180       cause = requested_gc_cause;
181       generation = GLOBAL;
182       log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
183 
184       global_heuristics->record_requested_gc();
185 
186       if (ShenandoahImplicitGCInvokesConcurrent) {
187         policy->record_implicit_to_concurrent();
188         set_gc_mode(default_mode);
189 
190         // Unload and clean up everything
191         heap->set_unload_classes(global_heuristics->can_unload_classes());
192       } else {
193         policy->record_implicit_to_full();
194         set_gc_mode(stw_full);
195       }
196     } else {
197       // We should only be here if the regulator requested a cycle or if
198       // there is an old generation mark in progress.
199       if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
200         if (_requested_generation == OLD && heap->doing_mixed_evacuations()) {
201           // If a request to start an old cycle arrived while an old cycle was running, but _before_
202           // it chose any regions for evacuation, we don't want to start a new old cycle. Rather, we want
203           // the heuristic to run a young collection so that we can evacuate some old regions.
204           assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking.");
205           generation = YOUNG;
206         } else {
207           generation = _requested_generation;
208         }
209 
210         // preemption was requested or this is a regular cycle
211         cause = GCCause::_shenandoah_concurrent_gc;
212         set_gc_mode(default_mode);
213 
214         // Don't start a new old marking if there is one already in progress.
215         if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) {
216           set_gc_mode(servicing_old);
217         }
218 
219         if (generation == GLOBAL) {
220           heap->set_unload_classes(global_heuristics->should_unload_classes());
221         } else {
222           heap->set_unload_classes(false);
223         }
224 
225         // Don't want to spin in this loop and start a cycle every time, so
226         // clear requested gc cause. This creates a race with callers of the
227         // blocking 'request_gc' method, but there it loops and resets the
228         // '_requested_gc_cause' until a full cycle is completed.
229         _requested_gc_cause = GCCause::_no_gc;
230       } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) {
231         // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for
232         // mixed evacuation in progress, so resume working on that.
233         log_info(gc)("Resume old gc: marking=%s, preparing=%s",
234                      BOOL_TO_STR(heap->is_concurrent_old_mark_in_progress()),
235                      BOOL_TO_STR(heap->is_prepare_for_old_mark_in_progress()));
236 
237         cause = GCCause::_shenandoah_concurrent_gc;
238         generation = OLD;
239         set_gc_mode(servicing_old);
240       }
241     }
242 
243     // Blow all soft references on this cycle if handling an allocation failure,
244     // an implicit or explicit GC request, or if we are requested to do so unconditionally.
245     if (generation == GLOBAL && (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
246       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
247     }
248 
249     bool gc_requested = (_mode != none);
250     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
251 
252     if (gc_requested) {
253       // GC is starting, bump the internal ID
254       update_gc_id();
255 
256       heap->reset_bytes_allocated_since_gc_start();
257 
258       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
259 
260       // If GC was requested, we are sampling the counters even without actual triggers
261       // from allocation machinery. This captures GC phases more accurately.
262       set_forced_counters_update(true);
263 
264       // If GC was requested, we better dump freeset data for performance debugging
265       {
266         ShenandoahHeapLocker locker(heap->lock());
267         heap->free_set()->log_status();
268       }
269 
270       heap->set_aging_cycle(false);
271       {
272         switch (_mode) {
273           case concurrent_normal: {
274             if ((generation == YOUNG) && (age_period-- == 0)) {
275               heap->set_aging_cycle(true);
276               age_period = ShenandoahAgingCyclePeriod - 1;
277             }
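            // Note on the aging check above: with a hypothetical ShenandoahAgingCyclePeriod
            // of 4, the counter hits zero on every fourth young concurrent cycle, that cycle
            // runs with aging enabled, and the counter is reset to 3 so the next three young
            // cycles run with aging disabled.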
278             service_concurrent_normal_cycle(heap, generation, cause);
279             break;
280           }
281           case stw_degenerated: {
282             if (!service_stw_degenerated_cycle(cause, degen_point)) {
283               // The degenerated GC was upgraded to a Full GC
284               generation = GLOBAL;
285             }
286             break;
287           }
288           case stw_full: {
289             service_stw_full_cycle(cause);
290             break;
291           }
292           case servicing_old: {
293             assert(generation == OLD, "Expected old generation here");
294             service_concurrent_old_cycle(heap, cause);
295             break;
296           }
297           default: {
298             ShouldNotReachHere();
299           }
300         }
301       }
302 
303       // If this was the requested GC cycle, notify waiters about it
304       if (explicit_gc_requested || implicit_gc_requested) {
305         notify_gc_waiters();
306       }
307 
308       // If this was the allocation failure GC cycle, notify waiters about it
309       if (alloc_failure_pending) {
310         notify_alloc_failure_waiters();
311       }
312 
313       // Report the current free set state at the end of the cycle, whether
314       // it was a normal completion or an abort.
315       {
316         ShenandoahHeapLocker locker(heap->lock());
317         heap->free_set()->log_status();
318 
319         // Notify Universe about new heap usage. This has implications for
320         // global soft refs policy, and we better report it every time heap
321         // usage goes down.
322         Universe::heap()->update_capacity_and_used_at_gc();
323 
324         // Signal that we have completed a visit to all live objects.
325         Universe::heap()->record_whole_heap_examined_timestamp();
326       }
327 
328       // Disable forced counters update, and update counters one more time
329       // to capture the state at the end of GC session.
330       handle_force_counters_update();
331       set_forced_counters_update(false);
332 
333       // Retract forceful part of soft refs policy
334       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
335 
336       // Clear metaspace oom flag, if current cycle unloaded classes
337       if (heap->unload_classes()) {
338         assert(generation == GLOBAL, "Only unload classes during GLOBAL cycle");
339         global_heuristics->clear_metaspace_oom();
340       }
341 
342       process_phase_timings(heap);
343 
344       // Print Metaspace change following GC (if logging is enabled).
345       MetaspaceUtils::print_metaspace_change(meta_sizes);
346 
347       // GC is over, we are at idle now
348       if (ShenandoahPacing) {
349         heap->pacer()->setup_for_idle();
350       }
351     } else {
352       // Allow allocators to know we have seen this much allocation
353       if (ShenandoahPacing && (allocs_seen > 0)) {
354         heap->pacer()->report_alloc(allocs_seen);
355       }
356     }
357 
358     double current = os::elapsedTime();
359 
360     if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
361       // Explicit GC tries to uncommit everything down to min capacity.
362       // Soft max change tries to uncommit everything down to target capacity.
363       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
364 
365       double shrink_before = (explicit_gc_requested || soft_max_changed) ?
366                              current :
367                              current - (ShenandoahUncommitDelay / 1000.0);
368 
369       size_t shrink_until = soft_max_changed ?
370                              heap->soft_max_capacity() :
371                              heap->min_capacity();
372 
373       service_uncommit(shrink_before, shrink_until);
374       heap->phase_timings()->flush_cycle_to_global();
375       last_shrink_time = current;
376     }
377 
378     // Don't wait around if there was an allocation failure - start the next cycle immediately.
379     if (!is_alloc_failure_gc()) {
380       // The timed wait is necessary because this thread has a responsibility to send
381       // 'alloc_words' to the pacer when it does not perform a GC.
382       MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
383       lock.wait(ShenandoahControlIntervalMax);


384     }

385   }
386 
387   // Wait for the actual stop(), can't leave run_service() earlier.
388   while (!should_terminate()) {
389     os::naked_short_sleep(ShenandoahControlIntervalMin);
390   }
391 }
392 
393 void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap) {
394 
395   // Commit worker statistics to cycle data
396   heap->phase_timings()->flush_par_workers_to_cycle();
397   if (ShenandoahPacing) {
398     heap->pacer()->flush_stats_to_cycle();
399   }
400 
401   // Print GC stats for current cycle
402   {
403     LogTarget(Info, gc, stats) lt;
404     if (lt.is_enabled()) {
405       ResourceMark rm;
406       LogStream ls(lt);
407       heap->phase_timings()->print_cycle_on(&ls);
408       if (ShenandoahPacing) {
409         heap->pacer()->print_cycle_on(&ls);
410       }
411     }
412   }
413 
414   // Commit statistics to globals
415   heap->phase_timings()->flush_cycle_to_global();
416 }
417 
418 // Young and old concurrent cycles are initiated by the regulator. Implicit
419 // and explicit GC requests are handled by the controller thread and always
420 // run a global cycle (which is concurrent by default, but may be overridden
421 // by command line options). Old cycles always degenerate to a global cycle.
422 // A degenerated young cycle completes only the young collection. Both young
423 // and old degenerated cycles may upgrade to Full GC. A Full GC may also be
424 // triggered directly by a System.gc() invocation.
425 //
426 //
427 //      +-----+ Idle +-----+-----------+---------------------+
428 //      |         +        |           |                     |
429 //      |         |        |           |                     |
430 //      |         |        v           |                     |
431 //      |         |  Bootstrap Old +-- | ------------+       |
432 //      |         |   +                |             |       |
433 //      |         |   |                |             |       |
434 //      |         v   v                v             v       |
435 //      |    Resume Old <----------+ Young +--> Young Degen  |
436 //      |     +  +   ^                            +  +       |
437 //      v     |  |   |                            |  |       |
438 //   Global <-+  |   +----------------------------+  |       |
439 //      +        |                                   |       |
440 //      |        v                                   v       |
441 //      +--->  Global Degen +--------------------> Full <----+
442 //
443 void ShenandoahControlThread::service_concurrent_normal_cycle(
444   const ShenandoahHeap* heap, const GenerationMode generation, GCCause::Cause cause) {
445 
446   switch (generation) {
447     case YOUNG: {
448       // Run a young cycle. This might or might not have interrupted an ongoing
449       // concurrent mark in the old generation. We need to think about promotions
450       // in this case. Promoted objects should be above the TAMS in the old regions
451       // they end up in, but we have to be sure we don't promote into any regions
452       // that are in the cset.
453       log_info(gc, ergo)("Start GC cycle (YOUNG)");
454       service_concurrent_cycle(heap->young_generation(), cause, false);
455       heap->young_generation()->log_status();
456       break;
457     }
458     case GLOBAL: {
459       log_info(gc, ergo)("Start GC cycle (GLOBAL)");
460       service_concurrent_cycle(heap->global_generation(), cause, false);
461       heap->global_generation()->log_status();
462       break;
463     }
464     case OLD: {
465       log_info(gc, ergo)("Start GC cycle (OLD)");
466       service_concurrent_old_cycle(heap, cause);
467       heap->old_generation()->log_status();
468       break;
469     }
470     default:
471       ShouldNotReachHere();
472   }
473 }
474 
475 void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap* heap, GCCause::Cause &cause) {
476 
477   ShenandoahOldGeneration* old_generation = heap->old_generation();
478   ShenandoahYoungGeneration* young_generation = heap->young_generation();
479 
480   GCIdMark gc_id_mark;
481   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
482 
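  // Note: the cases below intentionally fall through. An IDLE old generation proceeds to
  // FILLING; once FILLING is done (and did not bail out to resume later), it proceeds to
  // BOOTSTRAPPING; and a successfully bootstrapped old generation falls into MARKING.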
483   switch (old_generation->state()) {
484     case ShenandoahOldGeneration::IDLE: {
485       assert(!heap->is_concurrent_old_mark_in_progress(), "Old already in progress.");
486       assert(old_generation->task_queues()->is_empty(), "Old mark queues should be empty.");
487     }
488     case ShenandoahOldGeneration::FILLING: {
489       _allow_old_preemption.set();
490       ShenandoahGCSession session(cause, old_generation);
491       old_generation->prepare_gc();
492       _allow_old_preemption.unset();
493 
494       if (heap->is_prepare_for_old_mark_in_progress()) {
495         assert(old_generation->state() == ShenandoahOldGeneration::FILLING, "Prepare for mark should be in progress.");
496         return;
497       }
498 
499       assert(old_generation->state() == ShenandoahOldGeneration::BOOTSTRAPPING, "Finished with filling, should be bootstrapping.");
500     }
501     case ShenandoahOldGeneration::BOOTSTRAPPING: {
502       // Configure the young generation's concurrent mark to put objects in
503       // old regions into the concurrent mark queues associated with the old
504       // generation. The young cycle will run as normal, except that rather than
505       // ignoring old references it will mark and enqueue them in the old concurrent
506       // task queues, but it will not traverse them.
507       young_generation->set_old_gen_task_queues(old_generation->task_queues());
508       ShenandoahGCSession session(cause, young_generation);
509       service_concurrent_cycle(heap, young_generation, cause, true);
510       process_phase_timings(heap);
511       if (heap->cancelled_gc()) {
512         // Young generation bootstrap cycle has failed. Concurrent mark for old generation
513         // is going to resume after degenerated bootstrap cycle completes.
514         log_info(gc)("Bootstrap cycle for old generation was cancelled.");
515         return;
516       }
517 
518       // Reset the degenerated point. Normally this would happen at the top
519       // of the control loop, but here we have just completed a young cycle
520       // which has bootstrapped the old concurrent marking.
521       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
522 
523       // From here we will 'resume' the old concurrent mark. This will skip reset
524       // and init mark for the concurrent mark. All of that work will have been
525       // done by the bootstrapping young cycle. In order to simplify the debugging
526       // effort, the old cycle will ONLY complete the mark phase. No actual
527       // collection of the old generation is happening here.
528       set_gc_mode(servicing_old);
529       old_generation->transition_to(ShenandoahOldGeneration::MARKING);
530     }
531     case ShenandoahOldGeneration::MARKING: {
532       ShenandoahGCSession session(cause, old_generation);
533       bool marking_complete = resume_concurrent_old_cycle(old_generation, cause);
534       if (marking_complete) {
535         assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking.");
536       }
537       break;
538     }
539     default:
540       log_error(gc)("Unexpected state for old GC: %d", old_generation->state());
541       ShouldNotReachHere();
542   }
543 }
544 
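// Resumes concurrent marking of the old generation. Returns true if old marking ran to
// completion; returns false if the cycle was cancelled, either for an allocation failure
// (which degenerates outside the cycle) or because a young collection preempted old marking.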
545 bool ShenandoahControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
546 
547   assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
548   log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued.", generation->task_queues()->tasks());
549 
550   ShenandoahHeap* heap = ShenandoahHeap::heap();
551 
552   // We can only tolerate being cancelled during concurrent marking or during preparation for mixed
553   // evacuation. This flag here (passed by reference) is used to control precisely where the regulator
554   // is allowed to cancel a GC.
555   ShenandoahOldGC gc(generation, _allow_old_preemption);
556   if (gc.collect(cause)) {
557     generation->record_success_concurrent(false);
558   }
559 
560   if (heap->cancelled_gc()) {
561     // It's possible the gc cycle was cancelled after the last time
562     // the collection checked for cancellation. In which case, the
563     // old gc cycle is still completed, and we have to deal with this
564     // cancellation. We set the degeneration point to be outside
565     // the cycle because if this is an allocation failure, that is
566     // what must be done (there is no degenerated old cycle). If the
567     // cancellation was due to a heuristic wanting to start a young
568     // cycle, then we are not actually going to a degenerated cycle,
569     // so the degenerated point doesn't matter here.
570     check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle);
571     if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) {
572       heap->shenandoah_policy()->record_interrupted_old();
573     }
574     return false;
575   }
576   return true;
577 }
578 
579 bool ShenandoahControlThread::check_soft_max_changed() const {
580   ShenandoahHeap* heap = ShenandoahHeap::heap();
581   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
582   size_t old_soft_max = heap->soft_max_capacity();
583   if (new_soft_max != old_soft_max) {
584     new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
585     new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
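    // For example, with a hypothetical 512m minimum capacity and 8g maximum capacity,
    // a requested SoftMaxHeapSize of 16g is clamped to 8g and a request of 256m is
    // clamped to 512m; if the clamped value equals the current soft max, nothing changes.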
586     if (new_soft_max != old_soft_max) {
587       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
588                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
589                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
590       );
591       heap->set_soft_max_capacity(new_soft_max);
592       return true;
593     }
594   }
595   return false;
596 }
597 
598 void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) {
599   // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens during
600   // any of the concurrent phases, the cycle first degrades to a Degenerated GC and completes there.
601   // If a second allocation failure happens during the Degenerated GC cycle (for example, when the GC
602   // tries to evacuate something and no memory is available), the cycle degrades to a Full GC.
603   //
604   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
605   // heuristics say there are no regions to compact and all the collected garbage comes from immediately
606   // reclaimable regions.
607   //
608   // ................................................................................................
609   //
610   //                                    (immediate garbage shortcut)                Concurrent GC
611   //                             /-------------------------------------------\
612   //                             |                                           |
613   //                             |                                           |
614   //                             |                                           |
615   //                             |                                           v
616   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
617   //                   |                    |                 |              ^
618   //                   | (af)               | (af)            | (af)         |
619   // ..................|....................|.................|..............|.......................
620   //                   |                    |                 |              |
621   //                   |                    |                 |              |      Degenerated GC
622   //                   v                    v                 v              |
623   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
624   //                   |                    |                 |              ^
625   //                   | (af)               | (af)            | (af)         |
626   // ..................|....................|.................|..............|.......................
627   //                   |                    |                 |              |
628   //                   |                    v                 |              |      Full GC
629   //                   \------------------->o<----------------/              |
630   //                                        |                                |
631   //                                        v                                |
632   //                                      Full GC  --------------------------/
633   //

634   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
635 
636   ShenandoahHeap* heap = ShenandoahHeap::heap();
637   GCIdMark gc_id_mark;
638   ShenandoahGCSession session(cause, generation);

639   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
640 
641   service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
642 }
643 
644 void ShenandoahControlThread::service_concurrent_cycle(const ShenandoahHeap* heap, ShenandoahGeneration* generation,
645                                                        GCCause::Cause &cause, bool do_old_gc_bootstrap) {
646   ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
647   if (gc.collect(cause)) {
648     // Cycle is complete
649     generation->record_success_concurrent(gc.abbreviated());

650   } else {
651     assert(heap->cancelled_gc(), "Must have been cancelled");
652     check_cancellation_or_degen(gc.degen_point());
653     assert(generation->generation_mode() != OLD, "Old GC takes a different control path");
654     // A concurrent young-gen collection degenerates to a young
655     // collection; the same holds for global collections.
656     _degen_generation = generation;
657   }
658 }
659 
660 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
661   ShenandoahHeap* heap = ShenandoahHeap::heap();
662   if (!heap->cancelled_gc()) {
663     return false;
664   }
665 
666   if (in_graceful_shutdown()) {
667     return true;
668   }
669 
670   assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle,
671          "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
672 
673   if (is_alloc_failure_gc()) {
674     _degen_point = point;
675     return true;
676   }
677 
678   if (_preemption_requested.is_set()) {
679     assert(_requested_generation == YOUNG, "Only young GCs may preempt old.");
680     _preemption_requested.unset();
681 
682     // Old generation marking is only cancellable during concurrent marking.
683     // Once final mark is complete, the code does not check again for cancellation.
684     // If the old generation cycle had been cancelled for an allocation failure,
685     // we would not reach this case; the calling code is responsible for turning a
686     // cancellation due to allocation failure into a degenerated cycle.
687     _degen_point = point;
688     heap->clear_cancelled_gc(false /* clear oom handler */);
689     return true;
690   }
691 
692   fatal("GC should be cancelled only for an alloc failure GC, a graceful shutdown, or to pause old generation marking.");
693   return false;
694 }
695 
696 void ShenandoahControlThread::stop_service() {
697   // Nothing to do here.
698 }
699 
700 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
701   ShenandoahHeap* const heap = ShenandoahHeap::heap();
702 
703   GCIdMark gc_id_mark;
704   ShenandoahGCSession session(cause, heap->global_generation());
705 
706   ShenandoahFullGC gc;
707   gc.collect(cause);
708 
709   heap->global_generation()->heuristics()->record_success_full();

710   heap->shenandoah_policy()->record_success_full();
711 }
712 
713 bool ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
714   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
715   ShenandoahHeap* const heap = ShenandoahHeap::heap();
716 
717   GCIdMark gc_id_mark;
718   ShenandoahGCSession session(cause, _degen_generation);
719 
720   ShenandoahDegenGC gc(point, _degen_generation);
721   gc.collect(cause);
722 
723   assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks");
724   if (_degen_generation->generation_mode() == GLOBAL) {
725     assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks");
726     assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks");
727   } else {
728     assert(_degen_generation->generation_mode() == YOUNG, "Expected degenerated young cycle, if not global.");
729     ShenandoahOldGeneration* old_generation = (ShenandoahOldGeneration*) heap->old_generation();
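        // If this degenerated young cycle completed the bootstrap of old-generation
        // marking and was not upgraded to a full GC, old marking may now proceed concurrently.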
730     if (old_generation->state() == ShenandoahOldGeneration::BOOTSTRAPPING && !gc.upgraded_to_full()) {
731       old_generation->transition_to(ShenandoahOldGeneration::MARKING);
732     }
733   }
734 
735   _degen_generation->heuristics()->record_success_degenerated();
736   heap->shenandoah_policy()->record_success_degenerated();
737   return !gc.upgraded_to_full();
738 }
739 
740 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
741   ShenandoahHeap* heap = ShenandoahHeap::heap();
742 
743   // Determine if there is work to do. This avoids taking heap lock if there is
744   // no work available, avoids spamming logs with superfluous logging messages,
745   // and minimises the amount of work while locks are taken.
746 
747   if (heap->committed() <= shrink_until) return;
748 
749   bool has_work = false;
750   for (size_t i = 0; i < heap->num_regions(); i++) {
751     ShenandoahHeapRegion* r = heap->get_region(i);
752     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
753       has_work = true;
754       break;
755     }
756   }
757 
758   if (has_work) {
759     heap->entry_uncommit(shrink_before, shrink_until);
760   }
761 }
762 
763 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
764   return GCCause::is_user_requested_gc(cause) ||
765          GCCause::is_serviceability_requested_gc(cause);
766 }
767 
768 bool ShenandoahControlThread::is_implicit_gc(GCCause::Cause cause) const {
769   return !is_explicit_gc(cause) && cause != GCCause::_shenandoah_concurrent_gc;
770 }
771 
772 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
773   assert(GCCause::is_user_requested_gc(cause) ||
774          GCCause::is_serviceability_requested_gc(cause) ||
775          cause == GCCause::_metadata_GC_clear_soft_refs ||
776          cause == GCCause::_codecache_GC_threshold ||
777          cause == GCCause::_full_gc_alot ||
778          cause == GCCause::_wb_full_gc ||
779          cause == GCCause::_wb_breakpoint ||
780          cause == GCCause::_scavenge_alot,
781          "only requested GCs here");
782 
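      // Explicit GC requests are dropped when -XX:+DisableExplicitGC is set;
      // all other causes are handled unconditionally.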
783   if (is_explicit_gc(cause)) {
784     if (!DisableExplicitGC) {
785       handle_requested_gc(cause);
786     }
787   } else {
788     handle_requested_gc(cause);
789   }
790 }
791 
792 bool ShenandoahControlThread::request_concurrent_gc(GenerationMode generation) {
793   if (_preemption_requested.is_set() || _gc_requested.is_set() || ShenandoahHeap::heap()->cancelled_gc()) {
794     // Ignore subsequent requests from the heuristics.
795     return false;
796   }
797 
798   if (_mode == none) {
799     _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
800     _requested_generation = generation;
801     notify_control_thread();
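        // Wait for the control thread to leave idle mode: set_gc_mode() notifies
        // _regulator_lock whenever the mode changes.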
802     MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
803     ml.wait();
804     return true;
805   }
806 
807   if (preempt_old_marking(generation)) {
808     log_info(gc)("Preempting old generation mark to allow %s GC.", generation_name(generation));
809     _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
810     _requested_generation = generation;
811     _preemption_requested.set();
812     ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
813     notify_control_thread();
814 
815     MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
816     ml.wait();
817     return true;
818   }
819 
820   return false;
821 }
822 
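    // Wake up the control thread if it is blocked waiting on _control_lock.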
823 void ShenandoahControlThread::notify_control_thread() {
824   MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag);
825   _control_lock.notify();
826 }
827 
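    // A young request may preempt old-generation marking only while preemption is allowed;
    // try_unset() atomically consumes the permission, so at most one preemption is granted.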
828 bool ShenandoahControlThread::preempt_old_marking(GenerationMode generation) {
829   return generation == YOUNG && _allow_old_preemption.try_unset();
830 }
831 
832 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
833   // Make sure we have at least one complete GC cycle before unblocking
834   // from the explicit GC request.
835   //
836   // This is especially important for weak references cleanup and/or native
837   // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
838   // comes very late in the already running cycle, it would miss lots of new
839   // opportunities for cleanup that were made available before the caller
840   // requested the GC.
841 
842   MonitorLocker ml(&_gc_waiters_lock);
843   size_t current_gc_id = get_gc_id();
844   size_t required_gc_id = current_gc_id + 1;
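      // Keep requesting GCs until a cycle with a higher gc id has completed.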
845   while (current_gc_id < required_gc_id) {
846     // Although the gc request is set under _gc_waiters_lock, the read side (run_service())
847     // does not take the lock. We need to enforce the following order, so that the read side
848     // sees the latest requested gc cause when the flag is set.
849     _requested_gc_cause = cause;
850     _gc_requested.set();
851     notify_control_thread();
852     if (cause != GCCause::_wb_breakpoint) {
853       ml.wait();
854     }
855     current_gc_id = get_gc_id();
856   }
857 }
858 
859 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
860   ShenandoahHeap* heap = ShenandoahHeap::heap();
861 
862   assert(current()->is_Java_thread(), "expect Java thread here");
863 
864   if (try_set_alloc_failure_gc()) {
865     // Only report the first allocation failure
866     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
867                  req.type_string(),
868                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
869 
870     // Now that alloc failure GC is scheduled, we can abort everything else
871     heap->cancel_gc(GCCause::_allocation_failure);

915     _do_counters_update.unset();
916     ShenandoahHeap::heap()->monitoring_support()->update_counters();
917   }
918 }
919 
920 void ShenandoahControlThread::handle_force_counters_update() {
921   if (_force_counters_update.is_set()) {
922     _do_counters_update.unset(); // reset these too, we do update now!
923     ShenandoahHeap::heap()->monitoring_support()->update_counters();
924   }
925 }
926 
927 void ShenandoahControlThread::notify_heap_changed() {
928   // This is called from allocation path, and thus should be fast.
929 
930   // Update monitoring counters when we take a new region. This amortizes the
931   // update costs on the slow path.
932   if (_do_counters_update.is_unset()) {
933     _do_counters_update.set();
934   }




935 }
936 
937 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
938   assert(ShenandoahPacing, "should only call when pacing is enabled");
939   Atomic::add(&_allocs_seen, words, memory_order_relaxed);
940 }
941 
942 void ShenandoahControlThread::set_forced_counters_update(bool value) {
943   _force_counters_update.set_cond(value);
944 }
945 
946 void ShenandoahControlThread::reset_gc_id() {
947   Atomic::store(&_gc_id, (size_t)0);
948 }
949 
950 void ShenandoahControlThread::update_gc_id() {
951   Atomic::inc(&_gc_id);
952 }
953 
954 size_t ShenandoahControlThread::get_gc_id() {

959   print_on(tty);
960 }
961 
962 void ShenandoahControlThread::print_on(outputStream* st) const {
963   st->print("Shenandoah Concurrent Thread");
964   Thread::print_on(st);
965   st->cr();
966 }
967 
968 void ShenandoahControlThread::start() {
969   create_and_start();
970 }
971 
972 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
973   _graceful_shutdown.set();
974 }
975 
976 bool ShenandoahControlThread::in_graceful_shutdown() {
977   return _graceful_shutdown.is_set();
978 }
979 
980 const char* ShenandoahControlThread::gc_mode_name(ShenandoahControlThread::GCMode mode) {
981   switch (mode) {
982     case none:              return "idle";
983     case concurrent_normal: return "normal";
984     case stw_degenerated:   return "degenerated";
985     case stw_full:          return "full";
986     case servicing_old:     return "old";
987     default:                return "unknown";
988   }
989 }
990 
991 void ShenandoahControlThread::set_gc_mode(ShenandoahControlThread::GCMode new_mode) {
992   if (_mode != new_mode) {
993     log_info(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode));
994     _mode = new_mode;
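        // Notify any thread waiting for a mode change (e.g. the regulator thread
        // blocked in request_concurrent_gc()).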
995     MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag);
996     ml.notify_all();
997   }
998 }