
src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp


 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 29 #include "gc/shenandoah/shenandoahControlThread.hpp"
 30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 32 #include "gc/shenandoah/shenandoahFullGC.hpp"
 33 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 38 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 39 #include "gc/shenandoah/shenandoahUtils.hpp"
 40 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 41 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 42 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
 43 #include "memory/iterator.hpp"

 44 #include "memory/metaspaceUtils.hpp"
 45 #include "memory/metaspaceStats.hpp"
 46 #include "memory/universe.hpp"
 47 #include "runtime/atomic.hpp"
 48 
 49 ShenandoahControlThread::ShenandoahControlThread() :
 50   ConcurrentGCThread(),
 51   _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true),
 52   _gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true),
 53   _periodic_task(this),
 54   _requested_gc_cause(GCCause::_no_cause_specified),
 55   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
 56   _allocs_seen(0) {
 57   set_name("Shenandoah Control Thread");
 58   reset_gc_id();
 59   create_and_start();
 60   _periodic_task.enroll();
 61   if (ShenandoahPacing) {
 62     _periodic_pacer_notify_task.enroll();
 63   }
 64 }
 65 
 66 ShenandoahControlThread::~ShenandoahControlThread() {
 67   // This is here so that super is called.
 68 }
 69 
 70 void ShenandoahPeriodicTask::task() {
 71   _thread->handle_force_counters_update();
 72   _thread->handle_counters_update();
 73 }
 74 
 75 void ShenandoahPeriodicPacerNotify::task() {
 76   assert(ShenandoahPacing, "Should not be here otherwise");
 77   ShenandoahHeap::heap()->pacer()->notify_waiters();
 78 }
 79 
 80 void ShenandoahControlThread::run_service() {
 81   ShenandoahHeap* heap = ShenandoahHeap::heap();
 82 
 83   GCMode default_mode = concurrent_normal;
 84   GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
 85   int sleep = ShenandoahControlIntervalMin;
 86 
 87   double last_shrink_time = os::elapsedTime();
 88   double last_sleep_adjust_time = os::elapsedTime();
 89 
 90   // Shrink period avoids constantly polling regions for shrinking.
 91   // Having a period 10x lower than the delay would mean we hit the
 92   // shrinking with lag of less than 1/10-th of true delay.
 93   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
 94   double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
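      // Illustrative arithmetic (example value, not from this change): if ShenandoahUncommitDelay
      // is, say, 300000 ms (5 minutes), then shrink_period = 300000 / 1000 / 10 = 30.0 seconds,
      // so the loop below re-checks for uncommittable regions roughly every 30 seconds.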
 95 
 96   ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
 97   ShenandoahHeuristics* heuristics = heap->heuristics();
 98   while (!in_graceful_shutdown() && !should_terminate()) {
 99     // Figure out if we have pending requests.
100     bool alloc_failure_pending = _alloc_failure_gc.is_set();
101     bool is_gc_requested = _gc_requested.is_set();
102     GCCause::Cause requested_gc_cause = _requested_gc_cause;
103     bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
104     bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);
105 
106     // This control loop iteration has seen this much allocation.
107     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
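        // The exchange-with-zero reads and resets the counter in one atomic step, so words
        // allocated since the previous iteration are counted exactly once; if no GC runs in
        // this iteration, they are reported to the pacer in the no-GC branch further down.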
108 
109     // Check if we have seen a new target for soft max heap size.
110     bool soft_max_changed = check_soft_max_changed();
111 
112     // Choose which GC mode to run in. The block below should select a single mode.
113     GCMode mode = none;
114     GCCause::Cause cause = GCCause::_last_gc_cause;
115     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
116 
117     if (alloc_failure_pending) {
118       // Allocation failure takes precedence: we have to deal with it first thing
119       log_info(gc)("Trigger: Handle Allocation Failure");
120 
121       cause = GCCause::_allocation_failure;
122 
123       // Consume the degen point, and seed it with default value
124       degen_point = _degen_point;
125       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
126 
127       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
128         heuristics->record_allocation_failure_gc();
129         policy->record_alloc_failure_to_degenerated(degen_point);
130         mode = stw_degenerated;
131       } else {
132         heuristics->record_allocation_failure_gc();
133         policy->record_alloc_failure_to_full();
134         mode = stw_full;
135       }
136 
137     } else if (explicit_gc_requested) {
138       cause = requested_gc_cause;
139       log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
140 
141       heuristics->record_requested_gc();
142 
143       if (ExplicitGCInvokesConcurrent) {
144         policy->record_explicit_to_concurrent();
145         mode = default_mode;
146         // Unload and clean up everything
147         heap->set_unload_classes(heuristics->can_unload_classes());
148       } else {
149         policy->record_explicit_to_full();
150         mode = stw_full;
151       }
152     } else if (implicit_gc_requested) {
153       cause = requested_gc_cause;
154       log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
155 
156       heuristics->record_requested_gc();
157 
158       if (ShenandoahImplicitGCInvokesConcurrent) {
159         policy->record_implicit_to_concurrent();
160         mode = default_mode;
161 
162         // Unload and clean up everything
163         heap->set_unload_classes(heuristics->can_unload_classes());
164       } else {
165         policy->record_implicit_to_full();
166         mode = stw_full;
167       }
168     } else {
169       // Potential normal cycle: ask heuristics if it wants to act
170       if (heuristics->should_start_gc()) {
171         mode = default_mode;
172         cause = default_cause;
173       }
174 
175       // Ask policy if this cycle wants to process references or unload classes
176       heap->set_unload_classes(heuristics->should_unload_classes());
177     }
178 
179     // Blow all soft references on this cycle if handling an allocation failure, an
180     // implicit or explicit GC request, or if we are requested to do so unconditionally.
181     if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
182       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
183     }
184 
185     bool gc_requested = (mode != none);
186     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
187 
188     if (gc_requested) {
189       // GC is starting, bump the internal ID
190       update_gc_id();
191 
192       heap->reset_bytes_allocated_since_gc_start();
193 
194       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
195 
196       // If GC was requested, we are sampling the counters even without actual triggers
197       // from allocation machinery. This captures GC phases more accurately.
198       set_forced_counters_update(true);
199 
200       // If GC was requested, we better dump freeset data for performance debugging
201       {
202         ShenandoahHeapLocker locker(heap->lock());
203         heap->free_set()->log_status();
204       }
205 
206       switch (mode) {
207         case concurrent_normal:
208           service_concurrent_normal_cycle(cause);
209           break;
210         case stw_degenerated:
211           service_stw_degenerated_cycle(cause, degen_point);
212           break;
213         case stw_full:
214           service_stw_full_cycle(cause);
215           break;
216         default:
217           ShouldNotReachHere();
218       }
219 
220       // If this was the requested GC cycle, notify waiters about it
221       if (explicit_gc_requested || implicit_gc_requested) {
222         notify_gc_waiters();
223       }
224 
225       // If this was the allocation failure GC cycle, notify waiters about it
226       if (alloc_failure_pending) {
227         notify_alloc_failure_waiters();
228       }
229 
230       // Report current free set state at the end of cycle, whether
231       // it is a normal completion, or the abort.
232       {
233         ShenandoahHeapLocker locker(heap->lock());
234         heap->free_set()->log_status();
235 
236         // Notify Universe about new heap usage. This has implications for
237         // global soft refs policy, and we better report it every time heap
238         // usage goes down.
239         Universe::heap()->update_capacity_and_used_at_gc();
240 
241         // Signal that we have completed a visit to all live objects.
242         Universe::heap()->record_whole_heap_examined_timestamp();
243       }
244 
245       // Disable forced counters update, and update counters one more time
246       // to capture the state at the end of GC session.
247       handle_force_counters_update();
248       set_forced_counters_update(false);
249 
250       // Retract forceful part of soft refs policy
251       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
252 
253       // Clear metaspace oom flag, if current cycle unloaded classes
254       if (heap->unload_classes()) {
255         heuristics->clear_metaspace_oom();
256       }
257 
258       // Commit worker statistics to cycle data
259       heap->phase_timings()->flush_par_workers_to_cycle();
260       if (ShenandoahPacing) {
261         heap->pacer()->flush_stats_to_cycle();
262       }
263 
264       // Print GC stats for current cycle
265       {
266         LogTarget(Info, gc, stats) lt;
267         if (lt.is_enabled()) {
268           ResourceMark rm;
269           LogStream ls(lt);
270           heap->phase_timings()->print_cycle_on(&ls);
271           if (ShenandoahPacing) {
272             heap->pacer()->print_cycle_on(&ls);
273           }
274         }
275       }
276 
277       // Commit statistics to globals
278       heap->phase_timings()->flush_cycle_to_global();
279 
280       // Print Metaspace change following GC (if logging is enabled).
281       MetaspaceUtils::print_metaspace_change(meta_sizes);
282 
283       // GC is over, we are at idle now
284       if (ShenandoahPacing) {
285         heap->pacer()->setup_for_idle();
286       }
287     } else {
288       // Allow allocators to know we have seen this many regions
289       if (ShenandoahPacing && (allocs_seen > 0)) {
290         heap->pacer()->report_alloc(allocs_seen);
291       }
292     }
293 
294     double current = os::elapsedTime();
295 
296     if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
297       // Explicit GC tries to uncommit everything down to min capacity.
298       // Soft max change tries to uncommit everything down to target capacity.
299       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
300 
301       double shrink_before = (explicit_gc_requested || soft_max_changed) ?
302                              current :
303                              current - (ShenandoahUncommitDelay / 1000.0);
304 
305       size_t shrink_until = soft_max_changed ?
306                              heap->soft_max_capacity() :
307                              heap->min_capacity();
308 
309       service_uncommit(shrink_before, shrink_until);
310       heap->phase_timings()->flush_cycle_to_global();
311       last_shrink_time = current;
312     }
313 
314     // Wait before performing the next action. If allocation happened during this wait,
315     // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
316     // back off exponentially.
317     if (_heap_changed.try_unset()) {
318       sleep = ShenandoahControlIntervalMin;
319     } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
320       sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
321       last_sleep_adjust_time = current;
322     }
323     os::naked_short_sleep(sleep);
324   }
325 
326   // Wait for the actual stop(), can't leave run_service() earlier.
327   while (!should_terminate()) {
328     os::naked_short_sleep(ShenandoahControlIntervalMin);
329   }
330 }
331 
332 bool ShenandoahControlThread::check_soft_max_changed() const {
333   ShenandoahHeap* heap = ShenandoahHeap::heap();
334   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
335   size_t old_soft_max = heap->soft_max_capacity();
336   if (new_soft_max != old_soft_max) {
337     new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
338     new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
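        // The two clamps above keep the new target within [min_capacity, max_capacity].
        // For example (hypothetical values): with a 1g minimum and an 8g maximum heap, a
        // runtime SoftMaxHeapSize update to 16g would be clamped back to 8g before use.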
339     if (new_soft_max != old_soft_max) {
340       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
341                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
342                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
343       );
344       heap->set_soft_max_capacity(new_soft_max);
345       return true;
346     }
347   }
348   return false;
349 }
350 
351 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
352   // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
353   // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
354   // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
355   // tries to evac something and no memory is available), the cycle degrades to Full GC.
356   //
357   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
358   // heuristics says there are no regions to compact, and all the collection comes from immediately
359   // reclaimable regions.
360   //
361   // ................................................................................................
362   //
363   //                                    (immediate garbage shortcut)                Concurrent GC
364   //                             /-------------------------------------------\
365   //                             |                                           |
366   //                             |                                           |
367   //                             |                                           |
368   //                             |                                           v
369   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
370   //                   |                    |                 |              ^
371   //                   | (af)               | (af)            | (af)         |
372   // ..................|....................|.................|..............|.......................
373   //                   |                    |                 |              |
374   //                   |                    |                 |              |      Degenerated GC
375   //                   v                    v                 v              |
376   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
377   //                   |                    |                 |              ^
378   //                   | (af)               | (af)            | (af)         |
379   // ..................|....................|.................|..............|.......................
380   //                   |                    |                 |              |
381   //                   |                    v                 |              |      Full GC
382   //                   \------------------->o<----------------/              |
383   //                                        |                                |
384   //                                        v                                |
385   //                                      Full GC  --------------------------/
386   //
387   ShenandoahHeap* heap = ShenandoahHeap::heap();
388   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
389 
390   GCIdMark gc_id_mark;
391   ShenandoahGCSession session(cause);
392 
393   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
394 
395   ShenandoahConcurrentGC gc;
396   if (gc.collect(cause)) {
397     // Cycle is complete
398     heap->heuristics()->record_success_concurrent();
399     heap->shenandoah_policy()->record_success_concurrent();

400   } else {
401     assert(heap->cancelled_gc(), "Must have been cancelled");
402     check_cancellation_or_degen(gc.degen_point());

403   }
404 }
405 
406 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
407   ShenandoahHeap* heap = ShenandoahHeap::heap();
408   if (heap->cancelled_gc()) {
409     assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
410     if (!in_graceful_shutdown()) {
411       assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
412               "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
413       _degen_point = point;
414     }
415     return true;
416   }
417   return false;
418 }
419 
420 void ShenandoahControlThread::stop_service() {
421   // Nothing to do here.
422 }
423 
424 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {

425   GCIdMark gc_id_mark;
426   ShenandoahGCSession session(cause);
427 
428   ShenandoahFullGC gc;
429   gc.collect(cause);
430 
431   ShenandoahHeap* const heap = ShenandoahHeap::heap();
432   heap->heuristics()->record_success_full();
433   heap->shenandoah_policy()->record_success_full();
434 }
435 
436 void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
437   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
438 
439   GCIdMark gc_id_mark;
440   ShenandoahGCSession session(cause);
441 
442   ShenandoahDegenGC gc(point);
443   gc.collect(cause);
444 
445   ShenandoahHeap* const heap = ShenandoahHeap::heap();
446   heap->heuristics()->record_success_degenerated();
447   heap->shenandoah_policy()->record_success_degenerated();
448 }
449 
450 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
451   ShenandoahHeap* heap = ShenandoahHeap::heap();
452 
453   // Determine if there is work to do. This avoids taking heap lock if there is
454   // no work available, avoids spamming logs with superfluous logging messages,
455   // and minimises the amount of work while locks are taken.
456 
457   if (heap->committed() <= shrink_until) return;
458 
459   bool has_work = false;
460   for (size_t i = 0; i < heap->num_regions(); i++) {
461     ShenandoahHeapRegion *r = heap->get_region(i);
462     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
463       has_work = true;
464       break;
465     }
466   }
467 
468   if (has_work) {
469     heap->entry_uncommit(shrink_before, shrink_until);
470   }
471 }
472 
473 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
474   return GCCause::is_user_requested_gc(cause) ||
475          GCCause::is_serviceability_requested_gc(cause);
476 }
477 
478 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
479   assert(GCCause::is_user_requested_gc(cause) ||
480          GCCause::is_serviceability_requested_gc(cause) ||
481          cause == GCCause::_metadata_GC_clear_soft_refs ||
482          cause == GCCause::_codecache_GC_aggressive ||
483          cause == GCCause::_codecache_GC_threshold ||
484          cause == GCCause::_full_gc_alot ||
485          cause == GCCause::_wb_young_gc ||
486          cause == GCCause::_wb_full_gc ||
487          cause == GCCause::_wb_breakpoint ||
488          cause == GCCause::_scavenge_alot,
489          "only requested GCs here: %s", GCCause::to_string(cause));
490 
491   if (is_explicit_gc(cause)) {
492     if (!DisableExplicitGC) {
493       handle_requested_gc(cause);
494     }
495   } else {
496     handle_requested_gc(cause);
497   }
498 }
499 
500 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
501   // Make sure we have at least one complete GC cycle before unblocking
502   // from the explicit GC request.
503   //
504   // This is especially important for weak references cleanup and/or native
505   // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
506   // comes very late in the already running cycle, it would miss lots of new
507   // opportunities for cleanup that were made available before the caller
508   // requested the GC.
509 
510   MonitorLocker ml(&_gc_waiters_lock);
511   size_t current_gc_id = get_gc_id();
512   size_t required_gc_id = current_gc_id + 1;
513   while (current_gc_id < required_gc_id) {
514     // Although setting the gc request happens under _gc_waiters_lock, the read side (run_service())
515     // does not take the lock. We need to enforce the following order, so that the read side sees
516     // the latest requested gc cause when the flag is set.
517     _requested_gc_cause = cause;
518     _gc_requested.set();
519 
520     if (cause != GCCause::_wb_breakpoint) {
521       ml.wait();
522     }
523     current_gc_id = get_gc_id();
524   }
525 }
526 
527 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
528   ShenandoahHeap* heap = ShenandoahHeap::heap();
529 
530   assert(current()->is_Java_thread(), "expect Java thread here");
531 
532   if (try_set_alloc_failure_gc()) {
533     // Only report the first allocation failure
534     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
535                  req.type_string(),
536                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
537 
538     // Now that alloc failure GC is scheduled, we can abort everything else
539     heap->cancel_gc(GCCause::_allocation_failure);
540   }
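      // Every thread that reaches this point -- not only the one that won the race above --
      // blocks here until the control thread finishes the allocation-failure GC and calls
      // notify_alloc_failure_waiters().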
541 
542   MonitorLocker ml(&_alloc_failure_waiters_lock);
543   while (is_alloc_failure_gc()) {
544     ml.wait();

545   }
546 }
547 
548 void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
549   ShenandoahHeap* heap = ShenandoahHeap::heap();
550 
551   if (try_set_alloc_failure_gc()) {
552     // Only report the first allocation failure
553     log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
554                  byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
555   }
556 
557   // Forcefully report allocation failure
558   heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
559 }
560 
561 void ShenandoahControlThread::notify_alloc_failure_waiters() {
562   _alloc_failure_gc.unset();
563   MonitorLocker ml(&_alloc_failure_waiters_lock);
564   ml.notify_all();
565 }
566 
567 bool ShenandoahControlThread::try_set_alloc_failure_gc() {
568   return _alloc_failure_gc.try_set();
569 }
570 
571 bool ShenandoahControlThread::is_alloc_failure_gc() {
572   return _alloc_failure_gc.is_set();
573 }
574 
575 void ShenandoahControlThread::notify_gc_waiters() {
576   _gc_requested.unset();
577   MonitorLocker ml(&_gc_waiters_lock);
578   ml.notify_all();
579 }
580 
581 void ShenandoahControlThread::handle_counters_update() {
582   if (_do_counters_update.is_set()) {
583     _do_counters_update.unset();
584     ShenandoahHeap::heap()->monitoring_support()->update_counters();
585   }
586 }
587 
588 void ShenandoahControlThread::handle_force_counters_update() {
589   if (_force_counters_update.is_set()) {
590     _do_counters_update.unset(); // reset these too, we do update now!
591     ShenandoahHeap::heap()->monitoring_support()->update_counters();
592   }
593 }
594 
595 void ShenandoahControlThread::notify_heap_changed() {
596   // This is called from allocation path, and thus should be fast.
597 
598   // Update monitoring counters when we took a new region. This amortizes the
599   // update costs on slow path.
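      // The is_unset() checks below presumably avoid redundant writes to the shared flags
      // from the hot allocation path once the flags are already set.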
600   if (_do_counters_update.is_unset()) {
601     _do_counters_update.set();
602   }
603   // Notify that something had changed.
604   if (_heap_changed.is_unset()) {
605     _heap_changed.set();
606   }
607 }
608 
609 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
610   assert(ShenandoahPacing, "should only call when pacing is enabled");
611   Atomic::add(&_allocs_seen, words, memory_order_relaxed);
612 }
613 
614 void ShenandoahControlThread::set_forced_counters_update(bool value) {
615   _force_counters_update.set_cond(value);
616 }
617 
618 void ShenandoahControlThread::reset_gc_id() {
619   Atomic::store(&_gc_id, (size_t)0);
620 }
621 
622 void ShenandoahControlThread::update_gc_id() {
623   Atomic::inc(&_gc_id);
624 }
625 
626 size_t ShenandoahControlThread::get_gc_id() {
627   return Atomic::load(&_gc_id);
628 }
629 
630 void ShenandoahControlThread::start() {
631   create_and_start();
632 }
633 
634 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
635   _graceful_shutdown.set();
636 }
637 
638 bool ShenandoahControlThread::in_graceful_shutdown() {
639   return _graceful_shutdown.is_set();
640 }

 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 29 #include "gc/shenandoah/shenandoahControlThread.hpp"
 30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 32 #include "gc/shenandoah/shenandoahFullGC.hpp"
 33 #include "gc/shenandoah/shenandoahGeneration.hpp"
 34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"

 35 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 36 #include "gc/shenandoah/shenandoahPacer.inline.hpp"

 37 #include "gc/shenandoah/shenandoahUtils.hpp"


 38 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
 39 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 40 #include "logging/log.hpp"
 41 #include "memory/metaspaceUtils.hpp"
 42 #include "memory/metaspaceStats.hpp"


 43 
 44 ShenandoahControlThread::ShenandoahControlThread() :
 45   ShenandoahController(),
 46   _requested_gc_cause(GCCause::_no_cause_specified),
 47   _degen_point(ShenandoahGC::_degenerated_outside_cycle) {
 48   set_name("Shenandoah Control Thread");
 49   create_and_start();
 50 }
 51 
 52 void ShenandoahControlThread::run_service() {
 53   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 54 
 55   const GCMode default_mode = concurrent_normal;
 56   const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
 57   int sleep = ShenandoahControlIntervalMin;
 58 
 59   double last_shrink_time = os::elapsedTime();
 60   double last_sleep_adjust_time = os::elapsedTime();
 61 
 62   // Shrink period avoids constantly polling regions for shrinking.
 63   // Having a period 10x lower than the delay would mean we hit the
 64   // shrinking with lag of less than 1/10-th of true delay.
 65   // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
 66   const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
 67 
 68   ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
 69   ShenandoahHeuristics* const heuristics = heap->heuristics();
 70   while (!in_graceful_shutdown() && !should_terminate()) {
 71     // Figure out if we have pending requests.
 72     const bool alloc_failure_pending = _alloc_failure_gc.is_set();
 73     const bool is_gc_requested = _gc_requested.is_set();
 74     const GCCause::Cause requested_gc_cause = _requested_gc_cause;


 75 
 76     // This control loop iteration has seen this much allocation.
 77     const size_t allocs_seen = reset_allocs_seen();
 78 
 79     // Check if we have seen a new target for soft max heap size.
 80     const bool soft_max_changed = heap->check_soft_max_changed();
 81 
 82     // Choose which GC mode to run in. The block below should select a single mode.
 83     GCMode mode = none;
 84     GCCause::Cause cause = GCCause::_last_gc_cause;
 85     ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
 86 
 87     if (alloc_failure_pending) {
 88       // Allocation failure takes precedence: we have to deal with it first thing
 89       log_info(gc)("Trigger: Handle Allocation Failure");
 90 
 91       cause = GCCause::_allocation_failure;
 92 
 93       // Consume the degen point, and seed it with default value
 94       degen_point = _degen_point;
 95       _degen_point = ShenandoahGC::_degenerated_outside_cycle;
 96 
 97       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
 98         heuristics->record_allocation_failure_gc();
 99         policy->record_alloc_failure_to_degenerated(degen_point);
100         mode = stw_degenerated;
101       } else {
102         heuristics->record_allocation_failure_gc();
103         policy->record_alloc_failure_to_full();
104         mode = stw_full;
105       }
106     } else if (is_gc_requested) {
107       cause = requested_gc_cause;
108       log_info(gc)("Trigger: GC request (%s)", GCCause::to_string(cause));
109       heuristics->record_requested_gc();
110 
111       if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
112         mode = stw_full;
113       } else {
114         mode = default_mode;
115         // Unload and clean up everything
116         heap->set_unload_classes(heuristics->can_unload_classes());
117       }
118     } else {
119       // Potential normal cycle: ask heuristics if it wants to act
120       if (heuristics->should_start_gc()) {
121         mode = default_mode;
122         cause = default_cause;
123       }
124 
125       // Ask policy if this cycle wants to process references or unload classes
126       heap->set_unload_classes(heuristics->should_unload_classes());
127     }
128 
129     // Blow all soft references on this cycle if handling an allocation failure, a GC
130     // request, or if we are requested to do so unconditionally.
131     if (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs) {
132       heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
133     }
134 
135     const bool gc_requested = (mode != none);
136     assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
137 
138     if (gc_requested) {
139       // GC is starting, bump the internal ID
140       update_gc_id();
141 
142       heap->reset_bytes_allocated_since_gc_start();
143 
144       MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
145 
146       // If GC was requested, we are sampling the counters even without actual triggers
147       // from allocation machinery. This captures GC phases more accurately.
148       heap->set_forced_counters_update(true);
149 
150       // If GC was requested, we better dump freeset data for performance debugging
151       {
152         ShenandoahHeapLocker locker(heap->lock());
153         heap->free_set()->log_status();
154       }
155 
156       switch (mode) {
157         case concurrent_normal:
158           service_concurrent_normal_cycle(cause);
159           break;
160         case stw_degenerated:
161           service_stw_degenerated_cycle(cause, degen_point);
162           break;
163         case stw_full:
164           service_stw_full_cycle(cause);
165           break;
166         default:
167           ShouldNotReachHere();
168       }
169 
170       // If this was the requested GC cycle, notify waiters about it
171       if (is_gc_requested) {
172         notify_gc_waiters();
173       }
174 
175       // If this was the allocation failure GC cycle, notify waiters about it
176       if (alloc_failure_pending) {
177         notify_alloc_failure_waiters();
178       }
179 
180       // Report current free set state at the end of cycle, whether
181       // it is a normal completion, or the abort.
182       {
183         ShenandoahHeapLocker locker(heap->lock());
184         heap->free_set()->log_status();
185 
186         // Notify Universe about new heap usage. This has implications for
187         // global soft refs policy, and we better report it every time heap
188         // usage goes down.
189         heap->update_capacity_and_used_at_gc();
190 
191         // Signal that we have completed a visit to all live objects.
192         heap->record_whole_heap_examined_timestamp();
193       }
194 
195       // Disable forced counters update, and update counters one more time
196       // to capture the state at the end of GC session.
197       heap->handle_force_counters_update();
198       heap->set_forced_counters_update(false);
199 
200       // Retract forceful part of soft refs policy
201       heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
202 
203       // Clear metaspace oom flag, if current cycle unloaded classes
204       if (heap->unload_classes()) {
205         heuristics->clear_metaspace_oom();
206       }
207 
208       // Commit worker statistics to cycle data
209       heap->phase_timings()->flush_par_workers_to_cycle();
210       if (ShenandoahPacing) {
211         heap->pacer()->flush_stats_to_cycle();
212       }
213 
214       // Print GC stats for current cycle
215       {
216         LogTarget(Info, gc, stats) lt;
217         if (lt.is_enabled()) {
218           ResourceMark rm;
219           LogStream ls(lt);
220           heap->phase_timings()->print_cycle_on(&ls);
221           if (ShenandoahPacing) {
222             heap->pacer()->print_cycle_on(&ls);
223           }
224         }
225       }
226 
227       // Commit statistics to globals
228       heap->phase_timings()->flush_cycle_to_global();
229 
230       // Print Metaspace change following GC (if logging is enabled).
231       MetaspaceUtils::print_metaspace_change(meta_sizes);
232 
233       // GC is over, we are at idle now
234       if (ShenandoahPacing) {
235         heap->pacer()->setup_for_idle();
236       }
237     } else {
238       // Report to pacer that we have seen this many words allocated
239       if (ShenandoahPacing && (allocs_seen > 0)) {
240         heap->pacer()->report_alloc(allocs_seen);
241       }
242     }
243 
244     const double current = os::elapsedTime();
245 
246     if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
247       // Explicit GC tries to uncommit everything down to min capacity.
248       // Soft max change tries to uncommit everything down to target capacity.
249       // Periodic uncommit tries to uncommit suitable regions down to min capacity.
250 
251       double shrink_before = (is_gc_requested || soft_max_changed) ?
252                              current :
253                              current - (ShenandoahUncommitDelay / 1000.0);
254 
255       size_t shrink_until = soft_max_changed ?
256                              heap->soft_max_capacity() :
257                              heap->min_capacity();
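          // Worked example (assumed value, not from this change): on a periodic pass with
          // ShenandoahUncommitDelay = 300000 ms, shrink_before = now - 300 s, so only regions
          // that have sat empty for at least 5 minutes are candidates for uncommit.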
258 
259       heap->maybe_uncommit(shrink_before, shrink_until);
260       heap->phase_timings()->flush_cycle_to_global();
261       last_shrink_time = current;
262     }
263 
264     // Wait before performing the next action. If allocation happened during this wait,
265     // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
266     // back off exponentially.
267     if (heap->has_changed()) {
268       sleep = ShenandoahControlIntervalMin;
269     } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
270       sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
271       last_sleep_adjust_time = current;
272     }
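        // Assuming typical settings (for illustration: ShenandoahControlIntervalMin = 1 ms,
        // ShenandoahControlIntervalMax = 10 ms, adjust period = 1000 ms), the sleep starts at
        // 1 ms, doubles at most once per second while idle (1 -> 2 -> 4 -> 8 -> 10 ms), and
        // snaps back to the minimum as soon as the heap reports a change.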
273     os::naked_short_sleep(sleep);
274   }
275 
276   // Wait for the actual stop(), can't leave run_service() earlier.
277   while (!should_terminate()) {
278     os::naked_short_sleep(ShenandoahControlIntervalMin);
279   }
280 }
281
282 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
283   // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
284   // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
285   // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
286   // tries to evac something and no memory is available), the cycle degrades to Full GC.
287   //
288   // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
289   // heuristics says there are no regions to compact, and all the collection comes from immediately
290   // reclaimable regions.
291   //
292   // ................................................................................................
293   //
294   //                                    (immediate garbage shortcut)                Concurrent GC
295   //                             /-------------------------------------------\
296   //                             |                                           |
297   //                             |                                           |
298   //                             |                                           |
299   //                             |                                           v
300   // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
301   //                   |                    |                 |              ^
302   //                   | (af)               | (af)            | (af)         |
303   // ..................|....................|.................|..............|.......................
304   //                   |                    |                 |              |
305   //                   |                    |                 |              |      Degenerated GC
306   //                   v                    v                 v              |
307   //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
308   //                   |                    |                 |              ^
309   //                   | (af)               | (af)            | (af)         |
310   // ..................|....................|.................|..............|.......................
311   //                   |                    |                 |              |
312   //                   |                    v                 |              |      Full GC
313   //                   \------------------->o<----------------/              |
314   //                                        |                                |
315   //                                        v                                |
316   //                                      Full GC  --------------------------/
317   //
318   ShenandoahHeap* heap = ShenandoahHeap::heap();
319   if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
320 
321   GCIdMark gc_id_mark;
322   ShenandoahGCSession session(cause, heap->global_generation());
323 
324   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
325 
326   ShenandoahConcurrentGC gc(heap->global_generation(), false);
327   if (gc.collect(cause)) {
328     // Cycle is complete
329     heap->global_generation()->heuristics()->record_success_concurrent();
330     heap->shenandoah_policy()->record_success_concurrent(false, gc.abbreviated());
331     heap->log_heap_status("At end of GC");
332   } else {
333     assert(heap->cancelled_gc(), "Must have been cancelled");
334     check_cancellation_or_degen(gc.degen_point());
335     heap->log_heap_status("At end of cancelled GC");
336   }
337 }
338 
339 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
340   ShenandoahHeap* heap = ShenandoahHeap::heap();
341   if (heap->cancelled_gc()) {
342     assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
343     if (!in_graceful_shutdown()) {
344       assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
345               "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
346       _degen_point = point;
347     }
348     return true;
349   }
350   return false;
351 }
352 
353 void ShenandoahControlThread::stop_service() {
354   // Nothing to do here.
355 }
356 
357 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
358   ShenandoahHeap* const heap = ShenandoahHeap::heap();
359   GCIdMark gc_id_mark;
360   ShenandoahGCSession session(cause, heap->global_generation());
361 
362   ShenandoahFullGC gc;
363   gc.collect(cause);
364 }
365 
366 void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
367   assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
368   ShenandoahHeap* const heap = ShenandoahHeap::heap();
369   GCIdMark gc_id_mark;
370   ShenandoahGCSession session(cause, heap->global_generation());
371 
372   ShenandoahDegenGC gc(point, heap->global_generation());
373   gc.collect(cause);
374 }
375 
376 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
377   if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
378     handle_requested_gc(cause);
379   }
380 }
381 
382 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
383   // For normal requested GCs (System.gc) we want to block the caller. However,
384   // for whitebox requested GC, we want to initiate the GC and return immediately.
385   // The whitebox caller thread will arrange for itself to wait until the GC notifies
386   // it that it has reached the requested breakpoint (phase in the GC).
387   if (cause == GCCause::_wb_breakpoint) {
388     _requested_gc_cause = cause;
389     _gc_requested.set();
390     return;
391   }
392 
393   // Make sure we have at least one complete GC cycle before unblocking
394   // from the explicit GC request.
395   //
396   // This is especially important for weak references cleanup and/or native
397   // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
398   // comes very late in the already running cycle, it would miss lots of new
399   // opportunities for cleanup that were made available before the caller
400   // requested the GC.
401 
402   MonitorLocker ml(&_gc_waiters_lock);
403   size_t current_gc_id = get_gc_id();
404   size_t required_gc_id = current_gc_id + 1;
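      // Walk-through with hypothetical ids: if get_gc_id() returned 7 above, we wait until it
      // reaches 8, i.e. until a cycle that started after this request has run. The cause and
      // flag are re-armed on every wakeup because notify_gc_waiters() clears the flag; without
      // re-arming, a wakeup from a cycle that does not yet satisfy this waiter could leave the
      // request dropped.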
405   while (current_gc_id < required_gc_id) {
406     // Although setting the gc request happens under _gc_waiters_lock, the read side (run_service())
407     // does not take the lock. We need to enforce the following order, so that the read side sees
408     // the latest requested gc cause when the flag is set.
409     _requested_gc_cause = cause;
410     _gc_requested.set();
411
412     ml.wait();
413     current_gc_id = get_gc_id();
414   }
415 }
416
417 void ShenandoahControlThread::notify_gc_waiters() {
418   _gc_requested.unset();
419   MonitorLocker ml(&_gc_waiters_lock);
420   ml.notify_all();
421 }