/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", Monitor::_safepoint_check_always, true),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", Monitor::_safepoint_check_always, true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  create_and_start();
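  // Enroll the periodic task that keeps the monitoring counters updated (see ShenandoahPeriodicTask::task()).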
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // A period 10x shorter than the delay means we detect a shrink opportunity
  // with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
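  // For example, ShenandoahUncommitDelay = 300000 ms yields a shrink_period of 30 seconds.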
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations (in words).
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahGC::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow away all soft references on this cycle if we are handling an allocation failure,
    // an implicit or explicit GC request, or if we are asked to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it completed normally or was aborted.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::heap()->update_capacity_and_used_at_gc();

        // Signal that we have completed a visit to all live objects.
        Universe::heap()->record_whole_heap_examined_timestamp();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocations seen during this iteration to the pacer
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

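      // Note that ShenandoahUncommitDelay is in milliseconds, while os::elapsedTime() is in seconds.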
      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
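    // Clamp the new value into the [min_capacity, max_capacity] range.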
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens
  // during any of the concurrent phases, the cycle first degrades to a Degenerated GC and
  // completes there. If a second allocation failure happens during the Degenerated GC cycle
  // (for example, when the GC tries to evacuate something and no memory is available), the
  // cycle degrades to a Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
  // the heuristics say there are no regions to compact, and all of the collected garbage comes
  // from immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  ShenandoahConcurrentGC gc;
  if (gc.collect(cause)) {
    // Cycle is complete
    heap->heuristics()->record_success_concurrent();
    heap->shenandoah_policy()->record_success_concurrent();
  } else {
    assert(heap->cancelled_gc(), "Must have been cancelled");
    check_cancellation_or_degen(gc.degen_point());
  }
}

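// Check whether the GC has been cancelled. If so (and we are not shutting down gracefully),
// remember the point at which the cycle was cancelled, so a subsequent Degenerated GC can
// pick up from there. Returns true if the current cycle should be abandoned.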
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahFullGC gc;
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahDegenGC gc(point);
  gc.collect(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
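  // A region is a candidate for uncommit if it is committed but empty, and has been empty since before the cutoff.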
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_wb_breakpoint ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // arrives very late in an already running cycle, that cycle would miss many of
  // the cleanup opportunities that became available before the caller requested
  // the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
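  // Arm the request and wait until at least one new GC cycle has run; the loop guards against spurious wakeups.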
  while (current_gc_id < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;

    if (cause != GCCause::_wb_breakpoint) {
      ml.wait();
    }
    current_gc_id = get_gc_id();
  }
}

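// Called by a Java thread that failed to allocate: schedule an allocation failure GC (unless one
// is already pending), cancel any GC in progress, and block until the failure GC has completed.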
void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

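// Called when an allocation fails during evacuation. Unlike handle_alloc_failure(), the caller does
// not block; the GC is cancelled and the control thread handles the failure on its next iteration.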
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
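  // Accumulate allocated words; the control loop consumes and resets this counter on each iteration.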
  Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}