13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
29 #include "gc/shenandoah/shenandoahControlThread.hpp"
30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
32 #include "gc/shenandoah/shenandoahFullGC.hpp"
33 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
39 #include "gc/shenandoah/shenandoahUtils.hpp"
40 #include "gc/shenandoah/shenandoahVMOperations.hpp"
41 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
42 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
43 #include "memory/iterator.hpp"
44 #include "memory/metaspaceUtils.hpp"
45 #include "memory/metaspaceStats.hpp"
46 #include "memory/universe.hpp"
47 #include "runtime/atomic.hpp"
48
49 ShenandoahControlThread::ShenandoahControlThread() :
50 ConcurrentGCThread(),
51 _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true),
52 _gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true),
53 _periodic_task(this),
54 _requested_gc_cause(GCCause::_no_cause_specified),
55 _degen_point(ShenandoahGC::_degenerated_outside_cycle),
56 _allocs_seen(0) {
57 set_name("Shenandoah Control Thread");
58 reset_gc_id();
59 create_and_start();
60 _periodic_task.enroll();
61 if (ShenandoahPacing) {
62 _periodic_pacer_notify_task.enroll();
63 }
64 }
65
66 ShenandoahControlThread::~ShenandoahControlThread() {
67 // This is here so that super is called.
68 }
69
70 void ShenandoahPeriodicTask::task() {
71 _thread->handle_force_counters_update();
72 _thread->handle_counters_update();
73 }
74
75 void ShenandoahPeriodicPacerNotify::task() {
76 assert(ShenandoahPacing, "Should not be here otherwise");
77 ShenandoahHeap::heap()->pacer()->notify_waiters();
78 }
79
80 void ShenandoahControlThread::run_service() {
81 ShenandoahHeap* heap = ShenandoahHeap::heap();
82
83 GCMode default_mode = concurrent_normal;
84 GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
85 int sleep = ShenandoahControlIntervalMin;
86
87 double last_shrink_time = os::elapsedTime();
88 double last_sleep_adjust_time = os::elapsedTime();
89
90 // Shrink period avoids constantly polling regions for shrinking.
91 // Using a period 10x shorter than the delay means we detect shrinking
92 // opportunities with a lag of at most 1/10th of the true delay.
93 // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
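// For example, a ShenandoahUncommitDelay of 1000 ms gives shrink_period = 1000 / 1000 / 10 = 0.1 s,
// while a 5 minute delay (300000 ms) gives 30 s. (Example values only, not the flag defaults.)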
94 double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
95
96 ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
97 ShenandoahHeuristics* heuristics = heap->heuristics();
98 while (!in_graceful_shutdown() && !should_terminate()) {
99 // Figure out if we have pending requests.
100 bool alloc_failure_pending = _alloc_failure_gc.is_set();
101 bool is_gc_requested = _gc_requested.is_set();
102 GCCause::Cause requested_gc_cause = _requested_gc_cause;
103 bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
104 bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);
105
106 // This control loop iteration has seen this much allocation.
107 size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
108
109 // Check if we have seen a new target for soft max heap size.
110 bool soft_max_changed = check_soft_max_changed();
111
112 // Choose which GC mode to run in. The block below should select a single mode.
113 GCMode mode = none;
114 GCCause::Cause cause = GCCause::_last_gc_cause;
115 ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
116
117 if (alloc_failure_pending) {
118 // Allocation failure takes precedence: we have to deal with it first thing
119 log_info(gc)("Trigger: Handle Allocation Failure");
120
121 cause = GCCause::_allocation_failure;
122
123 // Consume the degen point, and seed it with default value
124 degen_point = _degen_point;
125 _degen_point = ShenandoahGC::_degenerated_outside_cycle;
126
127 if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
128 heuristics->record_allocation_failure_gc();
129 policy->record_alloc_failure_to_degenerated(degen_point);
130 mode = stw_degenerated;
131 } else {
132 heuristics->record_allocation_failure_gc();
133 policy->record_alloc_failure_to_full();
134 mode = stw_full;
135 }
136
137 } else if (explicit_gc_requested) {
138 cause = requested_gc_cause;
139 log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
140
141 heuristics->record_requested_gc();
142
143 if (ExplicitGCInvokesConcurrent) {
144 policy->record_explicit_to_concurrent();
145 mode = default_mode;
146 // Unload and clean up everything
147 heap->set_unload_classes(heuristics->can_unload_classes());
148 } else {
149 policy->record_explicit_to_full();
150 mode = stw_full;
151 }
152 } else if (implicit_gc_requested) {
153 cause = requested_gc_cause;
154 log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
155
156 heuristics->record_requested_gc();
157
158 if (ShenandoahImplicitGCInvokesConcurrent) {
159 policy->record_implicit_to_concurrent();
160 mode = default_mode;
161
162 // Unload and clean up everything
163 heap->set_unload_classes(heuristics->can_unload_classes());
164 } else {
165 policy->record_implicit_to_full();
166 mode = stw_full;
167 }
168 } else {
169 // Potential normal cycle: ask heuristics if it wants to act
170 if (heuristics->should_start_gc()) {
171 mode = default_mode;
172 cause = default_cause;
173 }
174
175 // Ask policy if this cycle wants to process references or unload classes
176 heap->set_unload_classes(heuristics->should_unload_classes());
177 }
178
179 // Clear all soft references on this cycle if we are handling an allocation failure,
180 // an implicit or explicit GC request, or if we are asked to do so unconditionally.
181 if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
182 heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
183 }
184
185 bool gc_requested = (mode != none);
186 assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
187
188 if (gc_requested) {
189 // GC is starting, bump the internal ID
190 update_gc_id();
191
192 heap->reset_bytes_allocated_since_gc_start();
193
194 MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
195
196 // If GC was requested, we are sampling the counters even without actual triggers
197 // from allocation machinery. This captures GC phases more accurately.
198 set_forced_counters_update(true);
199
200 // If GC was requested, we better dump freeset data for performance debugging
201 {
202 ShenandoahHeapLocker locker(heap->lock());
203 heap->free_set()->log_status();
204 }
205
206 switch (mode) {
207 case concurrent_normal:
208 service_concurrent_normal_cycle(cause);
209 break;
210 case stw_degenerated:
211 service_stw_degenerated_cycle(cause, degen_point);
212 break;
213 case stw_full:
214 service_stw_full_cycle(cause);
215 break;
216 default:
217 ShouldNotReachHere();
218 }
219
220 // If this was the requested GC cycle, notify waiters about it
221 if (explicit_gc_requested || implicit_gc_requested) {
222 notify_gc_waiters();
223 }
224
225 // If this was the allocation failure GC cycle, notify waiters about it
226 if (alloc_failure_pending) {
227 notify_alloc_failure_waiters();
228 }
229
230 // Report the current free set state at the end of the cycle, whether
231 // it completed normally or was aborted.
232 {
233 ShenandoahHeapLocker locker(heap->lock());
234 heap->free_set()->log_status();
235
236 // Notify Universe about new heap usage. This has implications for
237 // global soft refs policy, and we better report it every time heap
238 // usage goes down.
239 Universe::heap()->update_capacity_and_used_at_gc();
240
241 // Signal that we have completed a visit to all live objects.
242 Universe::heap()->record_whole_heap_examined_timestamp();
243 }
244
245 // Disable forced counters update, and update counters one more time
246 // to capture the state at the end of GC session.
247 handle_force_counters_update();
248 set_forced_counters_update(false);
249
250 // Retract forceful part of soft refs policy
251 heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
252
253 // Clear metaspace oom flag, if current cycle unloaded classes
254 if (heap->unload_classes()) {
255 heuristics->clear_metaspace_oom();
256 }
257
258 // Commit worker statistics to cycle data
259 heap->phase_timings()->flush_par_workers_to_cycle();
260 if (ShenandoahPacing) {
261 heap->pacer()->flush_stats_to_cycle();
262 }
263
264 // Print GC stats for current cycle
265 {
266 LogTarget(Info, gc, stats) lt;
267 if (lt.is_enabled()) {
268 ResourceMark rm;
269 LogStream ls(lt);
270 heap->phase_timings()->print_cycle_on(&ls);
271 if (ShenandoahPacing) {
272 heap->pacer()->print_cycle_on(&ls);
273 }
274 }
275 }
276
277 // Commit statistics to globals
278 heap->phase_timings()->flush_cycle_to_global();
279
280 // Print Metaspace change following GC (if logging is enabled).
281 MetaspaceUtils::print_metaspace_change(meta_sizes);
282
283 // GC is over, we are at idle now
284 if (ShenandoahPacing) {
285 heap->pacer()->setup_for_idle();
286 }
287 } else {
288 // Report to the pacer that we have seen this many words allocated
289 if (ShenandoahPacing && (allocs_seen > 0)) {
290 heap->pacer()->report_alloc(allocs_seen);
291 }
292 }
293
294 double current = os::elapsedTime();
295
296 if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
297 // Explicit GC tries to uncommit everything down to min capacity.
298 // Soft max change tries to uncommit everything down to target capacity.
299 // Periodic uncommit tries to uncommit suitable regions down to min capacity.
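// shrink_before is the cutoff timestamp: only regions that became empty before it are
// considered; for explicit GC and soft max changes it is "now", so all empty committed
// regions are eligible.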
300
301 double shrink_before = (explicit_gc_requested || soft_max_changed) ?
302 current :
303 current - (ShenandoahUncommitDelay / 1000.0);
304
305 size_t shrink_until = soft_max_changed ?
306 heap->soft_max_capacity() :
307 heap->min_capacity();
308
309 service_uncommit(shrink_before, shrink_until);
310 heap->phase_timings()->flush_cycle_to_global();
311 last_shrink_time = current;
312 }
313
314 // Wait before performing the next action. If allocation happened during this wait,
315 // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
316 // back off exponentially.
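// For example, with hypothetical settings ShenandoahControlIntervalMin=1 ms and
// ShenandoahControlIntervalMax=10 ms, the idle sleep grows 1 -> 2 -> 4 -> 8 -> 10 ms,
// and snaps back to 1 ms as soon as the heap changes.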
317 if (_heap_changed.try_unset()) {
318 sleep = ShenandoahControlIntervalMin;
319 } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
320 sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
321 last_sleep_adjust_time = current;
322 }
323 os::naked_short_sleep(sleep);
324 }
325
326 // Wait for the actual stop(), can't leave run_service() earlier.
327 while (!should_terminate()) {
328 os::naked_short_sleep(ShenandoahControlIntervalMin);
329 }
330 }
331
332 bool ShenandoahControlThread::check_soft_max_changed() const {
333 ShenandoahHeap* heap = ShenandoahHeap::heap();
334 size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
335 size_t old_soft_max = heap->soft_max_capacity();
336 if (new_soft_max != old_soft_max) {
337 new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
338 new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
339 if (new_soft_max != old_soft_max) {
340 log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
341 byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
342 byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
343 );
344 heap->set_soft_max_capacity(new_soft_max);
345 return true;
346 }
347 }
348 return false;
349 }
350
351 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
352 // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens during
353 // any of the concurrent phases, the cycle first degrades to Degenerated GC and completes there.
354 // If a second allocation failure happens during the Degenerated GC cycle (for example, when the GC
355 // tries to evacuate something and no memory is available), the cycle degrades to Full GC.
356 //
357 // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
358 // heuristics say there are no regions to compact and all the collected garbage comes from
359 // immediately reclaimable regions.
360 //
361 // ................................................................................................
362 //
363 //                               (immediate garbage shortcut)               Concurrent GC
364 //                             /-------------------------------------------\
365 //                             |                                           |
366 //                             |                                           |
367 //                             |                                           |
368 //                             |                                           v
369 // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
370 //                   |                    |                 |              ^
371 //                   | (af)               | (af)            | (af)         |
372 // ..................|....................|.................|..............|......................
373 //                   |                    |                 |              |
374 //                   |                    |                 |              |      Degenerated GC
375 //                   v                    v                 v              |
376 //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
377 //                   |                    |                 |              ^
378 //                   | (af)               | (af)            | (af)         |
379 // ..................|....................|.................|..............|......................
380 //                   |                    |                 |              |
381 //                   |                    v                 |              |      Full GC
382 //                   \------------------->o<----------------/              |
383 //                                        |                                |
384 //                                        v                                |
385 //                               Full GC  ---------------------------------/
386 //
387 ShenandoahHeap* heap = ShenandoahHeap::heap();
388 if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
389
390 GCIdMark gc_id_mark;
391 ShenandoahGCSession session(cause);
392
393 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
394
395 ShenandoahConcurrentGC gc;
396 if (gc.collect(cause)) {
397 // Cycle is complete
398 heap->heuristics()->record_success_concurrent();
399 heap->shenandoah_policy()->record_success_concurrent();
400 } else {
401 assert(heap->cancelled_gc(), "Must have been cancelled");
402 check_cancellation_or_degen(gc.degen_point());
403 }
404 }
405
406 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
407 ShenandoahHeap* heap = ShenandoahHeap::heap();
408 if (heap->cancelled_gc()) {
409 assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
410 if (!in_graceful_shutdown()) {
411 assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
412 "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
413 _degen_point = point;
414 }
415 return true;
416 }
417 return false;
418 }
419
420 void ShenandoahControlThread::stop_service() {
421 // Nothing to do here.
422 }
423
424 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
425 GCIdMark gc_id_mark;
426 ShenandoahGCSession session(cause);
427
428 ShenandoahFullGC gc;
429 gc.collect(cause);
430
431 ShenandoahHeap* const heap = ShenandoahHeap::heap();
432 heap->heuristics()->record_success_full();
433 heap->shenandoah_policy()->record_success_full();
434 }
435
436 void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
437 assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
438
439 GCIdMark gc_id_mark;
440 ShenandoahGCSession session(cause);
441
442 ShenandoahDegenGC gc(point);
443 gc.collect(cause);
444
445 ShenandoahHeap* const heap = ShenandoahHeap::heap();
446 heap->heuristics()->record_success_degenerated();
447 heap->shenandoah_policy()->record_success_degenerated();
448 }
449
450 void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
451 ShenandoahHeap* heap = ShenandoahHeap::heap();
452
453 // Determine if there is work to do. This avoids taking heap lock if there is
454 // no work available, avoids spamming logs with superfluous logging messages,
455 // and minimises the amount of work while locks are taken.
456
457 if (heap->committed() <= shrink_until) return;
458
459 bool has_work = false;
460 for (size_t i = 0; i < heap->num_regions(); i++) {
461 ShenandoahHeapRegion *r = heap->get_region(i);
462 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
463 has_work = true;
464 break;
465 }
466 }
467
468 if (has_work) {
469 heap->entry_uncommit(shrink_before, shrink_until);
470 }
471 }
472
473 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
474 return GCCause::is_user_requested_gc(cause) ||
475 GCCause::is_serviceability_requested_gc(cause);
476 }
477
478 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
479 assert(GCCause::is_user_requested_gc(cause) ||
480 GCCause::is_serviceability_requested_gc(cause) ||
481 cause == GCCause::_metadata_GC_clear_soft_refs ||
482 cause == GCCause::_codecache_GC_aggressive ||
483 cause == GCCause::_codecache_GC_threshold ||
484 cause == GCCause::_full_gc_alot ||
485 cause == GCCause::_wb_young_gc ||
486 cause == GCCause::_wb_full_gc ||
487 cause == GCCause::_wb_breakpoint ||
488 cause == GCCause::_scavenge_alot,
489 "only requested GCs here: %s", GCCause::to_string(cause));
490
491 if (is_explicit_gc(cause)) {
492 if (!DisableExplicitGC) {
493 handle_requested_gc(cause);
494 }
495 } else {
496 handle_requested_gc(cause);
497 }
498 }
499
500 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
501 // Make sure we have at least one complete GC cycle before unblocking
502 // from the explicit GC request.
503 //
504 // This is especially important for weak references cleanup and/or native
505 // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
506 // comes very late in the already running cycle, it would miss lots of new
507 // opportunities for cleanup that were made available before the caller
508 // requested the GC.
509
510 MonitorLocker ml(&_gc_waiters_lock);
511 size_t current_gc_id = get_gc_id();
512 size_t required_gc_id = current_gc_id + 1;
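// The gc id is bumped when a cycle starts and waiters are notified after the cycle completes;
// waiting for the id to advance past the value captured here therefore gives at least one
// complete cycle before this method returns.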
513 while (current_gc_id < required_gc_id) {
514 // Although the gc request is set under _gc_waiters_lock, the read side (run_service())
515 // does not take the lock. We need to enforce the following order, so that the read side
516 // sees the latest requested gc cause when the flag is set.
517 _requested_gc_cause = cause;
518 _gc_requested.set();
519
520 if (cause != GCCause::_wb_breakpoint) {
521 ml.wait();
522 }
523 current_gc_id = get_gc_id();
524 }
525 }
526
527 void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
528 ShenandoahHeap* heap = ShenandoahHeap::heap();
529
530 assert(current()->is_Java_thread(), "expect Java thread here");
531
532 if (try_set_alloc_failure_gc()) {
533 // Only report the first allocation failure
534 log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
535 req.type_string(),
536 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
537
538 // Now that alloc failure GC is scheduled, we can abort everything else
539 heap->cancel_gc(GCCause::_allocation_failure);
540 }
541
542 MonitorLocker ml(&_alloc_failure_waiters_lock);
543 while (is_alloc_failure_gc()) {
544 ml.wait();
545 }
546 }
547
548 void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
549 ShenandoahHeap* heap = ShenandoahHeap::heap();
550
551 if (try_set_alloc_failure_gc()) {
552 // Only report the first allocation failure
553 log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
554 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
555 }
556
557 // Forcefully report allocation failure
558 heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
559 }
560
561 void ShenandoahControlThread::notify_alloc_failure_waiters() {
562 _alloc_failure_gc.unset();
563 MonitorLocker ml(&_alloc_failure_waiters_lock);
564 ml.notify_all();
565 }
566
567 bool ShenandoahControlThread::try_set_alloc_failure_gc() {
568 return _alloc_failure_gc.try_set();
569 }
570
571 bool ShenandoahControlThread::is_alloc_failure_gc() {
572 return _alloc_failure_gc.is_set();
573 }
574
575 void ShenandoahControlThread::notify_gc_waiters() {
576 _gc_requested.unset();
577 MonitorLocker ml(&_gc_waiters_lock);
578 ml.notify_all();
579 }
580
581 void ShenandoahControlThread::handle_counters_update() {
582 if (_do_counters_update.is_set()) {
583 _do_counters_update.unset();
584 ShenandoahHeap::heap()->monitoring_support()->update_counters();
585 }
586 }
587
588 void ShenandoahControlThread::handle_force_counters_update() {
589 if (_force_counters_update.is_set()) {
590 _do_counters_update.unset(); // reset these too, we do update now!
591 ShenandoahHeap::heap()->monitoring_support()->update_counters();
592 }
593 }
594
595 void ShenandoahControlThread::notify_heap_changed() {
596 // This is called from allocation path, and thus should be fast.
597
598 // Update monitoring counters when we took a new region. This amortizes the
599 // update costs on slow path.
600 if (_do_counters_update.is_unset()) {
601 _do_counters_update.set();
602 }
603 // Notify that something had changed.
604 if (_heap_changed.is_unset()) {
605 _heap_changed.set();
606 }
607 }
608
609 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
610 assert(ShenandoahPacing, "should only call when pacing is enabled");
611 Atomic::add(&_allocs_seen, words, memory_order_relaxed);
612 }
613
614 void ShenandoahControlThread::set_forced_counters_update(bool value) {
615 _force_counters_update.set_cond(value);
616 }
617
618 void ShenandoahControlThread::reset_gc_id() {
619 Atomic::store(&_gc_id, (size_t)0);
620 }
621
622 void ShenandoahControlThread::update_gc_id() {
623 Atomic::inc(&_gc_id);
624 }
625
626 size_t ShenandoahControlThread::get_gc_id() {
627 return Atomic::load(&_gc_id);
628 }
629
630 void ShenandoahControlThread::start() {
631 create_and_start();
632 }
633
634 void ShenandoahControlThread::prepare_for_graceful_shutdown() {
635 _graceful_shutdown.set();
636 }
637
638 bool ShenandoahControlThread::in_graceful_shutdown() {
639 return _graceful_shutdown.is_set();
640 }
641 }
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
28 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
29 #include "gc/shenandoah/shenandoahControlThread.hpp"
30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
32 #include "gc/shenandoah/shenandoahFullGC.hpp"
33 #include "gc/shenandoah/shenandoahGeneration.hpp"
34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
35 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
36 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
37 #include "gc/shenandoah/shenandoahUtils.hpp"
38 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
39 #include "gc/shenandoah/mode/shenandoahMode.hpp"
40 #include "logging/log.hpp"
41 #include "memory/metaspaceUtils.hpp"
42 #include "memory/metaspaceStats.hpp"
43
44 ShenandoahControlThread::ShenandoahControlThread() :
45 ShenandoahController(),
46 _requested_gc_cause(GCCause::_no_cause_specified),
47 _degen_point(ShenandoahGC::_degenerated_outside_cycle) {
48 set_name("Shenandoah Control Thread");
49 create_and_start();
50 }
51
52 void ShenandoahControlThread::run_service() {
53 ShenandoahHeap* const heap = ShenandoahHeap::heap();
54 const GCMode default_mode = concurrent_normal;
55 const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
56 int sleep = ShenandoahControlIntervalMin;
57
58 double last_sleep_adjust_time = os::elapsedTime();
59
60 ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
61 ShenandoahHeuristics* const heuristics = heap->heuristics();
62 while (!should_terminate()) {
63 const GCCause::Cause cancelled_cause = heap->cancelled_cause();
64 if (cancelled_cause == GCCause::_shenandoah_stop_vm) {
65 break;
66 }
67
68 // Figure out if we have pending requests.
69 const bool alloc_failure_pending = ShenandoahCollectorPolicy::is_allocation_failure(cancelled_cause);
70 const bool is_gc_requested = _gc_requested.is_set();
71 const GCCause::Cause requested_gc_cause = _requested_gc_cause;
72
73 // This control loop iteration has seen this much allocation.
74 const size_t allocs_seen = reset_allocs_seen();
75
76 // Choose which GC mode to run in. The block below should select a single mode.
77 GCMode mode = none;
78 GCCause::Cause cause = GCCause::_last_gc_cause;
79 ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
80
81 if (alloc_failure_pending) {
82 // Allocation failure takes precedence: we have to deal with it first thing
83 heuristics->log_trigger("Handle Allocation Failure");
84
85 cause = GCCause::_allocation_failure;
86
87 // Consume the degen point, and seed it with default value
88 degen_point = _degen_point;
89 _degen_point = ShenandoahGC::_degenerated_outside_cycle;
90
91 if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
92 heuristics->record_allocation_failure_gc();
93 policy->record_alloc_failure_to_degenerated(degen_point);
94 mode = stw_degenerated;
95 } else {
96 heuristics->record_allocation_failure_gc();
97 policy->record_alloc_failure_to_full();
98 mode = stw_full;
99 }
100 } else if (is_gc_requested) {
101 cause = requested_gc_cause;
102 heuristics->log_trigger("GC request (%s)", GCCause::to_string(cause));
103 heuristics->record_requested_gc();
104
105 if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) {
106 mode = stw_full;
107 } else {
108 mode = default_mode;
109 // Unload and clean up everything
110 heap->set_unload_classes(heuristics->can_unload_classes());
111 }
112 } else {
113 // Potential normal cycle: ask heuristics if it wants to act
114 if (heuristics->should_start_gc()) {
115 mode = default_mode;
116 cause = default_cause;
117 }
118
119 // Ask policy if this cycle wants to process references or unload classes
120 heap->set_unload_classes(heuristics->should_unload_classes());
121 }
122
123 // Clear all soft references on this cycle if we are handling an allocation failure,
124 // a requested GC, or if we are asked to do so unconditionally.
125 if (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs) {
126 heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
127 }
128
129 const bool gc_requested = (mode != none);
130 assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
131
132 if (gc_requested) {
133 // Cannot uncommit bitmap slices during concurrent reset
134 ShenandoahNoUncommitMark forbid_region_uncommit(heap);
135
136 // GC is starting, bump the internal ID
137 update_gc_id();
138
139 GCIdMark gc_id_mark;
140
141 heuristics->cancel_trigger_request();
142
143 heap->reset_bytes_allocated_since_gc_start();
144
145 MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();
146
147 // If GC was requested, we are sampling the counters even without actual triggers
148 // from allocation machinery. This captures GC phases more accurately.
149 heap->set_forced_counters_update(true);
150
151 // If GC was requested, we better dump freeset data for performance debugging
152 heap->free_set()->log_status_under_lock();
153
154 switch (mode) {
155 case concurrent_normal:
156 service_concurrent_normal_cycle(cause);
157 break;
158 case stw_degenerated:
159 service_stw_degenerated_cycle(cause, degen_point);
160 break;
161 case stw_full:
162 service_stw_full_cycle(cause);
163 break;
164 default:
165 ShouldNotReachHere();
166 }
167
168 // If this was the requested GC cycle, notify waiters about it
169 if (is_gc_requested) {
170 notify_gc_waiters();
171 }
172
173 // If this cycle completed without being cancelled, notify waiters about it
174 if (!heap->cancelled_gc()) {
175 notify_alloc_failure_waiters();
176 }
177
178 // Report the current free set state at the end of the cycle, whether
179 // it completed normally or was aborted.
180 heap->free_set()->log_status_under_lock();
181
182 {
183 // Notify Universe about new heap usage. This has implications for
184 // global soft refs policy, and we better report it every time heap
185 // usage goes down.
186 ShenandoahHeapLocker locker(heap->lock());
187 heap->update_capacity_and_used_at_gc();
188 }
189
190 // Signal that we have completed a visit to all live objects.
191 heap->record_whole_heap_examined_timestamp();
192
193 // Disable forced counters update, and update counters one more time
194 // to capture the state at the end of GC session.
195 heap->handle_force_counters_update();
196 heap->set_forced_counters_update(false);
197
198 // Retract forceful part of soft refs policy
199 heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
200
201 // Clear metaspace oom flag, if current cycle unloaded classes
202 if (heap->unload_classes()) {
203 heuristics->clear_metaspace_oom();
204 }
205
206 // Commit worker statistics to cycle data
207 heap->phase_timings()->flush_par_workers_to_cycle();
208 if (ShenandoahPacing) {
209 heap->pacer()->flush_stats_to_cycle();
210 }
211
212 // Print GC stats for current cycle
213 {
214 LogTarget(Info, gc, stats) lt;
215 if (lt.is_enabled()) {
216 ResourceMark rm;
217 LogStream ls(lt);
218 heap->phase_timings()->print_cycle_on(&ls);
219 if (ShenandoahPacing) {
220 heap->pacer()->print_cycle_on(&ls);
221 }
222 }
223 }
224
225 // Commit statistics to globals
226 heap->phase_timings()->flush_cycle_to_global();
227
228 // Print Metaspace change following GC (if logging is enabled).
229 MetaspaceUtils::print_metaspace_change(meta_sizes);
230
231 // GC is over, we are at idle now
232 if (ShenandoahPacing) {
233 heap->pacer()->setup_for_idle();
234 }
235 } else {
236 // Report to pacer that we have seen this many words allocated
237 if (ShenandoahPacing && (allocs_seen > 0)) {
238 heap->pacer()->report_alloc(allocs_seen);
239 }
240 }
241
242 // Check if we have seen a new target for soft max heap size or if a GC was requested.
243 // Either condition triggers an attempt to uncommit regions.
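// Note: these calls only signal that uncommitting may be worthwhile; the actual region
// uncommit happens outside this control loop (an assumption based on the notify_* naming).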
244 if (ShenandoahUncommit) {
245 if (heap->check_soft_max_changed()) {
246 heap->notify_soft_max_changed();
247 } else if (is_gc_requested) {
248 heap->notify_explicit_gc_requested();
249 }
250 }
251
252 // Wait before performing the next action. If allocation happened during this wait,
253 // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
254 // back off exponentially.
255 const double current = os::elapsedTime();
256 if (heap->has_changed()) {
257 sleep = ShenandoahControlIntervalMin;
258 } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
259 sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
260 last_sleep_adjust_time = current;
261 }
262 os::naked_short_sleep(sleep);
263 }
264 }
265
266 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
267 // A normal cycle goes through all concurrent phases. If an allocation failure (af) happens during
268 // any of the concurrent phases, the cycle first degrades to Degenerated GC and completes there.
269 // If a second allocation failure happens during the Degenerated GC cycle (for example, when the GC
270 // tries to evacuate something and no memory is available), the cycle degrades to Full GC.
271 //
272 // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken when
273 // heuristics say there are no regions to compact and all the collected garbage comes from
274 // immediately reclaimable regions.
275 //
276 // ................................................................................................
277 //
278 //                               (immediate garbage shortcut)               Concurrent GC
279 //                             /-------------------------------------------\
280 //                             |                                           |
281 //                             |                                           |
282 //                             |                                           |
283 //                             |                                           v
284 // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
285 //                   |                    |                 |              ^
286 //                   | (af)               | (af)            | (af)         |
287 // ..................|....................|.................|..............|......................
288 //                   |                    |                 |              |
289 //                   |                    |                 |              |      Degenerated GC
290 //                   v                    v                 v              |
291 //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
292 //                   |                    |                 |              ^
293 //                   | (af)               | (af)            | (af)         |
294 // ..................|....................|.................|..............|......................
295 //                   |                    |                 |              |
296 //                   |                    v                 |              |      Full GC
297 //                   \------------------->o<----------------/              |
298 //                                        |                                |
299 //                                        v                                |
300 //                               Full GC  ---------------------------------/
301 //
302 ShenandoahHeap* heap = ShenandoahHeap::heap();
303 if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
304
305 ShenandoahGCSession session(cause, heap->global_generation());
306
307 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
308
309 ShenandoahConcurrentGC gc(heap->global_generation(), false);
310 if (gc.collect(cause)) {
311 // Cycle is complete. There were no failed allocation requests and no degeneration, so count this as good progress.
312 heap->notify_gc_progress();
313 heap->global_generation()->heuristics()->record_success_concurrent();
314 heap->shenandoah_policy()->record_success_concurrent(false, gc.abbreviated());
315 heap->log_heap_status("At end of GC");
316 } else {
317 assert(heap->cancelled_gc(), "Must have been cancelled");
318 check_cancellation_or_degen(gc.degen_point());
319 heap->log_heap_status("At end of cancelled GC");
320 }
321 }
322
323 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
324 ShenandoahHeap* heap = ShenandoahHeap::heap();
325 if (heap->cancelled_gc()) {
326 if (heap->cancelled_cause() == GCCause::_shenandoah_stop_vm) {
327 return true;
328 }
329
330 if (ShenandoahCollectorPolicy::is_allocation_failure(heap->cancelled_cause())) {
331 assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
332 "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
333 _degen_point = point;
334 return true;
335 }
336
337 fatal("Unexpected reason for cancellation: %s", GCCause::to_string(heap->cancelled_cause()));
338 }
339 return false;
340 }
341
342 void ShenandoahControlThread::stop_service() {
343 ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_stop_vm);
344 }
345
346 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
347 ShenandoahHeap* const heap = ShenandoahHeap::heap();
348 ShenandoahGCSession session(cause, heap->global_generation());
349
350 ShenandoahFullGC gc;
351 gc.collect(cause);
352 }
353
354 void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
355 assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
356 ShenandoahHeap* const heap = ShenandoahHeap::heap();
357 ShenandoahGCSession session(cause, heap->global_generation());
358
359 ShenandoahDegenGC gc(point, heap->global_generation());
360 gc.collect(cause);
361 }
362
363 void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
364 if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
365 handle_requested_gc(cause);
366 }
367 }
368
369 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
370 if (should_terminate()) {
371 log_info(gc)("Control thread is terminating, no more GCs");
372 return;
373 }
374
375 // For normal requested GCs (System.gc) we want to block the caller. However,
376 // for a whitebox-requested GC, we want to initiate the GC and return immediately.
377 // The whitebox caller thread will arrange for itself to wait until the GC notifies
378 // it that it has reached the requested breakpoint (a phase in the GC).
379 if (cause == GCCause::_wb_breakpoint) {
380 _requested_gc_cause = cause;
381 _gc_requested.set();
382 return;
383 }
384
385 // Make sure we have at least one complete GC cycle before unblocking
386 // from the explicit GC request.
387 //
388 // This is especially important for weak references cleanup and/or native
389 // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
390 // comes very late in the already running cycle, it would miss lots of new
391 // opportunities for cleanup that were made available before the caller
392 // requested the GC.
393
394 MonitorLocker ml(&_gc_waiters_lock);
395 size_t current_gc_id = get_gc_id();
396 size_t required_gc_id = current_gc_id + 1;
397 while (current_gc_id < required_gc_id && !should_terminate()) {
398 // Although the gc request is set under _gc_waiters_lock, the read side (run_service())
399 // does not take the lock. We need to enforce the following order, so that the read side
400 // sees the latest requested gc cause when the flag is set.
401 _requested_gc_cause = cause;
402 _gc_requested.set();
403
404 ml.wait();
405 current_gc_id = get_gc_id();
406 }
407 }
408
409 void ShenandoahControlThread::notify_gc_waiters() {
410 _gc_requested.unset();
411 MonitorLocker ml(&_gc_waiters_lock);
412 ml.notify_all();
413 }
414 }