/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
  ShenandoahGC(),
  _degen_point(degen_point) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  return true;
}

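// Degenerated GC runs as a VM operation: vmop_degenerated() schedules
// VM_ShenandoahDegeneratedGC, and the VM thread calls back into entry_degenerated()
// at a safepoint.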
void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_concgc() flag. So, if we detect a failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.
  heap->clear_cancelled_gc();

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like construct: they describe the actual GC cycle,
    // but enter it at different points, depending on which concurrent phase had
    // degenerated.
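    // Fall-through order, for reference:
    //   _degenerated_outside_cycle -> choose cycle parameters, reset, STW mark
    //   _degenerated_mark          -> finish mark, prepare evacuation, early cleanup
    //   _degenerated_evac          -> evacuate, init update-refs
    //   _degenerated_updaterefs    -> update refs and roots, final cleanup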

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is wrong with
      // the heap: most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for a Full GC to reclaim as much as it can when
      // we can run the most aggressive degenerated cycle, which includes processing references
      // and class unloading, unless those features are explicitly disabled.

      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (heap->is_concurrent_mark_in_progress()) {
        ShenandoahConcurrentMark::cancel();
        heap->set_concurrent_mark_in_progress(false);
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during the concurrent -> degenerated handover.
      heap->set_unload_classes(heap->heuristics()->can_unload_classes());

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue mark, handed over from concurrent mark if
      // concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degenerated cycle: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics thought we should do the cycle, this flag would be set,
      // and we need to evacuate. Otherwise, it would be the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For the objects that are already evacuated, this
        // amounts to a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as the CSet iterator is at the beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure happened while evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continued
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();

          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics thought we should do the cycle, this flag would be set,
      // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, updating roots should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to run several back-to-back Degenerated
  // cycles: lack of progress most probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

void ShenandoahDegenGC::op_reset() {
  ShenandoahHeap::heap()->prepare_gc();
}

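// Complete STW mark. Any cancelled concurrent mark has had its state reset before
// we get here (see the assert below).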
void ShenandoahDegenGC::op_mark() {
  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(false /* full gc */);
  mark.clear();
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark;
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup of weak roots and class unloading
  heap->parallel_cleaning(false /* full gc */);
  // Prepare regions and collection set
  heap->prepare_regions_and_collection_set(false /* concurrent */);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. The strong one: new allocations would be with the new
  // freeset, which would be outside the collection set, so no cset writes would happen there.
  // The weaker one: new allocations would happen past the update watermark, and so less work
  // would be needed for reference updates (they would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /* concurrent */);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /* concurrent */);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from the concurrent update-references phase
  heap->update_heap_references(false /* concurrent */);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /* full_gc */);

  heap->update_heap_region_states(false /* concurrent */);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

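  // Rebuild the free set so subsequent allocations see the regions reclaimed by this cycle.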
  heap->rebuild_free_set(false /* concurrent */);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();

  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahDegenGC::op_degenerated_futile() {
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}

/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _generation(generation),
  _abbreviated(false),
  _consecutive_degen_with_bad_progress(0) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    bool is_bootstrap_gc = heap->old_generation()->is_bootstrapping();
    heap->mmu_tracker()->record_degenerated(GCId::current(), is_bootstrap_gc);
    const char* msg = is_bootstrap_gc ? "At end of Degenerated Bootstrap Old GC" : "At end of Degenerated Young GC";
    heap->log_heap_status(msg);
  }
  return true;
}

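// Degenerated GC runs as a VM operation: vmop_degenerated() schedules
// VM_ShenandoahDegeneratedGC, and the VM thread calls back into entry_degenerated()
// at a safepoint.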
void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
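  // The degenerated cycle changed global gc state at this safepoint; publish the
  // updated state to all Java threads before they resume.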
  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_concgc() flag. So, if we detect a failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.
  heap->clear_cancelled_gc(true /* clear oom handler */);

#ifdef ASSERT
  if (heap->mode()->is_generational()) {
    ShenandoahOldGeneration* old_generation = heap->old_generation();
    if (!heap->is_concurrent_old_mark_in_progress()) {
      // If we are not marking the old generation, there should be nothing in the old mark queues
      assert(old_generation->task_queues()->is_empty(), "Old gen task queues should be empty");
    }

    if (_generation->is_global()) {
      // If we are in a global cycle, the old generation should not be marking. It is, however,
      // allowed to be holding regions for evacuation or coalescing.
      assert(old_generation->is_idle()
             || old_generation->is_doing_mixed_evacuations()
             || old_generation->is_preparing_for_mark(),
             "Old generation cannot be in state: %s", old_generation->state_name());
    }
  }
#endif

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like construct: they describe the actual GC cycle,
    // but enter it at different points, depending on which concurrent phase had
    // degenerated.
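    // Fall-through order, for reference:
    //   _degenerated_outside_cycle -> choose cycle parameters (class unloading, remset swap)
    //   _degenerated_roots         -> cancel/hand over concurrent marking state, reset, STW mark
    //   _degenerated_mark          -> finish mark, prepare evacuation, early cleanup
    //   _degenerated_evac          -> evacuate (or promote regions in place), init update-refs
    //   _degenerated_update_refs   -> update refs and roots, final cleanup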

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is wrong with
      // the heap: most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for a Full GC to reclaim as much as it can when
      // we can run the most aggressive degenerated cycle, which includes processing references
      // and class unloading, unless those features are explicitly disabled.

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during the concurrent -> degenerated handover.
      heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
                               (!heap->mode()->is_generational() || _generation->is_global()));

      if (heap->mode()->is_generational() && _generation->is_young()) {
        // Swap remembered sets for young: the current 'write' card table becomes the
        // 'read' table that the upcoming mark will use to scan the remembered set.
        _generation->swap_remembered_set();
      }

    case _degenerated_roots:
      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (!heap->mode()->is_generational()) {
        if (heap->is_concurrent_mark_in_progress()) {
          heap->cancel_concurrent_mark();
        }
      } else {
        if (_generation->is_concurrent_mark_in_progress()) {
          // We want to allow old generation marking to be punctuated by young collections
          // (even if they have degenerated). If this is a global cycle, we'd have cancelled
          // the entire old gc before coming into this switch. Note that, unlike
          // cancel_concurrent_mark, cancel_marking on the generation does NOT abandon
          // incomplete SATB buffers. We need to separate out the old pointers, which is done below.
          _generation->cancel_marking();
        }

        if (heap->is_concurrent_mark_in_progress()) {
          // If either old or young marking is in progress, the SATB barrier will be enabled.
          // The SATB buffer may hold a mix of old and young pointers. The old pointers need to be
          // transferred to the old generation mark queues, and the young pointers are NOT part
          // of this snapshot, so they must be dropped here. It is safe to drop them here because
          // we will rescan the roots on this safepoint.
          heap->old_generation()->transfer_pointers_from_satb();
        }

        if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
          // We only need this if the concurrent cycle has already swapped the card tables.
          // Marking will use the 'read' table, but interesting pointers may have been
          // recorded in the 'write' table in the time between the cancelled concurrent cycle
          // and this degenerated cycle. These pointers need to be included in the 'read' table
          // used to scan the remembered set during the STW mark which follows here.
          _generation->merge_write_table();
        }
      }

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue mark, handed over from concurrent mark if
      // concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degenerated cycle: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics thought we should do the cycle, this flag would be set,
      // and we need to evacuate. Otherwise, it would be the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For the objects that are already evacuated, this
        // amounts to a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as the CSet iterator is at the beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure happened while evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continued
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();
          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      } else if (has_in_place_promotions(heap)) {
        // We have nothing to evacuate, but there are still regions to promote in place.
        ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_promote_regions);
        ShenandoahGenerationalHeap::heap()->promote_regions_in_place(false /* concurrent */);
      }

      // Update collector state regardless of whether there are forwarded objects
      heap->set_evacuation_in_progress(false);
      heap->set_concurrent_weak_root_in_progress(false);
      heap->set_concurrent_strong_root_in_progress(false);

      // If the heuristics thought we should do the cycle, this flag would be set,
      // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_update_refs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      } else {
        // Nothing was evacuated, so there are no references to update: the cycle ends here as abbreviated.
        _abbreviated = true;
      }

    case _degenerated_update_refs:
      if (heap->has_forwarded_objects()) {
        op_update_refs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, updating roots should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      op_cleanup_complete();

      if (heap->mode()->is_generational()) {
        ShenandoahGenerationalHeap::heap()->complete_degenerated_cycle();
      }

      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // The most common scenario for lack of good progress following a degenerated GC is an accumulation of floating
  // garbage during the most recently aborted concurrent GC effort. With generational GC, it is far more effective to
  // reclaim this floating garbage with another degenerated cycle (which focuses on the young generation and might
  // require a pause of 200 ms) rather than a full GC cycle (which may require over 2 seconds with a 10 GB old
  // generation).
  //
  // In generational mode, we only upgrade to full GC if we have done two degen cycles in a row and both indicated
  // bad progress. In non-generational mode, we preserve the original behavior, which is to upgrade to full
  // immediately following a degenerated cycle with bad progress. This preserves the original behavior of
  // non-generational Shenandoah so as to avoid introducing "surprising new behavior." It also makes less sense with
  // non-generational Shenandoah to replace a full GC with a degenerated GC, because both have similar pause times in
  // non-generational mode.
  if (!metrics.is_good_progress(_generation)) {
    _consecutive_degen_with_bad_progress++;
  } else {
    _consecutive_degen_with_bad_progress = 0;
  }
  const bool upgrade_to_full_gc = heap->mode()->is_generational()
          ? (heap->shenandoah_policy()->consecutive_degenerated_gc_count() > 1) && (_consecutive_degen_with_bad_progress >= 2)
          : (_consecutive_degen_with_bad_progress > 0);
  if (upgrade_to_full_gc) {
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
    heap->shenandoah_policy()->record_success_degenerated(_generation->is_young(), _abbreviated);
    _generation->heuristics()->record_success_degenerated();
  }
}

void ShenandoahDegenGC::op_reset() {
  _generation->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(_generation, false /* full gc */);
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark(_generation);
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup of weak roots and class unloading
  heap->parallel_cleaning(false /* full gc */);

  // Prepare regions and collection set
  _generation->prepare_regions_and_collection_set(false /* concurrent */);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. The strong one: new allocations would be with the new
  // freeset, which would be outside the collection set, so no cset writes would happen there.
  // The weaker one: new allocations would happen past the update watermark, and so less work
  // would be needed for reference updates (they would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    if (ShenandoahVerify) {
      heap->verifier()->verify_before_evacuation();
    }

    heap->set_evacuation_in_progress(true);

    heap->set_has_forwarded_objects(true);
  } else {
    if (ShenandoahVerify) {
      if (has_in_place_promotions(heap)) {
        heap->verifier()->verify_after_concmark_with_promotions();
      } else {
        heap->verifier()->verify_after_concmark();
      }
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

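// In-place promotion, generational mode only: sufficiently aged regions left out of the
// collection set can be promoted to the old generation wholesale, without copying objects.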
bool ShenandoahDegenGC::has_in_place_promotions(const ShenandoahHeap* heap) const {
  return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /* concurrent */);
}

void ShenandoahDegenGC::op_init_update_refs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->prepare_update_heap_references();
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_update_refs);
  // Handed over from the concurrent update-references phase
  heap->update_heap_references(false /* concurrent */);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /* full_gc */);

  heap->update_heap_region_states(false /* concurrent */);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_update_refs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

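  // Rebuild the free set so subsequent allocations see the regions reclaimed by this cycle.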
  heap->rebuild_free_set(false /* concurrent */);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
}

void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (<UNSET>)");
    case _degenerated_outside_cycle:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Outside of Cycle)");
    case _degenerated_roots:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Roots)");
    case _degenerated_mark:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Mark)");
    case _degenerated_evac:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Evacuation)");
    case _degenerated_update_refs:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Update Refs)");
    default:
      ShouldNotReachHere();
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (?)");
  }
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerated GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}