/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _abbreviated(false) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
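  // Degenerated GC either completes on its own or upgrades to Full GC
  // internally, so the collection request is always considered serviced.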
  return true;
}

void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
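  // Run the degenerated cycle as a VM operation: the VM thread executes it at
  // a safepoint and calls back into entry_degenerated() below.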
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect a failure after some
  // phase, we have to upgrade the Degenerated GC to a Full GC.
  heap->clear_cancelled_gc();

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like structure: they describe the actual
    // GC cycle, but enter it at different points, depending on which concurrent
    // phase had degenerated.

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (heap->is_concurrent_mark_in_progress()) {
        ShenandoahConcurrentMark::cancel();
        heap->set_concurrent_mark_in_progress(false);
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes(heap->heuristics()->can_unload_classes());

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue mark, handed over from concurrent mark if
      // concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degen: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics think we should do the cycle, this flag will be set,
      // and we need to do evacuations. Otherwise, this is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        // Degeneration under oom-evac protocol might have left some objects in
        // collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as CSet iterator is at beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if allocation failure happened when evacuating
        // the about-to-be-pinned object, oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continue
        // the cycle here, we would trash the cset and alive objects in it. To avoid
        // it, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
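          // Walk the entire collection set from the first region, failing the
          // degenerated cycle if any cset region has become pinned.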
          heap->collection_set()->clear_current_index();

          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

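          // Reset the iterator again so evacuation re-walks the cset from the
          // first region.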
          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics think we should do the cycle, this flag will be set,
      // and we need to do update-refs. Otherwise, this is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      } else {
        _abbreviated = true;
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, the root update should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
    heap->shenandoah_policy()->record_success_degenerated(_abbreviated);
    heap->heuristics()->record_success_degenerated();
  }
}

void ShenandoahDegenGC::op_reset() {
  ShenandoahHeap::heap()->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(false /*full gc*/);
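  // Clear any marking state left over from the cancelled concurrent cycle,
  // then mark from roots in a single STW pass.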
  mark.clear();
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark;
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup weak roots and unload classes
  heap->parallel_cleaning(false /*full gc*/);
  // Prepare regions and collection set
  heap->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. Strong one: new allocations would be made from the new
  // freeset, which would be outside the collection set, so no cset writes would happen there.
  // Weaker one: new allocations would happen past the update watermark, and so less work would
  // be needed for reference updates (we would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    if (ShenandoahVerify) {
      heap->verifier()->verify_before_evacuation();
    }

    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);
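  // The evacuation and root-processing phases are over; what remains is fixing
  // up references to the objects that were moved.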

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
}

void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerated GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}
/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _generation(generation),
  _abbreviated(false) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    bool is_bootstrap_gc = heap->old_generation()->is_bootstrapping();
    heap->mmu_tracker()->record_degenerated(GCId::current(), is_bootstrap_gc);
    const char* msg = is_bootstrap_gc ? "At end of Degenerated Bootstrap Old GC" : "At end of Degenerated Young GC";
    heap->log_heap_status(msg);
  }
  return true;
}

void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
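  // Run the degenerated cycle as a VM operation: the VM thread executes it at
  // a safepoint and calls back into entry_degenerated() below.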
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}
88
89 void ShenandoahDegenGC::op_degenerated() {
90 ShenandoahHeap* const heap = ShenandoahHeap::heap();
91 // Degenerated GC is STW, but it can also fail. Current mechanics communicates
92 // GC failure via cancelled_concgc() flag. So, if we detect the failure after
93 // some phase, we have to upgrade the Degenerate GC to Full GC.
94 heap->clear_cancelled_gc(true /* clear oom handler */);
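  // Passing true also resets the oom-during-evacuation handler, giving the
  // degenerated cycle a clean slate.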

#ifdef ASSERT
  if (heap->mode()->is_generational()) {
    ShenandoahOldGeneration* old_generation = heap->old_generation();
    if (!heap->is_concurrent_old_mark_in_progress()) {
      // If we are not marking the old generation, there should be nothing in the old mark queues
      assert(old_generation->task_queues()->is_empty(), "Old gen task queues should be empty");
    }

    if (_generation->is_global()) {
      // If we are in a global cycle, the old generation should not be marking. It is, however,
      // allowed to be holding regions for evacuation or coalescing.
      assert(old_generation->is_idle()
             || old_generation->is_doing_mixed_evacuations()
             || old_generation->is_preparing_for_mark(),
             "Old generation cannot be in state: %s", old_generation->state_name());
    }
  }
#endif

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like structure: they describe the actual
    // GC cycle, but enter it at different points, depending on which concurrent
    // phase had degenerated.

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
                               (!heap->mode()->is_generational() || _generation->is_global()));

      if (heap->mode()->is_generational() && _generation->is_young()) {
        // Swap remembered sets for young
        _generation->swap_remembered_set();
      }

    case _degenerated_roots:
      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (!heap->mode()->is_generational()) {
        if (heap->is_concurrent_mark_in_progress()) {
          heap->cancel_concurrent_mark();
        }
      } else {
        if (_generation->is_concurrent_mark_in_progress()) {
          // We want to allow old generation marking to be punctuated by young collections
          // (even if they have degenerated). If this is a global cycle, we would have cancelled
          // the entire old GC before coming into this switch. Note that cancel_marking on
          // the generation does NOT abandon incomplete SATB buffers, unlike cancel_concurrent_mark.
          // The old pointers in those buffers need to be separated out; that is done below.
          _generation->cancel_marking();
        }

        if (heap->is_concurrent_mark_in_progress()) {
          // If either old or young marking is in progress, the SATB barrier will be enabled.
          // The SATB buffer may hold a mix of old and young pointers. The old pointers need to be
          // transferred to the old generation mark queues and the young pointers are NOT part
          // of this snapshot, so they must be dropped here. It is safe to drop them here because
          // we will rescan the roots on this safepoint.
          heap->old_generation()->transfer_pointers_from_satb();
        }

        if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
          // We only need this if the concurrent cycle has already swapped the card tables.
          // Marking will use the 'read' table, but interesting pointers may have been
          // recorded in the 'write' table in the time between the cancelled concurrent cycle
          // and this degenerated cycle. These pointers need to be included in the 'read' table
          // used to scan the remembered set during the STW mark that follows here.
          _generation->merge_write_table();
        }
      }

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue mark, handed over from concurrent mark if
      // concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degen: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics think we should do the cycle, this flag will be set,
      // and we need to do evacuations. Otherwise, this is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        // Degeneration under oom-evac protocol might have left some objects in
        // collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as CSet iterator is at beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if allocation failure happened when evacuating
        // the about-to-be-pinned object, oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continue
        // the cycle here, we would trash the cset and alive objects in it. To avoid
        // it, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
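          // Walk the entire collection set from the first region, failing the
          // degenerated cycle if any cset region has become pinned.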
          heap->collection_set()->clear_current_index();
          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

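          // Reset the iterator again so evacuation re-walks the cset from the
          // first region.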
          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      } else if (has_in_place_promotions(heap)) {
        // We have nothing to evacuate, but there are still regions to promote in place.
        ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_promote_regions);
        ShenandoahGenerationalHeap::heap()->promote_regions_in_place(false /*concurrent*/);
      }

      // Update collector state regardless of whether there are forwarded objects
      heap->set_evacuation_in_progress(false);
      heap->set_concurrent_weak_root_in_progress(false);
      heap->set_concurrent_strong_root_in_progress(false);

      // If the heuristics think we should do the cycle, this flag will be set,
      // and we need to do update-refs. Otherwise, this is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      } else {
        _abbreviated = true;
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, the root update should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      op_cleanup_complete();

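      // In generational mode, let the heap finish its cycle-end bookkeeping for
      // the generations; single-generation mode has nothing more to do here.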
      if (heap->mode()->is_generational()) {
        ShenandoahGenerationalHeap::heap()->complete_degenerated_cycle();
      }

      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
    heap->shenandoah_policy()->record_success_degenerated(_generation->is_young(), _abbreviated);
    _generation->heuristics()->record_success_degenerated();
  }
}

void ShenandoahDegenGC::op_reset() {
  _generation->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(_generation, false /*full gc*/);
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark(_generation);
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup weak roots and unload classes
  heap->parallel_cleaning(false /*full gc*/);

  // Prepare regions and collection set
  _generation->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. Strong one: new allocations would be made from the new
  // freeset, which would be outside the collection set, so no cset writes would happen there.
  // Weaker one: new allocations would happen past the update watermark, and so less work would
  // be needed for reference updates (we would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    if (ShenandoahVerify) {
      heap->verifier()->verify_before_evacuation();
    }

    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);
  } else {
    if (ShenandoahVerify) {
      if (has_in_place_promotions(heap)) {
        heap->verifier()->verify_after_concmark_with_promotions();
      } else {
        heap->verifier()->verify_after_concmark();
      }
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

bool ShenandoahDegenGC::has_in_place_promotions(const ShenandoahHeap* heap) const {
  return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
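  // The evacuation flags were already cleared in op_degenerated(); here we only
  // set up the per-region state needed for the reference-update walk.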
  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
}

void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (<UNSET>)");
    case _degenerated_outside_cycle:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Outside of Cycle)");
    case _degenerated_roots:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Roots)");
    case _degenerated_mark:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Mark)");
    case _degenerated_evac:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Evacuation)");
    case _degenerated_updaterefs:
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Update Refs)");
    default:
      ShouldNotReachHere();
      SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (?)");
  }
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerated GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}