/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

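// Degenerated GC runs the tail of a cancelled concurrent cycle under a single
// safepoint: it enters the regular cycle at the point where the concurrent cycle
// failed and completes the remaining phases stop-the-world. If even that cannot
// make good progress, it upgrades to Full GC.
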
ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
  ShenandoahGC(),
  _degen_point(degen_point) {
}

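// GC-interface entry point: runs the entire degenerated cycle as a VM operation.
// Returns true unconditionally: by this point the degenerated cycle has either
// completed or upgraded itself to Full GC.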
bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  return true;
}

void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

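// Callback from VM_ShenandoahDegeneratedGC: executes on the VM thread, at a safepoint.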
void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to Full GC.
  heap->clear_cancelled_gc();

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like construct: they describe the actual
    // GC cycle, but enter it at different points, depending on which concurrent
    // phase degenerated.

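    // Fall-through order: outside_cycle -> mark -> evac -> updaterefs. Entering at a
    // later point skips the phases that the cancelled concurrent cycle had already
    // completed.
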
    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      // If concurrent mark was still in progress (e.g. we degenerated during concurrent
      // root mark), cancel it and reset the flag for STW mark
      if (heap->is_concurrent_mark_in_progress()) {
        ShenandoahConcurrentMark::cancel();
        heap->set_concurrent_mark_in_progress(false);
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes(heap->heuristics()->can_unload_classes());

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // This is not a fallthrough action: finish the mark handed over from concurrent
      // mark, but only if concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degen selects the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics decided we should do the cycle, this flag is set,
      // and we can do evacuation. Otherwise, this is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        // Degeneration under oom-evac protocol might have left some objects in
        // collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as CSet iterator is at beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if allocation failure happened when evacuating
        // the about-to-be-pinned object, oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continue
        // the cycle here, we would trash the cset and alive objects in it. To avoid
        // it, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();

          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics decided we should do the cycle, this flag is set,
      // and we need to do update-refs. Otherwise, this is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, updating roots should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

void ShenandoahDegenGC::op_reset() {
  ShenandoahHeap::heap()->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(false /*full gc*/);
  mark.clear();
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark;
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup weak roots and unload classes
  heap->parallel_cleaning(false /*full gc*/);
  // Prepare regions and collection set
  heap->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. The strong one: new allocations would come from the
  // new freeset, which would be outside the collection set, so no cset writes would happen
  // there. The weaker one: new allocations would happen past the update watermark, and so
  // less work would be needed for reference updates (they would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

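// Called when degeneration cannot proceed safely (a pinned cset region was found,
// or evacuation hit OOM): fall back to Full GC right away.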
void ShenandoahDegenGC::op_degenerated_fail() {
  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();

  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

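// Called when the degenerated cycle finished but made no good progress:
// upgrade to Full GC, which can reclaim more than the degenerated cycle did.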
void ShenandoahDegenGC::op_degenerated_futile() {
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}
/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

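// Generational variant: a degenerated cycle operates on behalf of a specific
// generation (young, bootstrap old, or global) and also tracks whether it had
// to upgrade itself to Full GC.
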
ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _generation(generation),
  _upgraded_to_full(false) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    bool is_bootstrap_gc = heap->old_generation()->state() == ShenandoahOldGeneration::BOOTSTRAPPING;
    heap->mmu_tracker()->record_degenerated(_generation, GCId::current(), is_bootstrap_gc,
                                            !heap->collection_set()->has_old_regions());
    const char* msg = is_bootstrap_gc ? "At end of Degenerated Bootstrap Old GC" : "At end of Degenerated Young GC";
    heap->log_heap_status(msg);
  }
  return true;
}

void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to Full GC.
  heap->clear_cancelled_gc(true /* clear oom handler */);

#ifdef ASSERT
  if (heap->mode()->is_generational()) {
    ShenandoahOldGeneration* old_generation = heap->old_generation();
    if (!heap->is_concurrent_old_mark_in_progress()) {
      // If we are not marking the old generation, there should be nothing in the old mark queues
      assert(old_generation->task_queues()->is_empty(), "Old gen task queues should be empty");
    }

    if (_generation->is_global()) {
      // If we are in a global cycle, the old generation should not be marking. It is, however,
      // allowed to be holding regions for evacuation or coalescing.
      ShenandoahOldGeneration::State state = old_generation->state();
      assert(state == ShenandoahOldGeneration::IDLE
          || state == ShenandoahOldGeneration::WAITING_FOR_EVAC
          || state == ShenandoahOldGeneration::WAITING_FOR_FILL,
             "Old generation cannot be in state: %s", old_generation->state_name());
    }
  }
#endif

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like construct: they describe the actual
    // GC cycle, but enter it at different points, depending on which concurrent
    // phase degenerated.

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
                               (!heap->mode()->is_generational() || _generation->is_global()));

      if (heap->mode()->is_generational() &&
          (_generation->is_young() || (_generation->is_global() && ShenandoahVerify))) {
        // Swap remembered sets for young, or if the verifier will run during a global collect
        // TODO: This path should not depend on ShenandoahVerify
        _generation->swap_remembered_set();
      }

    case _degenerated_roots:
      // If we degenerated during concurrent root mark, reset the marking state for STW mark
      if (!heap->mode()->is_generational()) {
        if (heap->is_concurrent_mark_in_progress()) {
          heap->cancel_concurrent_mark();
        }
      } else {
        if (_generation->is_concurrent_mark_in_progress()) {
          // We want to allow old generation marking to be punctuated by young collections
          // (even if they have degenerated). If this is a global cycle, we'd have cancelled
          // the entire old gc before coming into this switch. Note that cancel_marking on
          // the generation does NOT abandon incomplete SATB buffers as cancel_concurrent_mark does.
          // We need to separate out the old pointers, which is done below.
          _generation->cancel_marking();
        }

        if (heap->is_concurrent_mark_in_progress()) {
          // If either old or young marking is in progress, the SATB barrier will be enabled.
          // The SATB buffer may hold a mix of old and young pointers. The old pointers need to be
          // transferred to the old generation mark queues and the young pointers are NOT part
          // of this snapshot, so they must be dropped here. It is safe to drop them here because
          // we will rescan the roots on this safepoint.
          heap->transfer_old_pointers_from_satb();
        }
      }

      if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
        // We only need this if the concurrent cycle has already swapped the card tables.
        // Marking will use the 'read' table, but interesting pointers may have been
        // recorded in the 'write' table in the time between the cancelled concurrent cycle
        // and this degenerated cycle. These pointers need to be included in the 'read' table
        // used to scan the remembered set during the STW mark which follows here.
        _generation->merge_write_table();
      }

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // This is not a fallthrough action: finish the mark handed over from concurrent
      // mark, but only if concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degen selects the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics decided we should do the cycle, this flag is set,
      // and we can do evacuation. Otherwise, this is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        if (_degen_point == _degenerated_evac) {
          // Degeneration under oom-evac protocol allows the mutator LRB to expose
          // references to from-space objects. This is okay, in theory, because we
          // will come to the safepoint here to complete the evacuations and update
          // the references. However, if the from-space reference is written to a
          // region that was EC (empty-committed) during final mark or was recycled
          // after final mark, it will not have TAMS or UWM updated. Such a region is
          // effectively skipped during update references, which can lead to crashes
          // and corruption if the from-space reference is accessed.
          if (UseTLAB) {
            heap->labs_make_parsable();
          }

          for (size_t i = 0; i < heap->num_regions(); i++) {
            ShenandoahHeapRegion* r = heap->get_region(i);
            if (r->is_active() && r->top() > r->get_update_watermark()) {
              r->set_update_watermark_at_safepoint(r->top());
            }
          }
        }

        // Degeneration under oom-evac protocol might have left some objects in
        // collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as CSet iterator is at beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if allocation failure happened when evacuating
        // the about-to-be-pinned object, oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continue
        // the cycle here, we would trash the cset and alive objects in it. To avoid
        // it, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();
          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // Update collector state regardless of whether or not there are forwarded objects
      heap->set_evacuation_in_progress(false);
      heap->set_concurrent_weak_root_in_progress(false);
      heap->set_concurrent_strong_root_in_progress(false);

      // If the heuristics decided we should do the cycle, this flag is set,
      // and we need to do update-refs. Otherwise, this is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, updating roots should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
        // This is still necessary for degenerated cycles because the degeneration point may occur
        // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_updaterefs for
        // a more detailed explanation.
        heap->transfer_old_pointers_from_satb();
      }

      op_cleanup_complete();
      // We defer generation resizing actions until after cset regions have been recycled.
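      // Note: the branches below treat old_region_surplus and old_region_deficit as
      // mutually exclusive; if both were ever non-zero, the surplus transfer would win.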
      if (heap->mode()->is_generational()) {
        size_t old_region_surplus = heap->get_old_region_surplus();
        size_t old_region_deficit = heap->get_old_region_deficit();
        bool success;
        size_t region_xfer;
        const char* region_destination;
        if (old_region_surplus) {
          region_xfer = old_region_surplus;
          region_destination = "young";
          success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
        } else if (old_region_deficit) {
          region_xfer = old_region_deficit;
303 region_destination = "old";
304 success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
305 if (!success) {
306 heap->old_heuristics()->trigger_cannot_expand();
307 }
308 } else {
309 region_destination = "none";
310 region_xfer = 0;
311 success = true;
312 }
313
314 size_t young_available = heap->young_generation()->available();
315 size_t old_available = heap->old_generation()->available();
316 log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
317 SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
318 success? "successfully transferred": "failed to transfer", region_xfer, region_destination,
319 byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
320 byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
321
322 heap->set_old_region_surplus(0);
323 heap->set_old_region_deficit(0);
324 }
      break;
    default:
      ShouldNotReachHere();
  }

  if (heap->mode()->is_generational()) {
    // In case degeneration interrupted concurrent evacuation or update references, we need to
    // clean up transient state. Otherwise, these actions have no effect.
    heap->set_young_evac_reserve(0);
    heap->set_old_evac_reserve(0);
    heap->reset_old_evac_expended();
    heap->set_promoted_reserve(0);
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

void ShenandoahDegenGC::op_reset() {
  _generation->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(_generation, false /*full gc*/);
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark(_generation);
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup weak roots and unload classes
  heap->parallel_cleaning(false /*full gc*/);

  // Prepare regions and collection set
  _generation->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. The strong one: new allocations would come from the
  // new freeset, which would be outside the collection set, so no cset writes would happen
  // there. The weaker one: new allocations would happen past the update watermark, and so
  // less work would be needed for reference updates (they would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  size_t humongous_regions_promoted = heap->get_promotable_humongous_regions();
  size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place();
  if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) {
    // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
    // Degenerated evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.

    if (ShenandoahVerify) {
      heap->verifier()->verify_before_evacuation();
    }

    heap->set_evacuation_in_progress(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }

    heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
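  // Note: unlike the non-generational path, the evacuation-in-progress and concurrent
  // root flags were already cleared in op_degenerated, right after the evac case.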
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  const ShenandoahHeap* heap = ShenandoahHeap::heap();
  switch (point) {
    case _degenerated_unset:
      SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Degenerated GC", " (<UNSET>)");
    case _degenerated_outside_cycle:
      SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Degenerated GC", " (Outside of Cycle)");
    case _degenerated_roots:
      SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Degenerated GC", " (Roots)");
    case _degenerated_mark:
      SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Degenerated GC", " (Mark)");
    case _degenerated_evac:
      SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Degenerated GC", " (Evacuation)");
    case _degenerated_updaterefs:
      SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Degenerated GC", " (Update Refs)");
    default:
      ShouldNotReachHere();
      SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Degenerated GC", " (?)");
  }
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerate GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  _upgraded_to_full = true;
}

bool ShenandoahDegenGC::upgraded_to_full() {
  return _upgraded_to_full;
}