 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
  ShenandoahGC(),
  _degen_point(degen_point) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  return true;
}

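// The degenerated cycle is executed as a single VM operation (VM_ShenandoahDegeneratedGC):
// all remaining phases run on the VM thread under one stop-the-world pause.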
void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can still fail. The current mechanics communicate
  // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.
  heap->clear_cancelled_gc();

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like structure: they describe the actual GC cycle,
    // but enter it at different points, depending on which concurrent phase had
    // degenerated.

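    // Roughly, the cascade below resumes the cycle at the point where it degenerated:
    //   _degenerated_outside_cycle: reset, STW mark, then fall through
    //   _degenerated_mark:          finish mark, prepare evacuation, then fall through
    //   _degenerated_evac:          (re)do evacuation, then fall through
    //   _degenerated_updaterefs:    update references, final cleanup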
    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (heap->is_concurrent_mark_in_progress()) {
        ShenandoahConcurrentMark::cancel();
        heap->set_concurrent_mark_in_progress(false);
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes(heap->heuristics()->can_unload_classes());

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue mark, handed over from concurrent mark if
      // concurrent mark has not yet completed
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degenerated cycle: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics decided we should do the cycle, this flag will be set,
      // and we can do evacuation. Otherwise, it is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as the CSet iterator is at the beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure happened while evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continued
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();

          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
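        // At this point the collection set is known to be pin-free and its
        // iterator has been rewound, so evacuation can restart from scratch.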
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics decided we should do the cycle, this flag will be set,
      // and we need to do update-refs. Otherwise, it is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      if (ClassUnloading) {
        // Disarm nmethods that were armed in the concurrent cycle.
        // In the above case, update roots should have disarmed them already.
        ShenandoahCodeRoots::disarm_nmethods();
      }

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

void ShenandoahDegenGC::op_reset() {
  ShenandoahHeap::heap()->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(false /*full gc*/);
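  // Presumably a cancelled concurrent mark can leave partial marking state
  // behind; clear it before re-marking everything from the roots.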
  mark.clear();
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark;
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup of weak roots and class unloading
  heap->parallel_cleaning(false /*full gc*/);
  // Prepare regions and collection set
  heap->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. Strong one: new allocations would be with the new freeset,
  // which would be outside the collection set, so no cset writes would happen there.
  // Weaker one: new allocations would happen past the update watermark, and so less work would
  // be needed for reference updates (they would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

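// Flips the heap from evacuation mode into reference-update mode: evacuation
// and concurrent-root flags go down, update-refs state is prepared and raised.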
void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from the concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(true /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();

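  // Note: the Full GC below runs inside the same VM operation, i.e. within
  // the already-running degenerated pause.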
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahDegenGC::op_degenerated_futile() {
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _generation(generation),
  _upgraded_to_full(false) {
}

bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    heap->log_heap_status("At end of Degenerated GC");
  }
  return true;
}

void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

70
71 void ShenandoahDegenGC::entry_degenerated() {
72 char msg[1024];
73 degen_event_message(_degen_point, msg, sizeof(msg));
74 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
75 EventMark em("%s", msg);
76 ShenandoahHeap* const heap = ShenandoahHeap::heap();
77 ShenandoahWorkerScope scope(heap->workers(),
78 ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
79 "stw degenerated gc");
80
81 heap->set_degenerated_gc_in_progress(true);
82 op_degenerated();
83 heap->set_degenerated_gc_in_progress(false);
84 }
85
void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can still fail. The current mechanics communicate
  // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.
  heap->clear_cancelled_gc(true /* clear oom handler */);

#ifdef ASSERT
  if (heap->mode()->is_generational()) {
    if (_generation->generation_mode() == GenerationMode::GLOBAL) {
      // We can only get to a degenerated global cycle _after_ a concurrent global cycle
      // has been cancelled, in which case we expect the concurrent global cycle to have
      // cancelled the old gc already.
      assert(!heap->is_old_gc_active(), "Old GC should not be active during global cycle.");
    }

    if (!heap->is_concurrent_old_mark_in_progress()) {
      // If we are not marking the old generation, there should be nothing in the old mark queues
      assert(heap->old_generation()->task_queues()->is_empty(), "Old gen task queues should be empty.");
    }
  }
#endif

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form a Duff's-device-like structure: they describe the actual GC cycle,
    // but enter it at different points, depending on which concurrent phase had
    // degenerated.

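    // Roughly, the cascade below resumes the cycle at the point where it degenerated;
    // this generational version gains an extra entry point:
    //   _degenerated_outside_cycle: move old SATB pointers, set up class unloading and
    //                               the remembered set, then fall through
    //   _degenerated_roots:         cancel concurrent marking, merge card tables, reset, STW mark
    //   _degenerated_mark:          finish mark, prepare evacuation, then fall through
    //   _degenerated_evac:          coalesce old regions (global only), (re)do evacuation,
    //                               then fall through
    //   _degenerated_updaterefs:    update references, final cleanup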
    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      if (heap->is_concurrent_old_mark_in_progress()) {
        // We have come straight into a degenerated cycle without running a concurrent cycle
        // first and the SATB barrier is enabled to support concurrent old marking. The SATB buffer
        // may hold a mix of old and young pointers. The old pointers need to be transferred
        // to the old generation mark queues and the young pointers are _not_ part of this
        // snapshot, so they must be dropped here.
        heap->transfer_old_pointers_from_satb();
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes((!heap->mode()->is_generational() || _generation->generation_mode() == GLOBAL) &&
                               _generation->heuristics()->can_unload_classes());

      if (heap->mode()->is_generational() &&
          (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify))) {
        // Swap remembered sets for young, or if the verifier will run during a global collect
        _generation->swap_remembered_set();
      }

    case _degenerated_roots:
      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (!heap->mode()->is_generational()) {
        if (heap->is_concurrent_mark_in_progress()) {
          heap->cancel_concurrent_mark();
        }
      } else {
        if (_generation->is_concurrent_mark_in_progress()) {
          // We want to allow old generation marking to be punctuated by young collections
          // (even if they have degenerated). If this is a global cycle, we'd have cancelled
          // the entire old gc before coming into this switch.
          _generation->cancel_marking();
        }
      }

      if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
        // We only need this if the concurrent cycle has already swapped the card tables.
        // Marking will use the 'read' table, but interesting pointers may have been
        // recorded in the 'write' table in the time between the cancelled concurrent cycle
        // and this degenerated cycle. These pointers need to be included in the 'read' table
        // used to scan the remembered set during the STW mark which follows here.
        _generation->merge_write_table();
      }

165
166 op_reset();
167
168 // STW mark
169 op_mark();
170
171 case _degenerated_mark:
172 // No fallthrough. Continue mark, handed over from concurrent mark if
173 // concurrent mark has yet completed
174 if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
175 heap->is_concurrent_mark_in_progress()) {
176 op_finish_mark();
177 }
178 assert(!heap->cancelled_gc(), "STW mark can not OOM");
179
180 /* Degen select Collection Set. etc. */
181 op_prepare_evacuation();
182
183 op_cleanup_early();
184
    case _degenerated_evac:

      if (heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
        op_global_coalesce_and_fill();
      }

      // If the heuristics decided we should do the cycle, this flag will be set,
      // and we can do evacuation. Otherwise, it is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        if (_degen_point == _degenerated_evac) {
          // Degeneration under the oom-evac protocol allows the mutator LRB to expose
          // references to from-space objects. This is okay, in theory, because we
          // will come to the safepoint here to complete the evacuations and update
          // the references. However, if the from-space reference is written to a
          // region that was EC during final mark or was recycled after final mark,
          // it will not have TAMS or UWM updated. Such a region is effectively
          // skipped during update references, which can lead to crashes and corruption
          // if the from-space reference is accessed.
          if (UseTLAB) {
            heap->labs_make_parsable();
          }

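          // Push the update watermark up to top for any region that allocated
          // after final mark, so update-refs does not skip those objects.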
          for (size_t i = 0; i < heap->num_regions(); i++) {
            ShenandoahHeapRegion* r = heap->get_region(i);
            if (r->is_active() && r->top() > r->get_update_watermark()) {
              r->set_update_watermark_at_safepoint(r->top());
            }
          }
        }

        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as the CSet iterator is at the beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure happened while evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continued
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();
          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics decided we should do the cycle, this flag will be set,
      // and we need to do update-refs. Otherwise, it is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      if (ClassUnloading) {
        // Disarm nmethods that were armed in the concurrent cycle.
        // In the above case, update roots should have disarmed them already.
        ShenandoahCodeRoots::disarm_nmethods();
      }

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (heap->mode()->is_generational()) {
    // In case degeneration interrupted concurrent evacuation or update references, we need to
    // clean up transient state. Otherwise, these actions have no effect.

    heap->young_generation()->unadjust_available();
    heap->old_generation()->unadjust_available();
    // No need to old_gen->increase_used(). That was done when plabs were allocated,
    // accounting for both old evacs and promotions.

    heap->set_alloc_supplement_reserve(0);
    heap->set_young_evac_reserve(0);
    heap->set_old_evac_reserve(0);
    heap->reset_old_evac_expended();
    heap->set_promoted_reserve(0);

    heap->adjust_generation_sizes();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

void ShenandoahDegenGC::op_reset() {
  _generation->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(_generation, false /*full gc*/);
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark(_generation);
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup of weak roots and class unloading
  heap->parallel_cleaning(false /*full gc*/);

  // Prepare regions and collection set
  _generation->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. Strong one: new allocations would be with the new freeset,
  // which would be outside the collection set, so no cset writes would happen there.
  // Weaker one: new allocations would happen past the update watermark, and so less work would
  // be needed for reference updates (they would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

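// Presumably this fills dead ranges in old regions with filler objects so the old
// generation stays parsable (e.g. for remembered set scanning); the actual work
// is in ShenandoahHeap::coalesce_and_fill_old_regions(), defined elsewhere.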
void ShenandoahDegenGC::op_global_coalesce_and_fill() {
  ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from the concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(true /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

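// Builds the pause message shown in logs and events, e.g. something like
// "Pause Degenerated Young GC (Evacuation)", with the exact wording depending on
// what ShenandoahGeneration::name() and ShenandoahGC::degen_point_to_string() return.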
void ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point, char* buf, size_t len) const {
  jio_snprintf(buf, len, "Pause Degenerated %s GC (%s)",
               _generation->name(), ShenandoahGC::degen_point_to_string(point));
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerate GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  _upgraded_to_full = true;
}

bool ShenandoahDegenGC::upgraded_to_full() {
  return _upgraded_to_full;
}