src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp

  1 /*
  2  * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 
 27 #include "gc/shared/collectorCounters.hpp"
 28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 29 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
 30 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 31 #include "gc/shenandoah/shenandoahFullGC.hpp"
 32 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 33 #include "gc/shenandoah/shenandoahMetrics.hpp"
 34 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"

 35 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 36 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 37 #include "gc/shenandoah/shenandoahSTWMark.hpp"
 38 #include "gc/shenandoah/shenandoahUtils.hpp"
 39 #include "gc/shenandoah/shenandoahVerifier.hpp"

 40 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 41 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 42 #include "runtime/vmThread.hpp"
 43 #include "utilities/events.hpp"
 44 
 45 ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
 46   ShenandoahGC(),
 47   _degen_point(degen_point) {
 48 }
 49 
 50 bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
 51   vmop_degenerated();
 52   return true;
 53 }
 54 
 55 void ShenandoahDegenGC::vmop_degenerated() {
 56   TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
 57   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
 58   VM_ShenandoahDegeneratedGC degenerated_gc(this);
 59   VMThread::execute(&degenerated_gc);
 60 }
 61 
 62 void ShenandoahDegenGC::entry_degenerated() {
 63   const char* msg = degen_event_message(_degen_point);
 64   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
 65   EventMark em("%s", msg);
 66   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 67 
 68   ShenandoahWorkerScope scope(heap->workers(),
 69                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
 70                               "stw degenerated gc");
 71 
 72   heap->set_degenerated_gc_in_progress(true);
 73   op_degenerated();
 74   heap->set_degenerated_gc_in_progress(false);
 75 }
 76 
 77 void ShenandoahDegenGC::op_degenerated() {
 78   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 79   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
 80   // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
 81   // some phase, we have to upgrade the Degenerated GC to a Full GC.
 82   heap->clear_cancelled_gc();
 83 
 84   ShenandoahMetricsSnapshot metrics;
 85   metrics.snap_before();
 86 
 87   switch (_degen_point) {
 88     // The cases below form a Duff's-like device: they describe the actual GC cycle,
 89     // but enter it at different points, depending on which concurrent phase had
 90     // degenerated.
 91 
 92     case _degenerated_outside_cycle:
 93       // We have degenerated from outside the cycle, which means something is wrong with
 94       // the heap, most probably heavy humongous fragmentation, or we are very low on free
 95       // space. It makes little sense to wait for a Full GC to reclaim as much as it can when
 96       // we can instead run the most aggressive degen cycle, which includes processing references
 97       // and class unloading, unless those features are explicitly disabled.
 98       //
 99 
100       // Degenerated from concurrent root mark; reset the flag for STW mark
101       if (heap->is_concurrent_mark_in_progress()) {
102         ShenandoahConcurrentMark::cancel();
103         heap->set_concurrent_mark_in_progress(false);
104       }
105 
106       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
107       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
108       heap->set_unload_classes(heap->heuristics()->can_unload_classes());
109 
110       op_reset();
111 
112       // STW mark
113       op_mark();
114 
115     case _degenerated_mark:
116       // No fallthrough. Continue the mark handed over from concurrent mark if
117       // concurrent mark has not yet completed
118       if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
119           heap->is_concurrent_mark_in_progress()) {
120         op_finish_mark();
121       }
122       assert(!heap->cancelled_gc(), "STW mark can not OOM");
123 
124       /* Degen: select the collection set, etc. */
125       op_prepare_evacuation();
126 
127       op_cleanup_early();
128 

152           }
153         }
154 
155         // Degeneration under oom-evac protocol might have left some objects in
156         // collection set un-evacuated. Restart evacuation from the beginning to
157         // capture all objects. For all the objects that are already evacuated,
158         // it would be a simple check, which is supposed to be fast. This is also
159         // safe to do even without degeneration, as CSet iterator is at beginning
160         // in preparation for evacuation anyway.
161         //
162         // Before doing that, we need to make sure we never had any cset-pinned
163         // regions. This may happen if allocation failure happened when evacuating
164         // the about-to-be-pinned object, oom-evac protocol left the object in
165         // the collection set, and then the pin reached the cset region. If we continue
166         // the cycle here, we would trash the cset and alive objects in it. To avoid
167         // it, we fail degeneration right away and slide into Full GC to recover.
168 
169         {
170           heap->sync_pinned_region_status();
171           heap->collection_set()->clear_current_index();
172 
173           ShenandoahHeapRegion* r;
174           while ((r = heap->collection_set()->next()) != nullptr) {
175             if (r->is_pinned()) {
176               heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
177               op_degenerated_fail();
178               return;
179             }
180           }
181 
182           heap->collection_set()->clear_current_index();
183         }
184         op_evacuate();
185         if (heap->cancelled_gc()) {
186           op_degenerated_fail();
187           return;
188         }
189       }
190 
191       // If the heuristics think we should do the cycle, this flag will be set,
192       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
193       if (heap->has_forwarded_objects()) {
194         op_init_updaterefs();
195         assert(!heap->cancelled_gc(), "STW reference update can not OOM");
196       }
197 
198     case _degenerated_updaterefs:
199       if (heap->has_forwarded_objects()) {
200         op_updaterefs();
201         op_update_roots();
202         assert(!heap->cancelled_gc(), "STW reference update can not OOM");
203       }
204 
205       // Disarm nmethods that were armed in the concurrent cycle.
206       // In the case above, updating roots should have disarmed them already.
207       ShenandoahCodeRoots::disarm_nmethods();
208 
209       op_cleanup_complete();
210       break;
211     default:
212       ShouldNotReachHere();
213   }
214 
215   if (ShenandoahVerify) {
216     heap->verifier()->verify_after_degenerated();
217   }
218 
219   if (VerifyAfterGC) {
220     Universe::verify();
221   }
222 
223   metrics.snap_after();
224 
225   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
226   // because that probably means the heap is overloaded and/or fragmented.
227   if (!metrics.is_good_progress()) {
228     heap->notify_gc_no_progress();
229     heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
230     op_degenerated_futile();
231   } else {
232     heap->notify_gc_progress();
233   }
234 }
235 
236 void ShenandoahDegenGC::op_reset() {
237   ShenandoahHeap::heap()->prepare_gc();
238 }
239 
240 void ShenandoahDegenGC::op_mark() {
241   assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
242   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
243   ShenandoahSTWMark mark(false /*full gc*/);
244   mark.clear();
245   mark.mark();
246 }
247 
248 void ShenandoahDegenGC::op_finish_mark() {
249   ShenandoahConcurrentMark mark;
250   mark.finish_mark();
251 }
252 
253 void ShenandoahDegenGC::op_prepare_evacuation() {
254   ShenandoahHeap* const heap = ShenandoahHeap::heap();
255   if (ShenandoahVerify) {
256     heap->verifier()->verify_roots_no_forwarded();
257   }
258 
259   // STW cleanup weak roots and unload classes
260   heap->parallel_cleaning(false /*full gc*/);

261   // Prepare regions and collection set
262   heap->prepare_regions_and_collection_set(false /*concurrent*/);
263 
264   // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
265   // This is needed for two reasons. Strong one: new allocations would go to the new freeset,
266   // which is outside the collection set, so no cset writes would happen there.
267   // Weaker one: new allocations would happen past the update watermark, so less work would
268   // be needed for reference updates (they would update the large filler instead).
269   if (UseTLAB) {
270     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
271     heap->tlabs_retire(false);
272   }
273 
274   if (!heap->collection_set()->is_empty()) {
275     heap->set_evacuation_in_progress(true);
276     heap->set_has_forwarded_objects(true);
277 
278     if (ShenandoahVerify) {
279       heap->verifier()->verify_during_evacuation();
280     }
281   } else {
282     if (ShenandoahVerify) {
283       heap->verifier()->verify_after_concmark();
284     }
285 
286     if (VerifyAfterGC) {
287       Universe::verify();
288     }
289   }
290 }
291 
292 void ShenandoahDegenGC::op_cleanup_early() {
293   ShenandoahHeap::heap()->recycle_trash();
294 }
295 
296 void ShenandoahDegenGC::op_evacuate() {
297   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
298   ShenandoahHeap::heap()->evacuate_collection_set(false /* concurrent*/);
299 }
300 
301 void ShenandoahDegenGC::op_init_updaterefs() {
302   // Evacuation has completed
303   ShenandoahHeap* const heap = ShenandoahHeap::heap();
304   heap->set_evacuation_in_progress(false);
305   heap->set_concurrent_weak_root_in_progress(false);
306   heap->set_concurrent_strong_root_in_progress(false);
307 
308   heap->prepare_update_heap_references(false /*concurrent*/);
309   heap->set_update_refs_in_progress(true);
310 }
311 
312 void ShenandoahDegenGC::op_updaterefs() {
313   ShenandoahHeap* const heap = ShenandoahHeap::heap();
314   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
315   // Handed over from concurrent update references phase
316   heap->update_heap_references(false /*concurrent*/);
317 
318   heap->set_update_refs_in_progress(false);
319   heap->set_has_forwarded_objects(false);
320 }
321 
322 void ShenandoahDegenGC::op_update_roots() {
323   ShenandoahHeap* const heap = ShenandoahHeap::heap();
324 
325   update_roots(false /*full_gc*/);
326 
327   heap->update_heap_region_states(false /*concurrent*/);
328 
329   if (ShenandoahVerify) {
330     heap->verifier()->verify_after_updaterefs();
331   }
332 
333   if (VerifyAfterGC) {
334     Universe::verify();
335   }
336 
337   heap->rebuild_free_set(false /*concurrent*/);
338 }
339 
340 void ShenandoahDegenGC::op_cleanup_complete() {
341   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
342   ShenandoahHeap::heap()->recycle_trash();
343 }
344 
345 void ShenandoahDegenGC::op_degenerated_fail() {
346   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
347   ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
348 
349   ShenandoahFullGC full_gc;
350   full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
351 }
352 
353 void ShenandoahDegenGC::op_degenerated_futile() {
354   ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
355   ShenandoahFullGC full_gc;
356   full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
357 }
358 
359 const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
360   switch (point) {
361     case _degenerated_unset:
362       return "Pause Degenerated GC (<UNSET>)";
363     case _degenerated_outside_cycle:
364       return "Pause Degenerated GC (Outside of Cycle)";
365     case _degenerated_mark:
366       return "Pause Degenerated GC (Mark)";
367     case _degenerated_evac:
368       return "Pause Degenerated GC (Evacuation)";
369     case _degenerated_updaterefs:
370       return "Pause Degenerated GC (Update Refs)";
371     default:
372       ShouldNotReachHere();
373       return "ERROR";
374   }
375 }
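
The switch in op_degenerated above is, as its comment notes, a Duff's-like device: each ShenandoahDegenPoint value is an entry point into the same linear stop-the-world cycle, and control falls through all remaining phases. Below is a minimal standalone sketch of that control pattern; the phase names and helper functions are hypothetical stand-ins, not the HotSpot API.

#include <cstdio>

// Hypothetical degeneration points, mirroring the idea of ShenandoahDegenPoint.
enum class DegenPoint { OutsideCycle, Mark, Evac, UpdateRefs };

static void reset_and_mark()               { std::puts("reset + STW mark"); }
static void maybe_finish_concurrent_mark() { std::puts("finish handed-over mark"); }
static void prepare_evacuation()           { std::puts("choose collection set"); }
static void evacuate()                     { std::puts("STW evacuation"); }
static void update_references()            { std::puts("STW update-refs"); }

// Duff's-like device: enter the one linear cycle at the phase where the
// concurrent cycle degenerated, then fall through the remaining phases.
void run_degenerated_cycle(DegenPoint point) {
  switch (point) {
    case DegenPoint::OutsideCycle:
      reset_and_mark();
      [[fallthrough]];
    case DegenPoint::Mark:
      maybe_finish_concurrent_mark();
      prepare_evacuation();
      [[fallthrough]];
    case DegenPoint::Evac:
      evacuate();
      [[fallthrough]];
    case DegenPoint::UpdateRefs:
      update_references();
      break;
  }
}

int main() {
  // Entering at Evac skips marking but still evacuates and updates references.
  run_degenerated_cycle(DegenPoint::Evac);
  return 0;
}

Entering at a later point simply skips the earlier phases, which is why the real cases above carry no break statements between them. Cancellation is re-checked after the riskier phases (for example, after op_evacuate) so a failure can still slide into Full GC.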

src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp (updated version)

  1 /*
  2  * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 
 28 #include "gc/shared/collectorCounters.hpp"
 29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 30 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
 31 #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 32 #include "gc/shenandoah/shenandoahFullGC.hpp"
 33 #include "gc/shenandoah/shenandoahGeneration.hpp"
 34 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 35 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 36 #include "gc/shenandoah/shenandoahMetrics.hpp"
 37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 38 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 39 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 41 #include "gc/shenandoah/shenandoahSTWMark.hpp"
 42 #include "gc/shenandoah/shenandoahUtils.hpp"
 43 #include "gc/shenandoah/shenandoahVerifier.hpp"
 44 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 45 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 46 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 47 #include "runtime/vmThread.hpp"
 48 #include "utilities/events.hpp"
 49 
 50 ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
 51   ShenandoahGC(),
 52   _degen_point(degen_point),
 53   _generation(generation),
 54   _abbreviated(false),
 55   _consecutive_degen_with_bad_progress(0) {
 56 }
 57 
 58 bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
 59   vmop_degenerated();
 60   ShenandoahHeap* heap = ShenandoahHeap::heap();
 61   if (heap->mode()->is_generational()) {
 62     bool is_bootstrap_gc = heap->old_generation()->is_bootstrapping();
 63     heap->mmu_tracker()->record_degenerated(GCId::current(), is_bootstrap_gc);
 64     const char* msg = is_bootstrap_gc ? "At end of Degenerated Bootstrap Old GC" : "At end of Degenerated Young GC";
 65     heap->log_heap_status(msg);
 66   }
 67   return true;
 68 }
 69 
 70 void ShenandoahDegenGC::vmop_degenerated() {
 71   TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
 72   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
 73   VM_ShenandoahDegeneratedGC degenerated_gc(this);
 74   VMThread::execute(&degenerated_gc);
 75 }
 76 
 77 void ShenandoahDegenGC::entry_degenerated() {
 78   const char* msg = degen_event_message(_degen_point);
 79   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
 80   EventMark em("%s", msg);
 81   ShenandoahHeap* const heap = ShenandoahHeap::heap();

 82   ShenandoahWorkerScope scope(heap->workers(),
 83                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
 84                               "stw degenerated gc");
 85 
 86   heap->set_degenerated_gc_in_progress(true);
 87   op_degenerated();
 88   heap->set_degenerated_gc_in_progress(false);
 89   {
 90     ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_propagate_gc_state);
 91     heap->propagate_gc_state_to_all_threads();
 92   }
 93 }
 94 
 95 void ShenandoahDegenGC::op_degenerated() {
 96   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 97   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
 98   // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
 99   // some phase, we have to upgrade the Degenerated GC to a Full GC.
100   heap->clear_cancelled_gc(true /* clear oom handler */);
101 
102 #ifdef ASSERT
103   if (heap->mode()->is_generational()) {
104     ShenandoahOldGeneration* old_generation = heap->old_generation();
105     if (!heap->is_concurrent_old_mark_in_progress()) {
106       // If we are not marking the old generation, there should be nothing in the old mark queues
107       assert(old_generation->task_queues()->is_empty(), "Old gen task queues should be empty");
108     }
109 
110     if (_generation->is_global()) {
111       // If we are in a global cycle, the old generation should not be marking. It is, however,
112       // allowed to be holding regions for evacuation or coalescing.
113       assert(old_generation->is_idle()
114              || old_generation->is_doing_mixed_evacuations()
115              || old_generation->is_preparing_for_mark(),
116              "Old generation cannot be in state: %s", old_generation->state_name());
117     }
118   }
119 #endif
120 
121   ShenandoahMetricsSnapshot metrics;
122   metrics.snap_before();
123 
124   switch (_degen_point) {
125     // The cases below form a Duff's-like device: they describe the actual GC cycle,
126     // but enter it at different points, depending on which concurrent phase had
127     // degenerated.
128 
129     case _degenerated_outside_cycle:
130       // We have degenerated from outside the cycle, which means something is wrong with
131       // the heap, most probably heavy humongous fragmentation, or we are very low on free
132       // space. It makes little sense to wait for a Full GC to reclaim as much as it can when
133       // we can instead run the most aggressive degen cycle, which includes processing references
134       // and class unloading, unless those features are explicitly disabled.
135 
136       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
137       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
138       heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
139                                 (!heap->mode()->is_generational() || _generation->is_global()));
140 
141       if (heap->mode()->is_generational()) {
142         // Clean the read table before swapping it. The end goal here is to have a clean
143         // write table, and to have the read table updated with the previous write table.
144         heap->old_generation()->card_scan()->mark_read_table_as_clean();
145 
146         if (_generation->is_young()) {
147           // Swap remembered sets for young
148           _generation->swap_card_tables();
149         }
150       }
151 
152     case _degenerated_roots:
153       // Degenerated from concurrent root mark; reset the flag for STW mark
154       if (!heap->mode()->is_generational()) {
155         if (heap->is_concurrent_mark_in_progress()) {
156           heap->cancel_concurrent_mark();
157         }
158       } else {
159         if (_generation->is_concurrent_mark_in_progress()) {
160           // We want to allow old generation marking to be punctuated by young collections
161           // (even if they have degenerated). If this is a global cycle, we'd have cancelled
162           // the entire old GC before coming into this switch. Note that cancel_marking on
163           // the generation does NOT abandon incomplete SATB buffers the way cancel_concurrent_mark does.
164           // We need to separate out the old pointers, which is done below.
165           _generation->cancel_marking();
166         }
167 
168         if (heap->is_concurrent_mark_in_progress()) {
169           // If either old or young marking is in progress, the SATB barrier will be enabled.
170           // The SATB buffer may hold a mix of old and young pointers. The old pointers need to be
171           // transferred to the old generation mark queues and the young pointers are NOT part
172           // of this snapshot, so they must be dropped here. It is safe to drop them here because
173           // we will rescan the roots on this safepoint.
174           heap->old_generation()->transfer_pointers_from_satb();
175         }
176 
177         if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
178           // We only need this if the concurrent cycle has already swapped the card tables.
179           // Marking will use the 'read' table, but interesting pointers may have been
180           // recorded in the 'write' table in the time between the cancelled concurrent cycle
181           // and this degenerated cycle. These pointers need to be included in the 'read' table
182           // used to scan the remembered set during the STW mark which follows here.
183           _generation->merge_write_table();
184         }
185       }
186 
187       op_reset();
188 
189       // STW mark
190       op_mark();
191 
192     case _degenerated_mark:
193       // No fallthrough. Continue the mark handed over from concurrent mark if
194       // concurrent mark has not yet completed
195       if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
196           heap->is_concurrent_mark_in_progress()) {
197         op_finish_mark();
198       }
199       assert(!heap->cancelled_gc(), "STW mark can not OOM");
200 
201       /* Degen: select the collection set, etc. */
202       op_prepare_evacuation();
203 
204       op_cleanup_early();
205 

229           }
230         }
231 
232         // Degeneration under oom-evac protocol might have left some objects in
233         // collection set un-evacuated. Restart evacuation from the beginning to
234         // capture all objects. For all the objects that are already evacuated,
235         // it would be a simple check, which is supposed to be fast. This is also
236         // safe to do even without degeneration, as CSet iterator is at beginning
237         // in preparation for evacuation anyway.
238         //
239         // Before doing that, we need to make sure we never had any cset-pinned
240         // regions. This may happen if allocation failure happened when evacuating
241         // the about-to-be-pinned object, oom-evac protocol left the object in
242         // the collection set, and then the pin reached the cset region. If we continue
243         // the cycle here, we would trash the cset and alive objects in it. To avoid
244         // it, we fail degeneration right away and slide into Full GC to recover.
245 
246         {
247           heap->sync_pinned_region_status();
248           heap->collection_set()->clear_current_index();

249           ShenandoahHeapRegion* r;
250           while ((r = heap->collection_set()->next()) != nullptr) {
251             if (r->is_pinned()) {
252               heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
253               op_degenerated_fail();
254               return;
255             }
256           }
257 
258           heap->collection_set()->clear_current_index();
259         }
260         op_evacuate();
261         if (heap->cancelled_gc()) {
262           op_degenerated_fail();
263           return;
264         }
265       } else if (has_in_place_promotions(heap)) {
266         // We have nothing to evacuate, but there are still regions to promote in place.
267         ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_promote_regions);
268         ShenandoahGenerationalHeap::heap()->promote_regions_in_place(false /* concurrent*/);
269       }
270 
271       // Update collector state regardless of whether there are forwarded objects
272       heap->set_evacuation_in_progress(false);
273       heap->set_concurrent_weak_root_in_progress(false);
274       heap->set_concurrent_strong_root_in_progress(false);
275 
276       // If the heuristics think we should do the cycle, this flag will be set,
277       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
278       if (heap->has_forwarded_objects()) {
279         op_init_update_refs();
280         assert(!heap->cancelled_gc(), "STW reference update can not OOM");
281       } else {
282         _abbreviated = true;
283       }
284 
285     case _degenerated_update_refs:
286       if (heap->has_forwarded_objects()) {
287         op_update_refs();
288         op_update_roots();
289         assert(!heap->cancelled_gc(), "STW reference update can not OOM");
290       }
291 
292       // Disarm nmethods that were armed in the concurrent cycle.
293       // In the case above, updating roots should have disarmed them already.
294       ShenandoahCodeRoots::disarm_nmethods();
295 
296       op_cleanup_complete();
297 
298       if (heap->mode()->is_generational()) {
299         ShenandoahGenerationalHeap::heap()->complete_degenerated_cycle();
300       }
301 
302       break;
303     default:
304       ShouldNotReachHere();
305   }
306 
307   if (ShenandoahVerify) {
308     heap->verifier()->verify_after_degenerated();
309   }
310 
311   if (VerifyAfterGC) {
312     Universe::verify();
313   }
314 
315   metrics.snap_after();
316 
317   // The most common scenario for lack of good progress following a degenerated GC is an accumulation of floating
318   // garbage during the most recently aborted concurrent GC effort.  With generational GC, it is far more effective to
319   // reclaim this floating garbage with another degenerated cycle (which focuses on the young generation and might require
320   // a pause of 200 ms) rather than a full GC cycle (which may require over 2 seconds with a 10 GB old generation).
321   //
322   // In generational mode, we'll only upgrade to full GC if we've done two degen cycles in a row and both indicated
323   // bad progress.  In non-generational mode, we'll preserve the original behavior, which is to upgrade to full
324   // immediately following a degenerated cycle with bad progress.  This preserves original behavior of non-generational
325   // Shenandoah so as to avoid introducing "surprising new behavior."  It also makes less sense with non-generational
326   // Shenandoah to replace a full GC with a degenerated GC, because both have similar pause times in non-generational
327   // mode.
328   if (!metrics.is_good_progress(_generation)) {
329     _consecutive_degen_with_bad_progress++;
330   } else {
331     _consecutive_degen_with_bad_progress = 0;
332   }
333   if (!heap->mode()->is_generational() ||
334       ((heap->shenandoah_policy()->consecutive_degenerated_gc_count() > 1) && (_consecutive_degen_with_bad_progress >= 2))) {
335     heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
336     op_degenerated_futile();
337   } else {
338     heap->notify_gc_progress();
339     heap->shenandoah_policy()->record_success_degenerated(_generation->is_young(), _abbreviated);
340     _generation->heuristics()->record_success_degenerated();
341   }
342 }
343 
344 void ShenandoahDegenGC::op_reset() {
345   _generation->prepare_gc();
346 }
347 
348 void ShenandoahDegenGC::op_mark() {
349   assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
350   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
351   ShenandoahSTWMark mark(_generation, false /*full gc*/);

352   mark.mark();
353 }
354 
355 void ShenandoahDegenGC::op_finish_mark() {
356   ShenandoahConcurrentMark mark(_generation);
357   mark.finish_mark();
358 }
359 
360 void ShenandoahDegenGC::op_prepare_evacuation() {
361   ShenandoahHeap* const heap = ShenandoahHeap::heap();
362   if (ShenandoahVerify) {
363     heap->verifier()->verify_roots_no_forwarded();
364   }
365 
366   // STW cleanup weak roots and unload classes
367   heap->parallel_cleaning(false /*full gc*/);
368 
369   // Prepare regions and collection set
370   _generation->prepare_regions_and_collection_set(false /*concurrent*/);
371 
372   // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
373   // This is needed for two reasons. Strong one: new allocations would go to the new freeset,
374   // which is outside the collection set, so no cset writes would happen there.
375   // Weaker one: new allocations would happen past the update watermark, so less work would
376   // be needed for reference updates (they would update the large filler instead).
377   if (UseTLAB) {
378     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
379     heap->tlabs_retire(false);
380   }
381 
382   if (!heap->collection_set()->is_empty()) {
383     if (ShenandoahVerify) {
384       heap->verifier()->verify_before_evacuation();
385     }
386 
387     heap->set_evacuation_in_progress(true);

388 
389     heap->set_has_forwarded_objects(true);
390   } else {
391     if (ShenandoahVerify) {
392       if (has_in_place_promotions(heap)) {
393         heap->verifier()->verify_after_concmark_with_promotions();
394       } else {
395         heap->verifier()->verify_after_concmark();
396       }
397     }
398 
399     if (VerifyAfterGC) {
400       Universe::verify();
401     }
402   }
403 }
404 
405 bool ShenandoahDegenGC::has_in_place_promotions(const ShenandoahHeap* heap) const {
406   return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
407 }
408 
409 void ShenandoahDegenGC::op_cleanup_early() {
410   ShenandoahHeap::heap()->recycle_trash();
411 }
412 
413 void ShenandoahDegenGC::op_evacuate() {
414   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
415   ShenandoahHeap::heap()->evacuate_collection_set(false /* concurrent*/);
416 }
417 
418 void ShenandoahDegenGC::op_init_update_refs() {
419   // Evacuation has completed
420   ShenandoahHeap* const heap = ShenandoahHeap::heap();
421   heap->prepare_update_heap_references();
422   heap->set_update_refs_in_progress(true);
423 }
424 
425 void ShenandoahDegenGC::op_update_refs() {
426   ShenandoahHeap* const heap = ShenandoahHeap::heap();
427   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_update_refs);
428   // Handed over from concurrent update references phase
429   heap->update_heap_references(false /*concurrent*/);
430 
431   heap->set_update_refs_in_progress(false);
432   heap->set_has_forwarded_objects(false);
433 }
434 
435 void ShenandoahDegenGC::op_update_roots() {
436   ShenandoahHeap* const heap = ShenandoahHeap::heap();
437 
438   update_roots(false /*full_gc*/);
439 
440   heap->update_heap_region_states(false /*concurrent*/);
441 
442   if (ShenandoahVerify) {
443     heap->verifier()->verify_after_update_refs();
444   }
445 
446   if (VerifyAfterGC) {
447     Universe::verify();
448   }
449 
450   heap->rebuild_free_set(false /*concurrent*/);
451 }
452 
453 void ShenandoahDegenGC::op_cleanup_complete() {
454   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
455   ShenandoahHeap::heap()->recycle_trash();
456 }
457 
458 void ShenandoahDegenGC::op_degenerated_fail() {
459   upgrade_to_full();
460 }
461 
462 void ShenandoahDegenGC::op_degenerated_futile() {
463   upgrade_to_full();
464 }
465 
466 const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
467   switch (point) {
468     case _degenerated_unset:
469       SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (<UNSET>)");
470     case _degenerated_outside_cycle:
471       SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Outside of Cycle)");
472     case _degenerated_roots:
473       SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Roots)");
474     case _degenerated_mark:
475       SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Mark)");
476     case _degenerated_evac:
477       SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Evacuation)");
478     case _degenerated_update_refs:
479       SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (Update Refs)");
480     default:
481       ShouldNotReachHere();
482       SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Degenerated GC", " (?)");
483   }
484 }
485 
486 void ShenandoahDegenGC::upgrade_to_full() {
487   log_info(gc)("Degenerated GC upgrading to Full GC");
488   ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
489   ShenandoahFullGC full_gc;
490   full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
491 }
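
The comment ahead of the futility check in the updated op_degenerated describes a mode-dependent upgrade policy: non-generational Shenandoah upgrades to Full GC immediately after a degenerated cycle with bad progress, while generational mode upgrades only after two consecutive degenerated cycles that both show bad progress. Below is a minimal sketch of that policy as described by the comment; the class, method, and parameter names are illustrative stand-ins, not the real heap/policy API.

// Illustrative only: stands in for the state kept on ShenandoahDegenGC and the
// queries made against the heap mode / collector policy in the file above.
class DegenUpgradePolicy {
  int _consecutive_degen_with_bad_progress = 0;

public:
  // Called once per completed degenerated cycle.
  //   generational       : whether the heap runs in generational mode
  //   good_progress      : outcome of the before/after metrics snapshot comparison
  //   consecutive_degens : how many degenerated cycles have run back-to-back
  bool should_upgrade_to_full(bool generational, bool good_progress, int consecutive_degens) {
    if (good_progress) {
      _consecutive_degen_with_bad_progress = 0;
      return false;                          // keep using concurrent/degenerated cycles
    }
    _consecutive_degen_with_bad_progress++;

    if (!generational) {
      // Original single-generation behavior: bad progress upgrades immediately.
      return true;
    }
    // Generational mode: tolerate one unproductive degen; upgrade after two in a row.
    return consecutive_degens > 1 && _consecutive_degen_with_bad_progress >= 2;
  }
};

The streak counter mirrors _consecutive_degen_with_bad_progress in the patch; resetting it on good progress is what lets generational mode absorb a single unproductive degenerated cycle instead of paying for an immediate Full GC.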