/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

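// Typical usage (a sketch; the actual driver is the Shenandoah control thread's
// cycle-service logic):
//
//   ShenandoahDegenGC gc(point);  // 'point' is where the concurrent cycle degenerated
//   gc.collect(cause);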
ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _abbreviated(false) {
}

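// Always reports success to the caller: if the degenerated cycle itself fails or
// makes no progress, it upgrades to a Full GC internally (see op_degenerated_fail()
// and op_degenerated_futile()) instead of returning false.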
bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  return true;
}

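// Runs the degenerated cycle as a VM operation: VMThread::execute() brings the
// JVM to a safepoint, and the operation then calls back into entry_degenerated()
// on the VM thread.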
void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

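// Safepoint-side entry point: sets up pause timing, event reporting and the
// degenerated worker pool, and brackets the actual work (op_degenerated) with
// the degenerated-GC-in-progress flag.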
void ShenandoahDegenGC::entry_degenerated() {
  const char* msg = degen_event_message(_degen_point);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.
  heap->clear_cancelled_gc();

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form the Duff's-like device: it describes the actual GC cycle,
    // but enters it at different points, depending on which concurrent phase had
    // degenerated.

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (heap->is_concurrent_mark_in_progress()) {
        ShenandoahConcurrentMark::cancel();
        heap->set_concurrent_mark_in_progress(false);
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes(heap->heuristics()->can_unload_classes());

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue mark, handed over from concurrent mark if
      // concurrent mark has not yet completed.
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark cannot OOM");

      // Degen cycle: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:
      // If the heuristics decided we should run the full cycle, this flag is set,
      // and we can do evacuation. Otherwise, it is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        if (_degen_point == _degenerated_evac) {
          // Degeneration under the oom-evac protocol allows the mutator LRB to expose
          // references to from-space objects. This is okay, in theory, because we
          // will come to the safepoint here to complete the evacuations and update
          // the references. However, if the from-space reference is written to a
          // region that was EC during final mark, or was recycled after final mark,
          // it will not have its TAMS (top-at-mark-start) or UWM (update watermark)
          // updated. Such a region is effectively skipped during update references,
          // which can lead to crashes and corruption if the from-space reference
          // is accessed.
          if (UseTLAB) {
            heap->labs_make_parsable();
          }

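          // Raise the update watermark of any active region that allocated past it, so
          // that all words up to top() are covered by the update-references scan and no
          // from-space reference written there is skipped.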
          for (size_t i = 0; i < heap->num_regions(); i++) {
            ShenandoahHeapRegion* r = heap->get_region(i);
            if (r->is_active() && r->top() > r->get_update_watermark()) {
              r->set_update_watermark_at_safepoint(r->top());
            }
          }
        }

        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For objects that are already evacuated, this is only
        // a simple check, which is supposed to be fast. This is also safe to do
        // even without degeneration, as the CSet iterator is at the beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never have any cset-pinned
        // regions. This may happen if an allocation failure occurred while evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continued
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();

          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != nullptr) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics decided we should run the full cycle, this flag is set,
      // and we need to do update-refs. Otherwise, it is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update cannot OOM");
      } else {
        _abbreviated = true;
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update cannot OOM");
      }

      // Disarm nmethods that were armed in the concurrent cycle.
      // In the case above, updating the roots should have disarmed them already.
      ShenandoahCodeRoots::disarm_nmethods();

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
    heap->shenandoah_policy()->record_success_degenerated(_abbreviated);
    heap->heuristics()->record_success_degenerated();
  }
}

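// ShenandoahHeap::prepare_gc() resets per-cycle collector state (among other
// things, presumably the marking bitmaps) so the STW mark below starts clean.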
void ShenandoahDegenGC::op_reset() {
  ShenandoahHeap::heap()->prepare_gc();
}

void ShenandoahDegenGC::op_mark() {
  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(false /*full gc*/);
  mark.clear();
  mark.mark();
}

void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark;
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup of weak roots and class unloading
  heap->parallel_cleaning(false /*full gc*/);
  // Prepare regions and collection set
  heap->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. The strong one: new allocations would use the new
  // freeset, which is outside the collection set, so no cset writes would happen there.
  // The weaker one: new allocations would happen past the update watermark, so less work
  // would be needed for reference updates (we would update the large filler instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

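  // An empty collection set means the heuristics found nothing worth evacuating;
  // in that case we skip evacuation and update-refs entirely, which op_degenerated()
  // records as the abbreviated (shortcut) cycle.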
  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

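// Recycles regions already trashed by the cycle (e.g. immediate garbage found
// during collection set selection) so their memory becomes available right away.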
void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
}

void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
}

const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerated GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}