/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

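// ShenandoahDegenGC runs the phases of a cancelled concurrent cycle under a
// single stop-the-world pause. _degen_point records which concurrent phase was
// interrupted; the switch in op_degenerated() enters the cycle at that point
// and runs it to completion, upgrading to Full GC if the cycle cannot make
// progress.
//
// Typical use, from the control thread (a sketch; the exact call site lives
// in the control thread, not in this file):
//
//   ShenandoahDegenGC gc(point, generation);
//   gc.collect(cause);
//   if (gc.upgraded_to_full()) {
//     // account the cycle as a Full GC
//   }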
ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
  ShenandoahGC(),
  _degen_point(degen_point),
  _generation(generation),
  _upgraded_to_full(false) {
}

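// Entry point for the degenerated cycle. Always reports success to the caller:
// if the cycle fails to make progress, it upgrades to Full GC internally
// instead of returning failure.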
bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
  vmop_degenerated();
  return true;
}

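// Run the degenerated cycle as a VM operation, so that all phases execute
// at a safepoint on the VM thread.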
void ShenandoahDegenGC::vmop_degenerated() {
  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
  VM_ShenandoahDegeneratedGC degenerated_gc(this);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahDegenGC::entry_degenerated() {
  char msg[1024];
  degen_event_message(_degen_point, msg, sizeof(msg));
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  // In case degenerated GC preempted evacuation or update-refs, clear the aging cycle now.  No harm in clearing it
  // redundantly if it is already clear.  We don't age during degenerated cycles.
  heap->set_aging_cycle(false);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  heap->set_degenerated_gc_in_progress(true);
  op_degenerated();
  heap->set_degenerated_gc_in_progress(false);
}

void ShenandoahDegenGC::op_degenerated() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to Full GC.
  heap->clear_cancelled_gc(true /* clear oom handler */);

#ifdef ASSERT
  if (heap->mode()->is_generational()) {
    if (_generation->generation_mode() == GenerationMode::GLOBAL) {
      // We can only get to a degenerated global cycle _after_ a concurrent global cycle
      // has been cancelled. In that case, we expect the concurrent global cycle to have
      // cancelled the old gc already.
      assert(!heap->is_old_gc_active(), "Old GC should not be active during global cycle.");
    }

    if (!heap->is_concurrent_old_mark_in_progress()) {
      // If we are not marking the old generation, there should be nothing in the old mark queues
      assert(heap->old_generation()->task_queues()->is_empty(), "Old gen task queues should be empty.");
    }
  }
#endif

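  // Snapshot heap metrics before the cycle; compared against the post-cycle
  // snapshot below to decide whether this degenerated cycle made enough
  // progress, or whether we must upgrade to Full GC.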
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (_degen_point) {
    // The cases below form the Duff's-like device: it describes the actual GC cycle,
    // but enters it at different points, depending on which concurrent phase had
    // degenerated.

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.

      if (heap->is_concurrent_old_mark_in_progress()) {
        // We have come straight into a degenerated cycle without running a concurrent cycle
        // first and the SATB barrier is enabled to support concurrent old marking. The SATB buffer
        // may hold a mix of old and young pointers. The old pointers need to be transferred
        // to the old generation mark queues and the young pointers are _not_ part of this
        // snapshot, so they must be dropped here.
        heap->transfer_old_pointers_from_satb();
      }

      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      heap->set_unload_classes((!heap->mode()->is_generational() || _generation->generation_mode() == GLOBAL) &&
                               _generation->heuristics()->can_unload_classes());

      if (heap->mode()->is_generational() &&
          (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify))) {
        // Swap remembered sets for young, or if the verifier will run during a global collect
        _generation->swap_remembered_set();
      }

    case _degenerated_roots:
      // Degenerated from concurrent root mark, reset the flag for STW mark
      if (!heap->mode()->is_generational()) {
        if (heap->is_concurrent_mark_in_progress()) {
          heap->cancel_concurrent_mark();
        }
      } else {
        if (_generation->is_concurrent_mark_in_progress()) {
          // We want to allow old generation marking to be punctuated by young collections
          // (even if they have degenerated). If this is a global cycle, we'd have cancelled
          // the entire old gc before coming into this switch.
          _generation->cancel_marking();
        }
      }

      if (_degen_point == ShenandoahDegenPoint::_degenerated_roots) {
        // We only need this if the concurrent cycle has already swapped the card tables.
        // Marking will use the 'read' table, but interesting pointers may have been
        // recorded in the 'write' table in the time between the cancelled concurrent cycle
        // and this degenerated cycle. These pointers need to be included in the 'read' table
        // used to scan the remembered set during the STW mark which follows here.
        _generation->merge_write_table();
      }

      op_reset();

      // STW mark
      op_mark();

    case _degenerated_mark:
      // No fallthrough. Continue the mark, handed over from concurrent mark, if
      // concurrent mark has not yet completed.
      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark &&
          heap->is_concurrent_mark_in_progress()) {
        op_finish_mark();
      }
      assert(!heap->cancelled_gc(), "STW mark can not OOM");

      // Degenerated cycle: select the collection set, etc.
      op_prepare_evacuation();

      op_cleanup_early();

    case _degenerated_evac:

      if (heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
        op_global_coalesce_and_fill();
      }

      // If the heuristics decided we should do the cycle, this flag would be set,
      // and we can do evacuation. Otherwise, it is the shortcut cycle.
      if (heap->is_evacuation_in_progress()) {

        if (_degen_point == _degenerated_evac) {
          // Degeneration under the oom-evac protocol allows the mutator LRB to expose
          // references to from-space objects. This is okay, in theory, because we
          // will come to the safepoint here to complete the evacuations and update
          // the references. However, if the from-space reference is written to a
          // region that was EC during final mark or was recycled after final mark,
          // it will not have TAMS or UWM updated. Such a region is effectively
          // skipped during update references, which can lead to crashes and corruption
          // if the from-space reference is accessed.
          if (UseTLAB) {
            heap->labs_make_parsable();
          }

          for (size_t i = 0; i < heap->num_regions(); i++) {
            ShenandoahHeapRegion* r = heap->get_region(i);
            if (r->is_active() && r->top() > r->get_update_watermark()) {
              r->set_update_watermark_at_safepoint(r->top());
            }
          }
        }

        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as the CSet iterator is at the beginning
        // in preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure happened when evacuating
        // the about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and then the pin reached the cset region. If we continue
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.

        {
          heap->sync_pinned_region_status();
          heap->collection_set()->clear_current_index();
          ShenandoahHeapRegion* r;
          while ((r = heap->collection_set()->next()) != NULL) {
            if (r->is_pinned()) {
              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
              op_degenerated_fail();
              return;
            }
          }

          heap->collection_set()->clear_current_index();
        }
        op_evacuate();
        if (heap->cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics decided we should do the cycle, this flag would be set,
      // and we need to do update-refs. Otherwise, it is the shortcut cycle.
      if (heap->has_forwarded_objects()) {
        op_init_updaterefs();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

    case _degenerated_updaterefs:
      if (heap->has_forwarded_objects()) {
        op_updaterefs();
        op_update_roots();
        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
      }

      if (ClassUnloading) {
        // Disarm nmethods that were armed in the concurrent cycle.
        // In the case above, update roots should have disarmed them already.
        ShenandoahCodeRoots::disarm_nmethods();
      }

      op_cleanup_complete();
      break;
    default:
      ShouldNotReachHere();
  }

  if (heap->mode()->is_generational()) {
    // In case degeneration interrupted concurrent evacuation or update references, we need to clean up transient state.
    // Otherwise, these actions have no effect.

    heap->young_generation()->unadjust_available();
    heap->old_generation()->unadjust_available();
    // No need to call old_gen->increase_used(): that was done when plabs were allocated,
    // accounting for both old evacuations and promotions.

    heap->set_alloc_supplement_reserve(0);
    heap->set_young_evac_reserve(0);
    heap->set_old_evac_reserve(0);
    heap->reset_old_evac_expended();
    heap->set_promotion_reserve(0);
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
  if (!metrics.is_good_progress()) {
    heap->notify_gc_no_progress();
    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    heap->notify_gc_progress();
  }
}

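// Reset the generation's marking state (bitmaps etc.) in preparation for the
// STW mark that follows.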
void ShenandoahDegenGC::op_reset() {
  _generation->prepare_gc(false);
}

void ShenandoahDegenGC::op_mark() {
  assert(!_generation->is_concurrent_mark_in_progress(), "Should be reset");
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
  ShenandoahSTWMark mark(_generation, false /*full gc*/);
  mark.mark();
}

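// Finish a concurrent mark that was interrupted mid-way: complete the marking
// at this safepoint, reusing the work the concurrent phase has already done.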
void ShenandoahDegenGC::op_finish_mark() {
  ShenandoahConcurrentMark mark(_generation);
  mark.finish_mark();
}

void ShenandoahDegenGC::op_prepare_evacuation() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  // STW cleanup of weak roots and class unloading
  heap->parallel_cleaning(false /*full gc*/);

  // Prepare regions and collection set
  _generation->prepare_regions_and_collection_set(false /*concurrent*/);

  // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
  // This is needed for two reasons. The strong one: new allocations would be served from
  // the new freeset, which is outside the collection set, so no cset writes would happen
  // there. The weaker one: new allocations would happen past the update watermark, so
  // less work would be needed for reference updates (the large filler would be updated
  // instead).
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_final_manage_labs);
    heap->tlabs_retire(false);
  }

  if (!heap->collection_set()->is_empty()) {
    heap->set_evacuation_in_progress(true);
    heap->set_has_forwarded_objects(true);

    if (ShenandoahVerify) {
      heap->verifier()->verify_during_evacuation();
    }
  } else {
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_concmark();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

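// Immediately reclaim regions whose contents turned out to be all garbage
// during marking, making their memory available for evacuation.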
void ShenandoahDegenGC::op_cleanup_early() {
  ShenandoahHeap::heap()->recycle_trash();
}

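// For a generational global cycle: coalesce and fill dead objects in old
// regions, so that the old generation stays parsable (e.g. for remembered
// set scanning) after the cycle.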
void ShenandoahDegenGC::op_global_coalesce_and_fill() {
  ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
}

void ShenandoahDegenGC::op_evacuate() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
  ShenandoahHeap::heap()->evacuate_collection_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_init_updaterefs() {
  // Evacuation has completed
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_concurrent_strong_root_in_progress(false);

  heap->prepare_update_heap_references(false /*concurrent*/);
  heap->set_update_refs_in_progress(true);
}

void ShenandoahDegenGC::op_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
  // Handed over from the concurrent update references phase
  heap->update_heap_references(false /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);
}

void ShenandoahDegenGC::op_update_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  update_roots(false /*full_gc*/);

  heap->update_heap_region_states(false /*concurrent*/);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(false /*concurrent*/);
}

void ShenandoahDegenGC::op_cleanup_complete() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
  ShenandoahHeap::heap()->recycle_trash();
}

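// The cycle was cancelled mid-way (e.g. out-of-memory during STW evacuation,
// or a pinned region found in the collection set): recover by upgrading to
// Full GC.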
void ShenandoahDegenGC::op_degenerated_fail() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

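// The cycle completed but made no real progress: upgrade to Full GC rather
// than risk another futile degenerated cycle.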
void ShenandoahDegenGC::op_degenerated_futile() {
  upgrade_to_full();
  ShenandoahFullGC full_gc;
  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

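// Compose the pause message used for logging and JFR events,
// e.g. "Pause Degenerated Young GC (Evacuation)".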
void ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point, char* buf, size_t len) const {
  jio_snprintf(buf, len, "Pause Degenerated %s GC (%s)", _generation->name(), ShenandoahGC::degen_point_to_string(point));
}

void ShenandoahDegenGC::upgrade_to_full() {
  log_info(gc)("Degenerated GC upgrading to Full GC");
  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
  _upgraded_to_full = true;
}

bool ShenandoahDegenGC::upgraded_to_full() {
  return _upgraded_to_full;
}