src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -27,10 +27,11 @@
  #include "gc/shared/collectorCounters.hpp"
  #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
  #include "gc/shenandoah/shenandoahFullGC.hpp"
+ #include "gc/shenandoah/shenandoahGeneration.hpp"
  #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  #include "gc/shenandoah/shenandoahMetrics.hpp"
  #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"

@@ -40,13 +41,15 @@
  #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  #include "gc/shenandoah/shenandoahVMOperations.hpp"
  #include "runtime/vmThread.hpp"
  #include "utilities/events.hpp"
  
- ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
+ ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
    ShenandoahGC(),
-   _degen_point(degen_point) {
+   _degen_point(degen_point),
+   _generation(generation),
+   _upgraded_to_full(false) {
  }
  
  bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
    vmop_degenerated();
    return true;

@@ -58,15 +61,20 @@
    VM_ShenandoahDegeneratedGC degenerated_gc(this);
    VMThread::execute(&degenerated_gc);
  }
  
  void ShenandoahDegenGC::entry_degenerated() {
-   const char* msg = degen_event_message(_degen_point);
+   char msg[1024];
+   degen_event_message(_degen_point, msg, sizeof(msg));
    ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
    EventMark em("%s", msg);
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
  
+   // In case degenerated GC preempted evacuation or update-refs, clear the aging cycle now.  No harm in clearing it
+   // redundantly if it is already clear.  We don't age during degenerated cycles.
+   heap->set_aging_cycle(false);
+ 
    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                                "stw degenerated gc");
  
    heap->set_degenerated_gc_in_progress(true);

@@ -77,11 +85,19 @@
  void ShenandoahDegenGC::op_degenerated() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    // Degenerated GC is STW, but it can also fail. Current mechanics communicates
    // GC failure via cancelled_concgc() flag. So, if we detect the failure after
    // some phase, we have to upgrade the Degenerate GC to Full GC.
-   heap->clear_cancelled_gc();
+   heap->clear_cancelled_gc(true /* clear oom handler */);
+ 
+   // We can't easily clear the old mark in progress flag because it must be done
+   // on a safepoint (not sure if that is a hard requirement). At any rate, once
+   // we are in a degenerated cycle, there should be no more old marking.
+   if (heap->is_concurrent_old_mark_in_progress()) {
+     heap->old_generation()->cancel_marking();
+   }
+   assert(heap->old_generation()->task_queues()->is_empty(), "Old gen task queues should be empty.");
  
    ShenandoahMetricsSnapshot metrics;
    metrics.snap_before();
  
    switch (_degen_point) {

@@ -93,21 +109,26 @@
        // We have degenerated from outside the cycle, which means something is bad with
        // the heap, most probably heavy humongous fragmentation, or we are very low on free
        // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
        // we can do the most aggressive degen cycle, which includes processing references and
        // class unloading, unless those features are explicitly disabled.
-       //
  
-       // Degenerated from concurrent root mark, reset the flag for STW mark
-       if (heap->is_concurrent_mark_in_progress()) {
-         ShenandoahConcurrentMark::cancel();
-         heap->set_concurrent_mark_in_progress(false);
-       }
  
        // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
        // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
-       heap->set_unload_classes(heap->heuristics()->can_unload_classes());
+       heap->set_unload_classes((!heap->mode()->is_generational() || _generation->generation_mode() == GLOBAL) && _generation->heuristics()->can_unload_classes());
+ 
+       if (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify)) {
+         // Swap remembered sets for young, or if the verifier will run during a global collect
+         _generation->swap_remembered_set();
+       }
+ 
+     case _degenerated_roots:
+       // Degenerated from concurrent root mark, reset the flag for STW mark
+       if (heap->is_concurrent_mark_in_progress()) {
+         heap->cancel_concurrent_mark();
+       }
  
        op_reset();
  
        // STW mark
        op_mark();

@@ -122,10 +143,14 @@
        /* Degen select Collection Set. etc. */
        op_prepare_evacuation();
  
        op_cleanup_early();
  
+       if (heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
+         op_global_coalesce_and_fill();
+       }
+ 
      case _degenerated_evac:
        // If heuristics thinks we should do the cycle, this flag would be set,
        // and we can do evacuation. Otherwise, it would be the shortcut cycle.
        if (heap->is_evacuation_in_progress()) {
  

@@ -144,11 +169,10 @@
          // it, we fail degeneration right away and slide into Full GC to recover.
  
          {
            heap->sync_pinned_region_status();
            heap->collection_set()->clear_current_index();
- 
            ShenandoahHeapRegion* r;
            while ((r = heap->collection_set()->next()) != NULL) {
              if (r->is_pinned()) {
                heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
                op_degenerated_fail();

@@ -211,23 +235,22 @@
      heap->notify_gc_progress();
    }
  }
  
  void ShenandoahDegenGC::op_reset() {
-   ShenandoahHeap::heap()->prepare_gc();
+   _generation->prepare_gc(false);
  }
  
  void ShenandoahDegenGC::op_mark() {
    assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
-   ShenandoahSTWMark mark(false /*full gc*/);
-   mark.clear();
+   ShenandoahSTWMark mark(_generation, false /*full gc*/);
    mark.mark();
  }
  
  void ShenandoahDegenGC::op_finish_mark() {
-   ShenandoahConcurrentMark mark;
+   ShenandoahConcurrentMark mark(_generation);
    mark.finish_mark();
  }
  
  void ShenandoahDegenGC::op_prepare_evacuation() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();

@@ -235,12 +258,13 @@
      heap->verifier()->verify_roots_no_forwarded();
    }
  
    // STW cleanup weak roots and unload classes
    heap->parallel_cleaning(false /*full gc*/);
+ 
    // Prepare regions and collection set
-   heap->prepare_regions_and_collection_set(false /*concurrent*/);
+   _generation->prepare_regions_and_collection_set(false /*concurrent*/);
  
    // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
    // This is needed for two reasons. Strong one: new allocations would be with new freeset,
    // which would be outside the collection set, so no cset writes would happen there.
    // Weaker one: new allocations would happen past update watermark, and so less work would

@@ -270,10 +294,14 @@
  
  void ShenandoahDegenGC::op_cleanup_early() {
    ShenandoahHeap::heap()->recycle_trash();
  }
  
+ // Used only by global cycles in generational mode: coalesce and fill dead space in old-generation regions
+ // so that old-gen remains parseable.
+ void ShenandoahDegenGC::op_global_coalesce_and_fill() {
+   ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
+ }
+ 
  void ShenandoahDegenGC::op_evacuate() {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
    ShenandoahHeap::heap()->evacuate_collection_set(false /* concurrent*/);
  }
  

@@ -320,35 +348,29 @@
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
    ShenandoahHeap::heap()->recycle_trash();
  }
  
  void ShenandoahDegenGC::op_degenerated_fail() {
-   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
-   ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
- 
+   upgrade_to_full();
    ShenandoahFullGC full_gc;
    full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
  }
  
  void ShenandoahDegenGC::op_degenerated_futile() {
-   ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
+   upgrade_to_full();
    ShenandoahFullGC full_gc;
    full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
  }
  
- const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
-   switch (point) {
-     case _degenerated_unset:
-       return "Pause Degenerated GC (<UNSET>)";
-     case _degenerated_outside_cycle:
-       return "Pause Degenerated GC (Outside of Cycle)";
-     case _degenerated_mark:
-       return "Pause Degenerated GC (Mark)";
-     case _degenerated_evac:
-       return "Pause Degenerated GC (Evacuation)";
-     case _degenerated_updaterefs:
-       return "Pause Degenerated GC (Update Refs)";
-     default:
-       ShouldNotReachHere();
-       return "ERROR";
-   }
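+ // Writes the pause message for the given degeneration point, including the generation name, into the caller-supplied buffer.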
+ void ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point, char* buf, size_t len) const {
+   jio_snprintf(buf, len, "Pause Degenerated %s GC (%s)", _generation->name(), ShenandoahGC::degen_point_to_string(point));
+ }
+ 
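+ // Records the upgrade with the collector policy and remembers it so callers can query it via upgraded_to_full().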
+ void ShenandoahDegenGC::upgrade_to_full() {
+   log_info(gc)("Degenerate GC upgrading to Full GC");
+   ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
+   _upgraded_to_full = true;
+ }
+ 
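+ // Returns true if this degenerated cycle was upgraded to a Full GC.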
+ bool ShenandoahDegenGC::upgraded_to_full() {
+   return _upgraded_to_full;
  }