src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp

@@ -28,10 +28,11 @@
  #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  #include "gc/shenandoah/shenandoahControlThread.hpp"
  #include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
  #include "gc/shenandoah/shenandoahFreeSet.hpp"
  #include "gc/shenandoah/shenandoahFullGC.hpp"
+ #include "gc/shenandoah/shenandoahGeneration.hpp"
  #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  #include "gc/shenandoah/shenandoahUtils.hpp"
  #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"

@@ -310,23 +311,25 @@
    //
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
  
    GCIdMark gc_id_mark;
-   ShenandoahGCSession session(cause);
+   ShenandoahGCSession session(cause, heap->global_generation());
  
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  
-   ShenandoahConcurrentGC gc;
+   ShenandoahConcurrentGC gc(heap->global_generation(), false);
    if (gc.collect(cause)) {
      // Cycle is complete.  There were no failed allocation requests and no degeneration, so count this as good progress.
      heap->notify_gc_progress();
-     heap->heuristics()->record_success_concurrent();
-     heap->shenandoah_policy()->record_success_concurrent(gc.abbreviated());
+     heap->global_generation()->heuristics()->record_success_concurrent();
+     heap->shenandoah_policy()->record_success_concurrent(false, gc.abbreviated());
+     heap->log_heap_status("At end of GC");
    } else {
      assert(heap->cancelled_gc(), "Must have been cancelled");
      check_cancellation_or_degen(gc.degen_point());
+     heap->log_heap_status("At end of cancelled GC");
    }
  }
  
  bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

@@ -345,34 +348,45 @@
  void ShenandoahControlThread::stop_service() {
    // Nothing to do here.
  }
  
  void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
+   ShenandoahHeap* const heap = ShenandoahHeap::heap();
    GCIdMark gc_id_mark;
-   ShenandoahGCSession session(cause);
+   ShenandoahGCSession session(cause, heap->global_generation());
  
    ShenandoahFullGC gc;
    gc.collect(cause);
  }
  
  void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
    assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
- 
+   ShenandoahHeap* const heap = ShenandoahHeap::heap();
    GCIdMark gc_id_mark;
-   ShenandoahGCSession session(cause);
+   ShenandoahGCSession session(cause, heap->global_generation());
  
-   ShenandoahDegenGC gc(point);
+   ShenandoahDegenGC gc(point, heap->global_generation());
    gc.collect(cause);
  }
  
  void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
    if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
      handle_requested_gc(cause);
    }
  }
  
  void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
+   // For normal requested GCs (System.gc), we want to block the caller. However,
+   // for whitebox-requested GCs, we want to initiate the GC and return immediately.
+   // The whitebox caller thread will arrange for itself to wait until the GC notifies
+   // it that it has reached the requested breakpoint (phase in the GC).
+   if (cause == GCCause::_wb_breakpoint) {
+     _requested_gc_cause = cause;
+     _gc_requested.set();
+     return;
+   }
+ 
    // Make sure we have at least one complete GC cycle before unblocking
    // from the explicit GC request.
    //
    // This is especially important for weak references cleanup and/or native
    // resources (e.g. DirectByteBuffers) machinery: when explicit GC request

@@ -388,13 +402,11 @@
      // does not take the lock. We need to enforce following order, so that read side sees
      // latest requested gc cause when the flag is set.
      _requested_gc_cause = cause;
      _gc_requested.set();
  
-     if (cause != GCCause::_wb_breakpoint) {
-       ml.wait();
-     }
+     ml.wait();
      current_gc_id = get_gc_id();
    }
  }
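
  A brief aside on the handshake above: the requesting thread publishes the cause, raises the request flag, and then waits on the monitor until the control thread has completed at least one full cycle after the request; with this change, _wb_breakpoint requests skip that wait entirely because they return early at the top of handle_requested_gc. The sketch below illustrates the blocking handshake in isolation, using standard C++ primitives instead of HotSpot's Monitor. It is illustrative only; the names GcRequestGate, request_and_wait, and cycle_finished are hypothetical and do not appear in the JDK sources.

  // Illustrative sketch only: models the requester/control-thread handshake with
  // std::condition_variable in place of HotSpot's Monitor. Names are hypothetical.
  #include <condition_variable>
  #include <cstdint>
  #include <mutex>

  class GcRequestGate {
    std::mutex              _lock;
    std::condition_variable _cv;
    uint64_t                _completed_gc_id = 0;   // number of finished cycles
    bool                    _gc_requested    = false;
    int                     _requested_cause = 0;

   public:
    // Requester side (analogous to handle_requested_gc): publish the cause,
    // then block until at least one full cycle completes after the request.
    void request_and_wait(int cause) {
      std::unique_lock<std::mutex> ml(_lock);
      uint64_t current_gc_id  = _completed_gc_id;
      uint64_t required_gc_id = current_gc_id + 1;
      while (current_gc_id < required_gc_id) {
        // Write the cause before raising the flag so the reader sees both.
        _requested_cause = cause;
        _gc_requested    = true;
        _cv.wait(ml);                      // woken by cycle_finished()
        current_gc_id = _completed_gc_id;
      }
    }

    // Control-thread side (analogous to notify_gc_waiters): record that a
    // cycle finished and wake any blocked requesters.
    void cycle_finished() {
      std::lock_guard<std::mutex> ml(_lock);
      _gc_requested = false;
      ++_completed_gc_id;
      _cv.notify_all();
    }
  };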
  
  void ShenandoahControlThread::notify_gc_waiters() {