
src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp

*** 28,10 ***
--- 28,11 ---
  #include "gc/shared/collectorCounters.hpp"
  #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  #include "gc/shenandoah/shenandoahFreeSet.hpp"
+ #include "gc/shenandoah/shenandoahGeneration.hpp"
  #include "gc/shenandoah/shenandoahLock.hpp"
  #include "gc/shenandoah/shenandoahMark.inline.hpp"
  #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  #include "gc/shenandoah/shenandoahPhaseTimings.hpp"

*** 69,23 ***
    ~ShenandoahBreakpointMarkScope() {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  };
  
! ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
!   _mark(),
!   _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
  }
  
  ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
    return _degen_point;
  }
  
- void ShenandoahConcurrentGC::cancel() {
-   ShenandoahConcurrentMark::cancel();
- }
- 
  bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
    }
--- 70,22 ---
    ~ShenandoahBreakpointMarkScope() {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  };
  
! ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
!   _mark(generation),
!   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
+   _mixed_evac(false),
+   _do_old_gc_bootstrap(do_old_gc_bootstrap),
+   _generation(generation) {
  }
  
  ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
    return _degen_point;
  }
  
  bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
    }

*** 97,13 ***
    // Start initial mark under STW
    vmop_entry_init_mark();
  
    {
      ShenandoahBreakpointMarkScope breakpoint_mark_scope;
      // Concurrent mark roots
      entry_mark_roots();
!     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
  
      // Continue concurrent mark
      entry_mark();
      if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
    }
--- 97,25 ---
    // Start initial mark under STW
    vmop_entry_init_mark();
  
    {
      ShenandoahBreakpointMarkScope breakpoint_mark_scope;
+ 
+     // Reset task queue stats here, rather than in mark_concurrent_roots,
+     // because the remembered set scan will push oops onto the queues and
+     // resetting after that happens would lose those counts.
+     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
+ 
+     // Concurrent remembered set scanning
+     if (_generation->generation_mode() == YOUNG) {
+       ShenandoahConcurrentPhase gc_phase("Concurrent remembered set scanning", ShenandoahPhaseTimings::init_scan_rset);
+       _generation->scan_remembered_set();
+     }
+ 
      // Concurrent mark roots
      entry_mark_roots();
!     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;
  
      // Continue concurrent mark
      entry_mark();
      if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
    }
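[Editorial note, not part of the webrev] A minimal sketch of the ordering constraint the added comment above describes: the remembered-set scan of a young collection pushes oops onto the mark queues, so any task-queue stats reset has to happen before that scan. All names below are hypothetical stand-ins, not the actual Shenandoah API.

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-ins for a mark queue and its statistics counter.
    struct MarkQueue {
      std::vector<void*> oops;
      size_t push_count = 0;
      void reset_stats() { push_count = 0; }
      void push(void* obj) { oops.push_back(obj); push_count++; }
    };

    // A young cycle resets queue stats first, then scans the remembered set:
    // every old-to-young pointer found under a dirty card becomes a marking root.
    void young_mark_prologue(MarkQueue& q, const std::vector<void*>& old_to_young_refs) {
      q.reset_stats();                     // must precede the scan below
      for (void* obj : old_to_young_refs) {
        q.push(obj);                       // the remembered-set scan pushes oops
      }
      // Resetting the stats here instead would discard the pushes recorded by the scan.
    }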

*** 121,11 ***
      entry_weak_refs();
      entry_weak_roots();
    }
  
    // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
!   // the space. This would be the last action if there is nothing to evacuate.
    entry_cleanup_early();
  
    {
      ShenandoahHeapLocker locker(heap->lock());
      heap->free_set()->log_status();
--- 133,12 ---
      entry_weak_refs();
      entry_weak_roots();
    }
  
    // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
!   // the space. This would be the last action if there is nothing to evacuate.  Note that
+   // we will not age young-gen objects if we skip evacuation.
    entry_cleanup_early();
  
    {
      ShenandoahHeapLocker locker(heap->lock());
      heap->free_set()->log_status();

*** 142,10 ***
--- 155,14 ---
    // If so, strong_root_in_progress would be unset.
    if (heap->is_concurrent_strong_root_in_progress()) {
      entry_strong_roots();
    }
  
+   if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
+     entry_global_coalesce_and_fill();
+   }
+ 
    // Continue the cycle with evacuation and optional update-refs.
    // This may be skipped if there is nothing to evacuate.
    // If so, evac_in_progress would be unset by collection set preparation code.
    if (heap->is_evacuation_in_progress()) {
      // Concurrently evacuate

*** 176,11 ***
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
  
    heap->try_inject_alloc_failure();
!   VM_ShenandoahInitMark op(this);
    VMThread::execute(&op); // jump to entry_init_mark() under safepoint
  }
  
  void ShenandoahConcurrentGC::vmop_entry_final_mark() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
--- 193,11 ---
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
  
    heap->try_inject_alloc_failure();
!   VM_ShenandoahInitMark op(this, _do_old_gc_bootstrap);
    VMThread::execute(&op); // jump to entry_init_mark() under safepoint
  }
  
  void ShenandoahConcurrentGC::vmop_entry_final_mark() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();

*** 222,23 ***
    VM_ShenandoahFinalRoots op(this);
    VMThread::execute(&op);
  }
  
  void ShenandoahConcurrentGC::entry_init_mark() {
!   const char* msg = init_mark_event_message();
    ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
    EventMark em("%s", msg);
  
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                                "init marking");
  
    op_init_mark();
  }
  
  void ShenandoahConcurrentGC::entry_final_mark() {
!   const char* msg = final_mark_event_message();
    ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
    EventMark em("%s", msg);
  
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
--- 239,33 ---
    VM_ShenandoahFinalRoots op(this);
    VMThread::execute(&op);
  }
  
  void ShenandoahConcurrentGC::entry_init_mark() {
!   char msg[1024];
+   init_mark_event_message(msg, sizeof(msg));
    ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
    EventMark em("%s", msg);
  
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                                "init marking");
  
+   if (ShenandoahHeap::heap()->mode()->is_generational()
+     && (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify))) {
+     // The current implementation of swap_remembered_set() copies the write-card-table
+     // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
+     // so that the verifier works with the correct copy of the card table when verifying.
+     _generation->swap_remembered_set();
+   }
+ 
    op_init_mark();
  }
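[Editorial note, not part of the webrev] A minimal sketch of the read/write card-table copy that the swap_remembered_set() comment above describes, with hypothetical names. Per that comment, the current implementation copies the write-card-table into the read-card-table (and does so for GLOBAL collections as well so the verifier sees the correct copy).

    #include <cstdint>
    #include <cstring>

    // Hypothetical double-buffered card table: mutator barriers dirty the write table,
    // while the concurrent remembered-set scan reads a stable snapshot.
    struct CardTablePair {
      uint8_t* read_table;    // snapshot scanned by the GC this cycle
      uint8_t* write_table;   // dirtied by mutator post-write barriers
      size_t   num_cards;

      // Publish the mutator's dirty cards for this cycle's scan (a copy, not a pointer swap).
      void swap_remembered_set() {
        std::memcpy(read_table, write_table, num_cards);
      }
    };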
  
  void ShenandoahConcurrentGC::entry_final_mark() {
!   char msg[1024];
+   final_mark_event_message(msg, sizeof(msg));
    ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
    EventMark em("%s", msg);
  
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_final_marking(),

*** 305,13 ***
    heap->try_inject_alloc_failure();
    op_mark_roots();
  }
  
  void ShenandoahConcurrentGC::entry_mark() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
!   const char* msg = conc_mark_event_message();
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
    EventMark em("%s", msg);
  
    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
--- 332,14 ---
    heap->try_inject_alloc_failure();
    op_mark_roots();
  }
  
  void ShenandoahConcurrentGC::entry_mark() {
+   char msg[1024];
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
!   conc_mark_event_message(msg, sizeof(msg));
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
    EventMark em("%s", msg);
  
    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),

*** 462,17 ***
    // This phase does not use workers, no need for setup
    heap->try_inject_alloc_failure();
    op_cleanup_complete();
  }
  
  void ShenandoahConcurrentGC::op_reset() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    if (ShenandoahPacing) {
      heap->pacer()->setup_for_reset();
    }
! 
-   heap->prepare_gc();
  }
  
  class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
  private:
    ShenandoahMarkingContext* const _ctx;
--- 490,31 ---
    // This phase does not use workers, no need for setup
    heap->try_inject_alloc_failure();
    op_cleanup_complete();
  }
  
+ void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
+   ShenandoahHeap* const heap = ShenandoahHeap::heap();
+ 
+   const char* msg = "Coalescing and filling old regions in global collect";
+   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);
+ 
+   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+   EventMark em("%s", msg);
+   ShenandoahWorkerScope scope(heap->workers(),
+                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
+                               "concurrent coalesce and fill");
+ 
+   op_global_coalesce_and_fill();
+ }
+ 
  void ShenandoahConcurrentGC::op_reset() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    if (ShenandoahPacing) {
      heap->pacer()->setup_for_reset();
    }
!   _generation->prepare_gc(_do_old_gc_bootstrap);
  }
  
  class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
  private:
    ShenandoahMarkingContext* const _ctx;

*** 481,11 ***
  
    void heap_region_do(ShenandoahHeapRegion* r) {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      if (r->is_active()) {
        // Check if region needs updating its TAMS. We have updated it already during concurrent
!       // reset, so it is very likely we don't need to do another write here.
        if (_ctx->top_at_mark_start(r) != r->top()) {
          _ctx->capture_top_at_mark_start(r);
        }
      } else {
        assert(_ctx->top_at_mark_start(r) == r->top(),
--- 523,12 ---
  
    void heap_region_do(ShenandoahHeapRegion* r) {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      if (r->is_active()) {
        // Check if region needs updating its TAMS. We have updated it already during concurrent
!       // reset, so it is very likely we don't need to do another write here.  Since most regions
+       // are not "active", this path is relatively rare.
        if (_ctx->top_at_mark_start(r) != r->top()) {
          _ctx->capture_top_at_mark_start(r);
        }
      } else {
        assert(_ctx->top_at_mark_start(r) == r->top(),

*** 499,37 ***
  void ShenandoahConcurrentGC::op_init_mark() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
    assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  
!   assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
!   assert(!heap->marking_context()->is_complete(), "should not be complete");
    assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
  
    if (ShenandoahVerify) {
      heap->verifier()->verify_before_concmark();
    }
  
    if (VerifyBeforeGC) {
      Universe::verify();
    }
  
!   heap->set_concurrent_mark_in_progress(true);
  
!   {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
      ShenandoahInitMarkUpdateRegionStateClosure cl;
      heap->parallel_heap_region_iterate(&cl);
    }
  
    // Weak reference processing
!   ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->reset_thread_locals();
    rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
  
    // Make above changes visible to worker threads
    OrderAccess::fence();
    // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
    // we need to make sure that all its metadata are marked. The alternative is to remark
    // thread roots at the final mark pause, but that can be a potential latency killer.
    if (heap->unload_classes()) {
      ShenandoahCodeRoots::arm_nmethods();
--- 542,45 ---
  void ShenandoahConcurrentGC::op_init_mark() {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
    assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  
!   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
!   assert(!_generation->is_mark_complete(), "should not be complete");
    assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
  
    if (ShenandoahVerify) {
      heap->verifier()->verify_before_concmark();
    }
  
    if (VerifyBeforeGC) {
      Universe::verify();
    }
  
!   _generation->set_concurrent_mark_in_progress(true);
  
!   if (_do_old_gc_bootstrap) {
+     // Update region state for both young and old regions
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
      ShenandoahInitMarkUpdateRegionStateClosure cl;
      heap->parallel_heap_region_iterate(&cl);
+     heap->old_generation()->parallel_heap_region_iterate(&cl);
+   } else {
+     // Update region state for only young regions
+     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
+     ShenandoahInitMarkUpdateRegionStateClosure cl;
+     _generation->parallel_heap_region_iterate(&cl);
    }
  
    // Weak reference processing
!   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
    rp->reset_thread_locals();
    rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
  
    // Make above changes visible to worker threads
    OrderAccess::fence();
+ 
    // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
    // we need to make sure that all its metadata are marked. The alternative is to remark
    // thread roots at the final mark pause, but that can be a potential latency killer.
    if (heap->unload_classes()) {
      ShenandoahCodeRoots::arm_nmethods();

*** 563,11 ***
      assert(!heap->cancelled_gc(), "STW mark cannot OOM");
  
      // Notify JVMTI that the tagmap table will need cleaning.
      JvmtiTagMap::set_needs_cleaning();
  
!     heap->prepare_regions_and_collection_set(true /*concurrent*/);
  
      // Has to be done after cset selection
      heap->prepare_concurrent_roots();
  
      if (!heap->collection_set()->is_empty()) {
--- 614,12 ---
      assert(!heap->cancelled_gc(), "STW mark cannot OOM");
  
      // Notify JVMTI that the tagmap table will need cleaning.
      JvmtiTagMap::set_needs_cleaning();
  
!     bool mixed_evac = _generation->prepare_regions_and_collection_set(true /*concurrent*/);
+     heap->set_mixed_evac(mixed_evac);
  
      // Has to be done after cset selection
      heap->prepare_concurrent_roots();
  
      if (!heap->collection_set()->is_empty()) {

*** 656,11 ***
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
    // Concurrent weak refs processing
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
    ShenandoahBreakpoint::at_after_reference_processing_started();
!   heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
  }
  
  class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
  private:
    ShenandoahHeap* const _heap;
--- 708,11 ---
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
    // Concurrent weak refs processing
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
    ShenandoahBreakpoint::at_after_reference_processing_started();
!   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
  }
  
  class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
  private:
    ShenandoahHeap* const _heap;

*** 683,12 ***
  
  void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
    const oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      if (!_mark_context->is_marked(obj)) {
!       shenandoah_assert_correct(p, obj);
!       ShenandoahHeap::atomic_clear_oop(p, obj);
      } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (resolved == obj) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
--- 735,19 ---
  
  void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
    const oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      if (!_mark_context->is_marked(obj)) {
!       if (_heap->is_in_active_generation(obj)) {
!         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
+         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
+         // accessing from-space objects during class unloading. However, the from-space object may have
+         // been "filled". We've made no effort to prevent old generation classes being unloaded by young
+         // gen (and vice-versa).
+         shenandoah_assert_correct(p, obj);
+         ShenandoahHeap::atomic_clear_oop(p, obj);
+       }
      } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (resolved == obj) {
          resolved = _heap->evacuate_object(obj, _thread);
        }

*** 908,11 ***
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    heap->set_evacuation_in_progress(false);
    heap->set_concurrent_weak_root_in_progress(false);
    heap->prepare_update_heap_references(true /*concurrent*/);
    heap->set_update_refs_in_progress(true);
! 
    if (ShenandoahPacing) {
      heap->pacer()->setup_for_updaterefs();
    }
  }
  
--- 967,13 ---
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    heap->set_evacuation_in_progress(false);
    heap->set_concurrent_weak_root_in_progress(false);
    heap->prepare_update_heap_references(true /*concurrent*/);
    heap->set_update_refs_in_progress(true);
!   if (ShenandoahVerify) {
+     heap->verifier()->verify_before_updaterefs();
+   }
    if (ShenandoahPacing) {
      heap->pacer()->setup_for_updaterefs();
    }
  }
  

*** 953,23 ***
    heap->finish_concurrent_roots();
  
    // Clear cancelled GC, if set. On cancellation path, the block before would handle
    // everything.
    if (heap->cancelled_gc()) {
!     heap->clear_cancelled_gc();
    }
  
    // Has to be done before cset is clear
    if (ShenandoahVerify) {
      heap->verifier()->verify_roots_in_to_space();
    }
  
    heap->update_heap_region_states(true /*concurrent*/);
  
    heap->set_update_refs_in_progress(false);
    heap->set_has_forwarded_objects(false);
  
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_updaterefs();
    }
  
    if (VerifyAfterGC) {
--- 1014,34 ---
    heap->finish_concurrent_roots();
  
    // Clear cancelled GC, if set. On cancellation path, the block before would handle
    // everything.
    if (heap->cancelled_gc()) {
!     heap->clear_cancelled_gc(true /* clear oom handler */);
    }
  
    // Has to be done before cset is clear
    if (ShenandoahVerify) {
      heap->verifier()->verify_roots_in_to_space();
    }
  
    heap->update_heap_region_states(true /*concurrent*/);
  
+   if (heap->is_concurrent_old_mark_in_progress()) {
+     // Purge the SATB buffers, transferring any valid, old pointers to the
+     // old generation mark queue. From here on, no mutator will have access
+     // to anything that will be trashed and recycled.
+     heap->purge_old_satb_buffers(false /* abandon */);
+   }
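[Editorial note, not part of the webrev] A minimal sketch of the SATB purge that the comment above describes, with hypothetical types: pointers into the old generation are transferred to the old-generation mark queue, and everything else is dropped before young regions are trashed and recycled.

    #include <vector>

    struct ObjRef { bool in_old_generation; };   // hypothetical stand-in for an oop

    // Drain a SATB buffer at final update-refs: keep only old-generation pointers
    // (still needed by the concurrent old mark), discard the rest.
    void purge_old_satb_buffer(std::vector<ObjRef*>& satb_buffer,
                               std::vector<ObjRef*>& old_mark_queue) {
      for (ObjRef* obj : satb_buffer) {
        if (obj != nullptr && obj->in_old_generation) {
          old_mark_queue.push_back(obj);
        }
      }
      satb_buffer.clear();   // young-gen entries would dangle once regions are recycled
    }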
+ 
    heap->set_update_refs_in_progress(false);
    heap->set_has_forwarded_objects(false);
  
+   // The aging cycle is only relevant during the evacuation cycle (for individual objects) and during
+   // final mark (for entire regions). Both of these operations occur before final update refs.
+   heap->set_aging_cycle(false);
+ 
    if (ShenandoahVerify) {
      heap->verifier()->verify_after_updaterefs();
    }
  
    if (VerifyAfterGC) {

*** 985,42 ***
  
  void ShenandoahConcurrentGC::op_cleanup_complete() {
    ShenandoahHeap::heap()->free_set()->recycle_trash();
  }
  
  bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
    if (ShenandoahHeap::heap()->cancelled_gc()) {
      _degen_point = point;
      return true;
    }
    return false;
  }
  
! const char* ShenandoahConcurrentGC::init_mark_event_message() const {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
    if (heap->unload_classes()) {
!     return "Pause Init Mark (unload classes)";
    } else {
!     return "Pause Init Mark";
    }
  }
  
! const char* ShenandoahConcurrentGC::final_mark_event_message() const {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
!   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
    if (heap->unload_classes()) {
!     return "Pause Final Mark (unload classes)";
    } else {
!     return "Pause Final Mark";
    }
  }
  
! const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
!   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
    if (heap->unload_classes()) {
!     return "Concurrent marking (unload classes)";
    } else {
!     return "Concurrent marking";
    }
  }
--- 1057,48 ---
  
  void ShenandoahConcurrentGC::op_cleanup_complete() {
    ShenandoahHeap::heap()->free_set()->recycle_trash();
  }
  
+ void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
+   ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
+ }
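[Editorial note, not part of the webrev] A minimal sketch of what a coalesce-and-fill pass over an old region generally looks like; the helpers are hypothetical, and the real work is done by ShenandoahHeap::coalesce_and_fill_old_regions() called above. The idea, as I understand it, is that each run of dead objects is overwritten with a single filler object so the old regions remain parseable afterwards.

    #include <cstddef>

    typedef size_t WordOffset;   // word offset within a region (illustrative only)

    // Walk a region: live objects stay in place; each maximal run of dead objects
    // is coalesced into one filler so the region stays walkable.
    void coalesce_and_fill_region(WordOffset bottom, WordOffset top,
                                  bool (*is_marked)(WordOffset obj),
                                  size_t (*obj_size_words)(WordOffset obj),
                                  void (*fill_with_dummy)(WordOffset start, size_t words)) {
      WordOffset cur = bottom;
      while (cur < top) {
        if (is_marked(cur)) {
          cur += obj_size_words(cur);
        } else {
          WordOffset run_start = cur;
          while (cur < top && !is_marked(cur)) {
            cur += obj_size_words(cur);
          }
          fill_with_dummy(run_start, cur - run_start);
        }
      }
    }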
+ 
  bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
    if (ShenandoahHeap::heap()->cancelled_gc()) {
      _degen_point = point;
      return true;
    }
    return false;
  }
  
! void ShenandoahConcurrentGC::init_mark_event_message(char* buf, size_t len) const {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
    if (heap->unload_classes()) {
!     jio_snprintf(buf, len, "Pause Init Mark (%s) (unload classes)", _generation->name());
    } else {
!     jio_snprintf(buf, len, "Pause Init Mark (%s)", _generation->name());
    }
  }
  
! void ShenandoahConcurrentGC::final_mark_event_message(char* buf, size_t len) const {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
!   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
+          "Should not have forwarded objects during final mark (unless old gen concurrent mark is running)");
    if (heap->unload_classes()) {
!     jio_snprintf(buf, len, "Pause Final Mark (%s) (unload classes)", _generation->name());
    } else {
!     jio_snprintf(buf, len, "Pause Final Mark (%s)", _generation->name());
    }
  }
  
! void ShenandoahConcurrentGC::conc_mark_event_message(char* buf, size_t len) const {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
!   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
+          "Should not have forwarded objects during concurrent mark (unless old gen concurrent mark is running)");
    if (heap->unload_classes()) {
!     jio_snprintf(buf, len, "Concurrent marking (%s) (unload classes)", _generation->name());
    } else {
!     jio_snprintf(buf, len, "Concurrent marking (%s)", _generation->name());
    }
  }
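[Editorial note, not part of the webrev] A standalone sketch of the generation-qualified event messages built above; the generation name used here is hypothetical, and standard snprintf stands in for HotSpot's jio_snprintf.

    #include <cstdio>

    int main() {
      char msg[1024];
      const char* generation_name = "Young";   // hypothetical; the patch uses _generation->name()
      bool unload_classes = false;
      if (unload_classes) {
        std::snprintf(msg, sizeof(msg), "Pause Init Mark (%s) (unload classes)", generation_name);
      } else {
        std::snprintf(msg, sizeof(msg), "Pause Init Mark (%s)", generation_name);
      }
      std::printf("%s\n", msg);                // e.g. "Pause Init Mark (Young)"
      return 0;
    }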