src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp

*** 27,10 ***
--- 27,11 ---
  #include "gc/parallel/parallelInitLogger.hpp"
  #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  #include "gc/parallel/psMemoryPool.hpp"
  #include "gc/parallel/psParallelCompact.inline.hpp"
+ #include "gc/parallel/psParallelCompactNew.inline.hpp"
  #include "gc/parallel/psPromotionManager.hpp"
  #include "gc/parallel/psScavenge.hpp"
  #include "gc/parallel/psVMOperations.hpp"
  #include "gc/shared/barrierSetNMethod.hpp"
  #include "gc/shared/fullGCForwarding.inline.hpp"

*** 116,12 ***
            young_gen()->virtual_space()->low_boundary()),
           "Boundaries must meet");
    // initialize the policy counters - 2 collectors, 2 generations
    _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);
  
!   if (!PSParallelCompact::initialize_aux_data()) {
!     return JNI_ENOMEM;
    }
  
    // Create CPU time counter
    CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
  
--- 117,18 ---
            young_gen()->virtual_space()->low_boundary()),
           "Boundaries must meet");
    // initialize the policy counters - 2 collectors, 2 generations
    _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);
  
!   if (UseCompactObjectHeaders) {
!     if (!PSParallelCompactNew::initialize_aux_data()) {
+       return JNI_ENOMEM;
+     }
+   } else {
+     if (!PSParallelCompact::initialize_aux_data()) {
+       return JNI_ENOMEM;
+     }
    }
  
    // Create CPU time counter
    CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
  

*** 169,11 ***
  
  void ParallelScavengeHeap::post_initialize() {
    CollectedHeap::post_initialize();
    // Need to init the tenuring threshold
    PSScavenge::initialize();
!   PSParallelCompact::post_initialize();
    PSPromotionManager::initialize();
  
    ScavengableNMethods::initialize(&_is_scavengable);
    GCLocker::initialize();
  }
--- 176,15 ---
  
  void ParallelScavengeHeap::post_initialize() {
    CollectedHeap::post_initialize();
    // Need to init the tenuring threshold
    PSScavenge::initialize();
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::post_initialize();
+   } else {
+     PSParallelCompact::post_initialize();
+   }
    PSPromotionManager::initialize();
  
    ScavengableNMethods::initialize(&_is_scavengable);
    GCLocker::initialize();
  }

*** 360,11 ***
  }
  
  void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
    // No need for max-compaction in this context.
    const bool should_do_max_compaction = false;
!   PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
  }
  
  bool ParallelScavengeHeap::should_attempt_young_gc() const {
    const bool ShouldRunYoungGC = true;
    const bool ShouldRunFullGC = false;
--- 371,15 ---
  }
  
  void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
    // No need for max-compaction in this context.
    const bool should_do_max_compaction = false;
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
+   } else {
+     PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
+   }
  }
  
  bool ParallelScavengeHeap::should_attempt_young_gc() const {
    const bool ShouldRunYoungGC = true;
    const bool ShouldRunFullGC = false;

*** 489,20 ***
  
    // Last resort GC; clear soft refs and do max-compaction before throwing OOM.
    {
      const bool clear_all_soft_refs = true;
      const bool should_do_max_compaction = true;
!     PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
    }
  
    if (check_gc_overhead_limit()) {
      log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
      return nullptr;
    }
  
    result = expand_heap_and_allocate(size, is_tlab);
- 
    return result;
  }
  
  void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
    CollectedHeap::ensure_parsability(retire_tlabs);
--- 504,23 ---
  
    // Last resort GC; clear soft refs and do max-compaction before throwing OOM.
    {
      const bool clear_all_soft_refs = true;
      const bool should_do_max_compaction = true;
!     if (UseCompactObjectHeaders) {
+       PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
+     } else {
+       PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
+     }
    }
  
    if (check_gc_overhead_limit()) {
      log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
      return nullptr;
    }
  
    result = expand_heap_and_allocate(size, is_tlab);
    return result;
  }
  
  void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
    CollectedHeap::ensure_parsability(retire_tlabs);

*** 571,11 ***
      }
      log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
    }
  
    const bool should_do_max_compaction = false;
!   PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
  }
  
  void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
    young_gen()->object_iterate(cl);
    old_gen()->object_iterate(cl);
--- 589,15 ---
      }
      log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
    }
  
    const bool should_do_max_compaction = false;
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::invoke(clear_soft_refs, should_do_max_compaction);
+   } else {
+     PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
+   }
  }
  
  void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
    young_gen()->object_iterate(cl);
    old_gen()->object_iterate(cl);

*** 714,20 ***
    if (bs != nullptr) {
      bs->print_on(st);
    }
    st->cr();
  
!   PSParallelCompact::print_on(st);
  }
  
  void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
    ParallelScavengeHeap::heap()->workers().threads_do(tc);
  }
  
  void ParallelScavengeHeap::print_tracing_info() const {
    log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
!   log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
  
  PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
    const PSYoungGen* const young = young_gen();
    const MutableSpace* const eden = young->eden_space();
--- 736,28 ---
    if (bs != nullptr) {
      bs->print_on(st);
    }
    st->cr();
  
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::print_on(st);
+   } else {
+     PSParallelCompact::print_on(st);
+   }
  }
  
  void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
    ParallelScavengeHeap::heap()->workers().threads_do(tc);
  }
  
  void ParallelScavengeHeap::print_tracing_info() const {
    log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
!   if (UseCompactObjectHeaders) {
+     log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
+   } else {
+     log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
+   }
  }
  
  PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
    const PSYoungGen* const young = young_gen();
    const MutableSpace* const eden = young->eden_space();
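
Note: every hunk above applies the same runtime dispatch on UseCompactObjectHeaders, routing full-GC entry points to PSParallelCompactNew when compact object headers are enabled and to PSParallelCompact otherwise. A minimal sketch of that recurring pattern follows; the helper function is purely illustrative and is not part of the patch, which inlines the dispatch at each call site.

  // Illustrative sketch only (not in the patch): the change repeats this
  // dispatch inline wherever the old full-GC implementation was invoked.
  static void invoke_full_gc(bool clear_all_soft_refs, bool should_do_max_compaction) {
    if (UseCompactObjectHeaders) {
      // Implementation selected when compact object headers are enabled.
      PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
    } else {
      // Existing implementation, unchanged for the legacy header layout.
      PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
    }
  }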