< prev index next >

src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp

Print this page

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/parallel/objectStartArray.inline.hpp"
  26 #include "gc/parallel/parallelArguments.hpp"
  27 #include "gc/parallel/parallelInitLogger.hpp"
  28 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  29 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  30 #include "gc/parallel/psMemoryPool.hpp"
  31 #include "gc/parallel/psParallelCompact.inline.hpp"

  32 #include "gc/parallel/psPromotionManager.hpp"
  33 #include "gc/parallel/psScavenge.hpp"
  34 #include "gc/parallel/psVMOperations.hpp"
  35 #include "gc/shared/barrierSetNMethod.hpp"
  36 #include "gc/shared/fullGCForwarding.inline.hpp"
  37 #include "gc/shared/gcHeapSummary.hpp"
  38 #include "gc/shared/gcLocker.inline.hpp"
  39 #include "gc/shared/gcWhen.hpp"
  40 #include "gc/shared/genArguments.hpp"
  41 #include "gc/shared/locationPrinter.inline.hpp"
  42 #include "gc/shared/scavengableNMethods.hpp"
  43 #include "gc/shared/suspendibleThreadSet.hpp"
  44 #include "logging/log.hpp"
  45 #include "memory/iterator.hpp"
  46 #include "memory/metaspaceCounters.hpp"
  47 #include "memory/metaspaceUtils.hpp"
  48 #include "memory/reservedSpace.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "runtime/atomic.hpp"

 100   _old_gen = new PSOldGen(
 101       old_rs,
 102       OldSize,
 103       MinOldSize,
 104       MaxOldSize);
 105 
 106   assert(young_gen()->max_gen_size() == young_rs.size(),"Consistency check");
 107   assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");
 108 
 109   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
 110 
 111   _size_policy = new PSAdaptiveSizePolicy(SpaceAlignment,
 112                                           max_gc_pause_sec);
 113 
 114   assert((old_gen()->virtual_space()->high_boundary() ==
 115           young_gen()->virtual_space()->low_boundary()),
 116          "Boundaries must meet");
 117   // initialize the policy counters - 2 collectors, 2 generations
 118   _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);
 119 
 120   if (!PSParallelCompact::initialize_aux_data()) {
 121     return JNI_ENOMEM;






 122   }
 123 
 124   // Create CPU time counter
 125   CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
 126 
 127   ParallelInitLogger::print();
 128 
 129   FullGCForwarding::initialize(_reserved);
 130 
 131   return JNI_OK;
 132 }
 133 
 134 void ParallelScavengeHeap::initialize_serviceability() {
 135 
 136   _eden_pool = new PSEdenSpacePool(_young_gen,
 137                                    _young_gen->eden_space(),
 138                                    "PS Eden Space",
 139                                    false /* support_usage_threshold */);
 140 
 141   _survivor_pool = new PSSurvivorSpacePool(_young_gen,

 153   _old_manager->add_pool(_survivor_pool);
 154   _old_manager->add_pool(_old_pool);
 155 
 156   _young_manager->add_pool(_eden_pool);
 157   _young_manager->add_pool(_survivor_pool);
 158 
 159 }
 160 
 161 class PSIsScavengable : public BoolObjectClosure {
     // An object is "scavengable" iff it currently resides in young-gen;
     // registered with ScavengableNMethods in post_initialize() below.
 162   bool do_object_b(oop obj) {
 163     return ParallelScavengeHeap::heap()->is_in_young(obj);
 164   }
 165 };
 166 
 167 static PSIsScavengable _is_scavengable;
 168 
 169 void ParallelScavengeHeap::post_initialize() {
 170   CollectedHeap::post_initialize();
 171   // Need to init the tenuring threshold
 172   PSScavenge::initialize();
 173   PSParallelCompact::post_initialize();




 174   PSPromotionManager::initialize();
 175 
       // Register the young-gen predicate so nmethods referencing young oops
       // are tracked for scavenging.
 176   ScavengableNMethods::initialize(&_is_scavengable);
 177   GCLocker::initialize();
 178 }
 179 
 180 void ParallelScavengeHeap::gc_epilogue(bool full) {
       // Maintain the "heap almost full" emergency flag: entered when a
       // full-gc still leaves eden non-empty; left once any gc empties eden.
 181   if (_is_heap_almost_full) {
 182     // Reset emergency state if eden is empty after a young/full gc
 183     if (_young_gen->eden_space()->is_empty()) {
 184       log_debug(gc)("Leaving memory constrained state; back to normal");
 185       _is_heap_almost_full = false;
 186     }
 187   } else {
 188     if (full && !_young_gen->eden_space()->is_empty()) {
 189       log_debug(gc)("Non-empty young-gen after full-gc; in memory constrained state");
 190       _is_heap_almost_full = true;
 191     }
 192   }
 193 }

 353         return op.result();
 354       }
 355     }
 356 
 357     // Was the gc-overhead reached inside the safepoint? If so, this mutator
 358     // should return null as well for global consistency.
 359     if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
 360       return nullptr;
 361     }
 362 
 363     if ((QueuedAllocationWarningCount > 0) &&
 364         (loop_count % QueuedAllocationWarningCount == 0)) {
 365       log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times, size=%zu", loop_count, size);
 366     }
 367   }
 368 }
 369 
 370 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
       // Full-collection entry point: delegate to the parallel compactor
       // without requesting maximum compaction.
 371   // No need for max-compaction in this context.
 372   const bool should_do_max_compaction = false;
 373   PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);




 374 }
 375 
 376 bool ParallelScavengeHeap::should_attempt_young_gc() const {
 377   const bool ShouldRunYoungGC = true;
 378   const bool ShouldRunFullGC = false;
 379 
 380   if (!_young_gen->to_space()->is_empty()) {
 381     log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
 382     return ShouldRunFullGC;
 383   }
 384 
 385   // Check if the predicted promoted bytes will overflow free space in old-gen.
 386   PSAdaptiveSizePolicy* policy = _size_policy;
 387 
 388   size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
 389   size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
 390   // Total free size after possible old gen expansion
 391   size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
 392 
 393   log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",

 482 
 483   if (!_is_heap_almost_full) {
 484     // If young-gen can handle this allocation, attempt young-gc firstly, as young-gc is usually cheaper.
 485     bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
 486 
 487     collect_at_safepoint(!should_run_young_gc);
 488 
 489     // If gc-overhead is reached, we will skip allocation.
 490     if (!check_gc_overhead_limit()) {
 491       result = expand_heap_and_allocate(size, is_tlab);
 492       if (result != nullptr) {
 493         return result;
 494       }
 495     }
 496   }
 497 
 498   // Last resort GC; clear soft refs and do max-compaction before throwing OOM.
 499   {
 500     const bool clear_all_soft_refs = true;
 501     const bool should_do_max_compaction = true;
 502     PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);




 503   }
 504 
 505   if (check_gc_overhead_limit()) {
 506     log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
 507     return nullptr;
 508   }
 509 
 510   result = expand_heap_and_allocate(size, is_tlab);
 511 
 512   return result;
 513 }
 514 
 515 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
       // Beyond the base-class work, also make eden linearly parsable.
 516   CollectedHeap::ensure_parsability(retire_tlabs);
 517   young_gen()->eden_space()->ensure_parsability();
 518 }
 519 
 520 size_t ParallelScavengeHeap::tlab_capacity() const {
       // TLABs are carved out of eden only; delegate the query there.
 521   return young_gen()->eden_space()->tlab_capacity();
 522 }
 523 
 524 size_t ParallelScavengeHeap::tlab_used() const {
       // Delegates to eden, matching tlab_capacity() above.
 525   return young_gen()->eden_space()->tlab_used();
 526 }
 527 
 528 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc() const {
       // Upper bound on a new TLAB size, as reported by eden.
 529   return young_gen()->eden_space()->unsafe_max_tlab_alloc();
 530 }
 531 

 564     full_gc_count = total_full_collections();
 565   }
 566 
 567   VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
 568   VMThread::execute(&op);
 569 }
 570 
 571 void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
       // Runs a GC at a safepoint: attempt a young-gc first when permitted
       // and profitable, upgrading to a full-gc if the scavenge fails.
 572   assert(!GCLocker::is_active(), "precondition");
 573   bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);
 574 
 575   if (!is_full && should_attempt_young_gc()) {
 576     bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
 577     if (young_gc_success) {
 578       return;
 579     }
 580     log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
 581   }
 582 
       // Full-gc path (either requested or an upgrade from a failed scavenge).
 583   const bool should_do_max_compaction = false;
 584   PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);




 585 }
 586 
 587 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
       // Apply cl to every object in the heap, young-gen first then old-gen.
 588   young_gen()->object_iterate(cl);
 589   old_gen()->object_iterate(cl);
 590 }
 591 
 592 // The HeapBlockClaimer is used during parallel iteration over the heap,
 593 // allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
 594 // The eden and survivor spaces are treated as single blocks as it is hard to divide
 595 // these spaces.
 596 // The old space is divided into fixed-size blocks.
 597 class HeapBlockClaimer : public StackObj {
 598   Atomic<size_t> _claimed_index;
 599 
 600 public:
 601   static const size_t InvalidIndex = SIZE_MAX;
 602   static const size_t EdenIndex = 0;
 603   static const size_t SurvivorIndex = 1;
 604   static const size_t NumNonOldGenClaims = 2;

 707 bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
       // Describe what (if anything) lives at addr; returns true if printed.
 708   return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
 709 }
 710 
 711 void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
       // Null checks guard against printing before the generations are set up
       // (presumably reachable during early initialization — not shown here).
 712   if (young_gen() != nullptr) {
 713     young_gen()->print_on(st);
 714   }
 715   if (old_gen() != nullptr) {
 716     old_gen()->print_on(st);
 717   }
 718 }
 719 
 720 void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
       // Print barrier-set state (if any), then the full-gc compactor's state.
 721   BarrierSet* bs = BarrierSet::barrier_set();
 722   if (bs != nullptr) {
 723     bs->print_on(st);
 724   }
 725   st->cr();
 726 
 727   PSParallelCompact::print_on(st);




 728 }
 729 
 730 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
       // Apply tc to every parallel GC worker thread.
 731   ParallelScavengeHeap::heap()->workers().threads_do(tc);
 732 }
 733 
 734 void ParallelScavengeHeap::print_tracing_info() const {
       // Log cumulative young-gc and full-gc times (gc+heap+exit channel).
 735   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
 736   log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());




 737 }
 738 
 739 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
       // Snapshot used/capacity of each generation (and eden/from spaces)
       // before a GC, so the change can be reported afterwards
       // (see print_heap_change below).
 740   const PSYoungGen* const young = young_gen();
 741   const MutableSpace* const eden = young->eden_space();
 742   const MutableSpace* const from = young->from_space();
 743   const PSOldGen* const old = old_gen();
 744 
 745   return PreGenGCValues(young->used_in_bytes(),
 746                         young->capacity_in_bytes(),
 747                         eden->used_in_bytes(),
 748                         eden->capacity_in_bytes(),
 749                         from->used_in_bytes(),
 750                         from->capacity_in_bytes(),
 751                         old->used_in_bytes(),
 752                         old->capacity_in_bytes());
 753 }
 754 
 755 void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
 756   const PSYoungGen* const young = young_gen();

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/parallel/objectStartArray.inline.hpp"
  26 #include "gc/parallel/parallelArguments.hpp"
  27 #include "gc/parallel/parallelInitLogger.hpp"
  28 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  29 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  30 #include "gc/parallel/psMemoryPool.hpp"
  31 #include "gc/parallel/psParallelCompact.inline.hpp"
  32 #include "gc/parallel/psParallelCompactNew.inline.hpp"
  33 #include "gc/parallel/psPromotionManager.hpp"
  34 #include "gc/parallel/psScavenge.hpp"
  35 #include "gc/parallel/psVMOperations.hpp"
  36 #include "gc/shared/barrierSetNMethod.hpp"
  37 #include "gc/shared/fullGCForwarding.inline.hpp"
  38 #include "gc/shared/gcHeapSummary.hpp"
  39 #include "gc/shared/gcLocker.inline.hpp"
  40 #include "gc/shared/gcWhen.hpp"
  41 #include "gc/shared/genArguments.hpp"
  42 #include "gc/shared/locationPrinter.inline.hpp"
  43 #include "gc/shared/scavengableNMethods.hpp"
  44 #include "gc/shared/suspendibleThreadSet.hpp"
  45 #include "logging/log.hpp"
  46 #include "memory/iterator.hpp"
  47 #include "memory/metaspaceCounters.hpp"
  48 #include "memory/metaspaceUtils.hpp"
  49 #include "memory/reservedSpace.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "runtime/atomic.hpp"

 101   _old_gen = new PSOldGen(
 102       old_rs,
 103       OldSize,
 104       MinOldSize,
 105       MaxOldSize);
 106 
 107   assert(young_gen()->max_gen_size() == young_rs.size(),"Consistency check");
 108   assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");
 109 
 110   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
 111 
 112   _size_policy = new PSAdaptiveSizePolicy(SpaceAlignment,
 113                                           max_gc_pause_sec);
 114 
 115   assert((old_gen()->virtual_space()->high_boundary() ==
 116           young_gen()->virtual_space()->low_boundary()),
 117          "Boundaries must meet");
 118   // initialize the policy counters - 2 collectors, 2 generations
 119   _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);
 120 
 121   if (UseCompactObjectHeaders) {
 122     if (!PSParallelCompactNew::initialize_aux_data()) {
 123       return JNI_ENOMEM;
 124     }
 125   } else {
 126     if (!PSParallelCompact::initialize_aux_data()) {
 127       return JNI_ENOMEM;
 128     }
 129   }
 130 
 131   // Create CPU time counter
 132   CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
 133 
 134   ParallelInitLogger::print();
 135 
 136   FullGCForwarding::initialize(_reserved);
 137 
 138   return JNI_OK;
 139 }
 140 
 141 void ParallelScavengeHeap::initialize_serviceability() {
 142 
 143   _eden_pool = new PSEdenSpacePool(_young_gen,
 144                                    _young_gen->eden_space(),
 145                                    "PS Eden Space",
 146                                    false /* support_usage_threshold */);
 147 
 148   _survivor_pool = new PSSurvivorSpacePool(_young_gen,

 160   _old_manager->add_pool(_survivor_pool);
 161   _old_manager->add_pool(_old_pool);
 162 
 163   _young_manager->add_pool(_eden_pool);
 164   _young_manager->add_pool(_survivor_pool);
 165 
 166 }
 167 
 168 class PSIsScavengable : public BoolObjectClosure {
     // An object is "scavengable" iff it currently resides in young-gen;
     // registered with ScavengableNMethods in post_initialize() below.
 169   bool do_object_b(oop obj) {
 170     return ParallelScavengeHeap::heap()->is_in_young(obj);
 171   }
 172 };
 173 
 174 static PSIsScavengable _is_scavengable;
 175 
 176 void ParallelScavengeHeap::post_initialize() {
 177   CollectedHeap::post_initialize();
 178   // Need to init the tenuring threshold
 179   PSScavenge::initialize();
       // With compact object headers the alternative full-gc implementation
       // (PSParallelCompactNew) is used instead of PSParallelCompact.
 180   if (UseCompactObjectHeaders) {
 181     PSParallelCompactNew::post_initialize();
 182   } else {
 183     PSParallelCompact::post_initialize();
 184   }
 185   PSPromotionManager::initialize();
 186 
       // Register the young-gen predicate so nmethods referencing young oops
       // are tracked for scavenging.
 187   ScavengableNMethods::initialize(&_is_scavengable);
 188   GCLocker::initialize();
 189 }
 190 
 191 void ParallelScavengeHeap::gc_epilogue(bool full) {
       // Maintain the "heap almost full" emergency flag: entered when a
       // full-gc still leaves eden non-empty; left once any gc empties eden.
 192   if (_is_heap_almost_full) {
 193     // Reset emergency state if eden is empty after a young/full gc
 194     if (_young_gen->eden_space()->is_empty()) {
 195       log_debug(gc)("Leaving memory constrained state; back to normal");
 196       _is_heap_almost_full = false;
 197     }
 198   } else {
 199     if (full && !_young_gen->eden_space()->is_empty()) {
 200       log_debug(gc)("Non-empty young-gen after full-gc; in memory constrained state");
 201       _is_heap_almost_full = true;
 202     }
 203   }
 204 }

 364         return op.result();
 365       }
 366     }
 367 
 368     // Was the gc-overhead reached inside the safepoint? If so, this mutator
 369     // should return null as well for global consistency.
 370     if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
 371       return nullptr;
 372     }
 373 
 374     if ((QueuedAllocationWarningCount > 0) &&
 375         (loop_count % QueuedAllocationWarningCount == 0)) {
 376       log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times, size=%zu", loop_count, size);
 377     }
 378   }
 379 }
 380 
 381 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
       // Full-collection entry point: dispatch to the compactor variant
       // selected by UseCompactObjectHeaders; no maximum compaction here.
 382   // No need for max-compaction in this context.
 383   const bool should_do_max_compaction = false;
 384   if (UseCompactObjectHeaders) {
 385     PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
 386   } else {
 387     PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
 388   }
 389 }
 390 
 391 bool ParallelScavengeHeap::should_attempt_young_gc() const {
 392   const bool ShouldRunYoungGC = true;
 393   const bool ShouldRunFullGC = false;
 394 
 395   if (!_young_gen->to_space()->is_empty()) {
 396     log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
 397     return ShouldRunFullGC;
 398   }
 399 
 400   // Check if the predicted promoted bytes will overflow free space in old-gen.
 401   PSAdaptiveSizePolicy* policy = _size_policy;
 402 
 403   size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
 404   size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
 405   // Total free size after possible old gen expansion
 406   size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
 407 
 408   log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",

 497 
 498   if (!_is_heap_almost_full) {
 499     // If young-gen can handle this allocation, attempt young-gc firstly, as young-gc is usually cheaper.
 500     bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
 501 
 502     collect_at_safepoint(!should_run_young_gc);
 503 
 504     // If gc-overhead is reached, we will skip allocation.
 505     if (!check_gc_overhead_limit()) {
 506       result = expand_heap_and_allocate(size, is_tlab);
 507       if (result != nullptr) {
 508         return result;
 509       }
 510     }
 511   }
 512 
 513   // Last resort GC; clear soft refs and do max-compaction before throwing OOM.
 514   {
 515     const bool clear_all_soft_refs = true;
 516     const bool should_do_max_compaction = true;
 517     if (UseCompactObjectHeaders) {
 518       PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
 519     } else {
 520       PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
 521     }
 522   }
 523 
 524   if (check_gc_overhead_limit()) {
 525     log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
 526     return nullptr;
 527   }
 528 
 529   result = expand_heap_and_allocate(size, is_tlab);

 530   return result;
 531 }
 532 
 533 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
       // Beyond the base-class work, also make eden linearly parsable.
 534   CollectedHeap::ensure_parsability(retire_tlabs);
 535   young_gen()->eden_space()->ensure_parsability();
 536 }
 537 
 538 size_t ParallelScavengeHeap::tlab_capacity() const {
       // TLABs are carved out of eden only; delegate the query there.
 539   return young_gen()->eden_space()->tlab_capacity();
 540 }
 541 
 542 size_t ParallelScavengeHeap::tlab_used() const {
       // Delegates to eden, matching tlab_capacity() above.
 543   return young_gen()->eden_space()->tlab_used();
 544 }
 545 
 546 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc() const {
       // Upper bound on a new TLAB size, as reported by eden.
 547   return young_gen()->eden_space()->unsafe_max_tlab_alloc();
 548 }
 549 

 582     full_gc_count = total_full_collections();
 583   }
 584 
 585   VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
 586   VMThread::execute(&op);
 587 }
 588 
 589 void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
       // Runs a GC at a safepoint: attempt a young-gc first when permitted
       // and profitable, upgrading to a full-gc if the scavenge fails.
 590   assert(!GCLocker::is_active(), "precondition");
 591   bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);
 592 
 593   if (!is_full && should_attempt_young_gc()) {
 594     bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
 595     if (young_gc_success) {
 596       return;
 597     }
 598     log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
 599   }
 600 
       // Full-gc path: dispatch to the compactor variant selected by
       // UseCompactObjectHeaders.
 601   const bool should_do_max_compaction = false;
 602   if (UseCompactObjectHeaders) {
 603     PSParallelCompactNew::invoke(clear_soft_refs, should_do_max_compaction);
 604   } else {
 605     PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
 606   }
 607 }
 608 
 609 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
       // Apply cl to every object in the heap, young-gen first then old-gen.
 610   young_gen()->object_iterate(cl);
 611   old_gen()->object_iterate(cl);
 612 }
 613 
 614 // The HeapBlockClaimer is used during parallel iteration over the heap,
 615 // allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
 616 // The eden and survivor spaces are treated as single blocks as it is hard to divide
 617 // these spaces.
 618 // The old space is divided into fixed-size blocks.
 619 class HeapBlockClaimer : public StackObj {
 620   Atomic<size_t> _claimed_index;
 621 
 622 public:
 623   static const size_t InvalidIndex = SIZE_MAX;
 624   static const size_t EdenIndex = 0;
 625   static const size_t SurvivorIndex = 1;
 626   static const size_t NumNonOldGenClaims = 2;

 729 bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
       // Describe what (if anything) lives at addr; returns true if printed.
 730   return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
 731 }
 732 
 733 void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
       // Null checks guard against printing before the generations are set up
       // (presumably reachable during early initialization — not shown here).
 734   if (young_gen() != nullptr) {
 735     young_gen()->print_on(st);
 736   }
 737   if (old_gen() != nullptr) {
 738     old_gen()->print_on(st);
 739   }
 740 }
 741 
 742 void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
       // Print barrier-set state (if any), then the state of whichever
       // full-gc compactor is active (selected by UseCompactObjectHeaders).
 743   BarrierSet* bs = BarrierSet::barrier_set();
 744   if (bs != nullptr) {
 745     bs->print_on(st);
 746   }
 747   st->cr();
 748 
 749   if (UseCompactObjectHeaders) {
 750     PSParallelCompactNew::print_on(st);
 751   } else {
 752     PSParallelCompact::print_on(st);
 753   }
 754 }
 755 
 756 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
       // Apply tc to every parallel GC worker thread.
 757   ParallelScavengeHeap::heap()->workers().threads_do(tc);
 758 }
 759 
 760 void ParallelScavengeHeap::print_tracing_info() const {
       // Log cumulative young-gc and full-gc times (gc+heap+exit channel);
       // the full-gc time comes from the active compactor variant.
 761   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
 762   if (UseCompactObjectHeaders) {
 763     log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
 764   } else {
 765     log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
 766   }
 767 }
 768 
 769 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
       // Snapshot used/capacity of each generation (and eden/from spaces)
       // before a GC, so the change can be reported afterwards
       // (see print_heap_change below).
 770   const PSYoungGen* const young = young_gen();
 771   const MutableSpace* const eden = young->eden_space();
 772   const MutableSpace* const from = young->from_space();
 773   const PSOldGen* const old = old_gen();
 774 
 775   return PreGenGCValues(young->used_in_bytes(),
 776                         young->capacity_in_bytes(),
 777                         eden->used_in_bytes(),
 778                         eden->capacity_in_bytes(),
 779                         from->used_in_bytes(),
 780                         from->capacity_in_bytes(),
 781                         old->used_in_bytes(),
 782                         old->capacity_in_bytes());
 783 }
 784 
 785 void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
 786   const PSYoungGen* const young = young_gen();
< prev index next >