 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
// ...

  _old_gen = new PSOldGen(
    old_rs,
    OldSize,
    MinOldSize,
    MaxOldSize);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;

  _size_policy = new PSAdaptiveSizePolicy(SpaceAlignment,
                                          max_gc_pause_sec);

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);

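  // With compact object headers enabled, the parallel full-gc is handled by
  // the new implementation (PSParallelCompactNew); full-gc call sites in this
  // file branch on UseCompactObjectHeaders accordingly.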
  if (UseCompactObjectHeaders) {
    if (!PSParallelCompactNew::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  } else {
    if (!PSParallelCompact::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
  // ...
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::post_initialize();
  } else {
    PSParallelCompact::post_initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}

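// Track whether the heap is almost full. The flag acts as a simple
// hysteresis: set when a full-gc leaves eden non-empty, cleared once a
// later gc manages to empty eden again.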
void ParallelScavengeHeap::gc_epilogue(bool full) {
  if (_is_heap_almost_full) {
    // Reset the emergency state if eden is empty after a young/full gc.
    if (_young_gen->eden_space()->is_empty()) {
      log_debug(gc)("Leaving memory-constrained state; back to normal");
      _is_heap_almost_full = false;
    }
  } else {
    if (full && !_young_gen->eden_space()->is_empty()) {
      log_debug(gc)("Non-empty young-gen after full-gc; entering memory-constrained state");
      _is_heap_almost_full = true;
    }
  }
}
// ...
        return op.result();
      }
    }

    // Was the gc-overhead limit reached inside the safepoint? If so, this
    // mutator should return null as well for global consistency.
    if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
      return nullptr;
    }

    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times, size=%zu", loop_count, size);
    }
  }
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // No need for max-compaction in this context.
  const bool should_do_max_compaction = false;
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
  } else {
    PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
  }
}

bool ParallelScavengeHeap::should_attempt_young_gc() const {
  const bool ShouldRunYoungGC = true;
  const bool ShouldRunFullGC = false;

  if (!_young_gen->to_space()->is_empty()) {
    log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
    return ShouldRunFullGC;
  }

  // Check whether the predicted promoted bytes would overflow the free space in old-gen.
  PSAdaptiveSizePolicy* policy = _size_policy;

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
  // Total free size after possible old-gen expansion.
  size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
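  // Illustrative example: with a padded average of 64M promoted per young-gc
  // but only 40M currently used in young-gen, the estimate is capped at 40M;
  // the young-gc only pays off if old-gen has at least that much free space
  // after expanding to its maximum size.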

  log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
  // ...

  if (!_is_heap_almost_full) {
    // If young-gen can handle this allocation, attempt a young-gc first, as
    // it is usually cheaper.
    bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);

    collect_at_safepoint(!should_run_young_gc);

    // If the gc-overhead limit has been reached, skip the allocation attempt.
    if (!check_gc_overhead_limit()) {
      result = expand_heap_and_allocate(size, is_tlab);
      if (result != nullptr) {
        return result;
      }
    }
  }

  // Last-resort GC; clear soft refs and do max-compaction before throwing OOM.
  {
    const bool clear_all_soft_refs = true;
    const bool should_do_max_compaction = true;
    if (UseCompactObjectHeaders) {
      PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
    } else {
      PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
    }
  }

  if (check_gc_overhead_limit()) {
    log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
    return nullptr;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

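// TLABs are carved out of eden, so the TLAB queries below simply delegate
// to the eden space.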
size_t ParallelScavengeHeap::tlab_capacity() const {
  return young_gen()->eden_space()->tlab_capacity();
}

size_t ParallelScavengeHeap::tlab_used() const {
  return young_gen()->eden_space()->tlab_used();
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc() const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc();
}

// ...
    full_gc_count = total_full_collections();
  }

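  // The collection counts captured above let the VM operation detect whether
  // another GC has already completed by the time it reaches its safepoint,
  // so a redundant request can be skipped.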
  VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);

  if (!is_full && should_attempt_young_gc()) {
    bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
    if (young_gc_success) {
      return;
    }
    log_debug(gc, heap)("Upgrading to full-gc since young-gc failed.");
  }

  const bool should_do_max_compaction = false;
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_soft_refs, should_do_max_compaction);
  } else {
    PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks") and gain exclusive rights
// to them. The eden and survivor spaces are each treated as a single block,
// since they are hard to subdivide; the old space is divided into
// fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

 public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;
  // ...
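
// Illustrative sketch (not from the original file): how a worker might drain
// the claimer. The claim_and_get_block() name is an assumption about the
// elided part of the class; indices at or above NumNonOldGenClaims would map
// to fixed-size old-gen blocks.
//
//   void iterate_worker(ObjectClosure* cl, HeapBlockClaimer* claimer) {
//     for (size_t i = claimer->claim_and_get_block();
//          i != HeapBlockClaimer::InvalidIndex;
//          i = claimer->claim_and_get_block()) {
//       // i == EdenIndex / SurvivorIndex selects a young space;
//       // each larger index selects one old-gen block.
//     }
//   }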

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
}

void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
  st->cr();

  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::print_on(st);
  } else {
    PSParallelCompact::print_on(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  if (UseCompactObjectHeaders) {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
  } else {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();