 *
 */

#include "precompiled.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

  OldGenScanClosure* _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};
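
// Illustrative sketch, not part of the original file: scan_evacuated_objs()
// is the classic Cheney-style "evacuate followers" loop. Conceptually,
// assuming hypothetical scanned-watermark bookkeeping:
//
//   while (young gen or old gen has unscanned evacuated objects) {
//     for (oop obj in the unscanned tail of to-space)       apply young_cl to its fields;
//     for (oop obj in the newly promoted tail of old gen)   apply old_cl to its fields;
//     // Scanning may evacuate more objects, growing both unscanned tails.
//   }
//
// Since this is a VoidClosure, it can also serve as the complete_gc closure
// during reference processing, re-draining whatever the Reference handling
// newly evacuated.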

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promotion_failed(false),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false),
    _string_dedup_requests()
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap* gch = SerialHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);
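
// Worked example (editor's illustration, not original source): with a 64M
// reserved young generation and the default SurvivorRatio of 8,
// compute_survivor_size() comes out to roughly 64M / (SurvivorRatio + 2),
// i.e. about 6.4M per survivor space (aligned down to SpaceAlignment),
// leaving _max_eden_size = 64M - 2 * 6.4M, about 51.2M, for eden.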
  SerialHeap* heap = SerialHeap::heap();

  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  {
    StrongRootsScope srs(0);
    RootScanClosure root_cl{this};
    CLDScanClosure cld_cl{this};

    MarkingNMethodClosure code_cl(&root_cl,
                                  NMethodToOopClosure::FixRelocations,
                                  false /* keepalive_nmethods */);

    HeapWord* saved_top_in_old_gen = _old_gen->space()->top();
    heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
                        &root_cl,
    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    heap->set_incremental_collection_failed();

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return !_promotion_failed;
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_forwarded()) {
        obj->init_mark();
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);

  restore_preserved_marks();
}
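
// Editor's note (not in the original file): while an object is forwarded,
// its mark word holds the forwarding pointer, so the closure above cannot
// tell "marked" from "forwarded"; hence the blanket init_mark(), which
// reinstalls the default prototype mark. Marks that carried real state
// (locks, identity hashes) were saved via _preserved_marks_set at
// promotion-failure time and are put back by the restore_preserved_marks()
// call just above.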

void DefNewGeneration::restore_preserved_marks() {
  _preserved_marks_set.restore(nullptr);
}
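
// Sketch of the preserved-marks protocol, for orientation (the real API is
// in gc/shared/preservedMarks.hpp): push_if_necessary(obj, mark) records the
// (obj, mark) pair only when mark.must_be_preserved() is true, i.e. when the
// mark word carries information such as a lock or an identity hash that
// forwarding would clobber; restore() then writes each saved mark back into
// its object. The nullptr argument here selects the serial restore path,
// since Serial GC has no worker threads to hand in.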

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
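
// How the failure is observed elsewhere (editor's sketch, not original
// code): a promotion-failed object is forwarded to itself, so code that
// chases forwarding pointers sees
//
//   oop fwd = obj->forwardee();
//   if (fwd == obj) { /* promotion failed; object stays in young gen */ }
//
// The object is also pushed on _promo_failure_scan_stack so that its fields
// still get scanned, keeping everything it references alive for the Full GC
// that follows.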

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
 *
 */

#include "precompiled.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

  OldGenScanClosure* _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promotion_failed(false),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false),
    _string_dedup_requests()
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap* gch = SerialHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);
  SerialHeap* heap = SerialHeap::heap();

  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  {
    StrongRootsScope srs(0);
    RootScanClosure root_cl{this};
    CLDScanClosure cld_cl{this};

    MarkingNMethodClosure code_cl(&root_cl,
                                  NMethodToOopClosure::FixRelocations,
                                  false /* keepalive_nmethods */);

    HeapWord* saved_top_in_old_gen = _old_gen->space()->top();
    heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
                        &root_cl,
    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    heap->set_incremental_collection_failed();

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return !_promotion_failed;
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_self_forwarded()) {
        obj->unset_self_forwarded();
      } else if (obj->is_forwarded()) {
        obj->forward_safe_init_mark();
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);
}
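
// Editor's note (an assumption based on the accessor names, not stated in
// the file): in this newer variant the self-forwarded state is a dedicated
// bit in the mark word rather than a pointer to self, so
// unset_self_forwarded() can clear it while leaving the rest of the mark
// (locks, identity hash) intact. Only genuinely forwarded objects, whose
// mark was replaced by a forwarding pointer, get a fresh mark installed via
// forward_safe_init_mark(). This is what lets this version drop the
// preserved-marks bookkeeping that the older version above still carries.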

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to_self();

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
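
// Contrast with the older version of this function above (editor's note):
// there the mark had to be saved with push_if_necessary() before
// old->forward_to(old) overwrote it; forward_to_self() encodes the failure
// without destroying the interesting mark bits, so no preservation step is
// needed before pushing the object on the scan stack.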

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));