    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GCs.
    AdaptiveSizePolicy* size_policy = heap->size_policy();
    size_policy->reset_gc_overhead_limit_count();
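    // The overhead-limit count feeds the UseGCOverheadLimit/GCTimeLimit check
    // (throw OutOfMemoryError when too much time is being spent in full GCs
    // while little memory is reclaimed); a successful scavenge resets that
    // streak.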
    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
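    // After a failed scavenge eden and both survivor spaces can all contain
    // live objects, so the full GC that follows must treat every young space
    // as a compaction space; the chain built below starts at eden and lists
    // the two survivor spaces lowest-address first.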
    // Ensure that compaction spaces are in address-order.
    if (from()->bottom() < to()->bottom()) {
      eden()->set_next_compaction_space(from());
      from()->set_next_compaction_space(to());
      to()->set_next_compaction_space(nullptr);
    } else {
      eden()->set_next_compaction_space(to());
      to()->set_next_compaction_space(from());
      from()->set_next_compaction_space(nullptr);
    }
    heap->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
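
// Promotion-failure support. The helpers below implement the recovery
// protocol driven by collect() above; roughly (sequence shown for
// orientation only, see collect() for the exact control flow):
//
//   init_assuming_no_promotion_failure();  // before the scavenge
//   ...
//   handle_promotion_failure(old);         // for each object that can be
//                                          // neither copied nor promoted:
//                                          // preserve its mark, forward it
//                                          // to itself, queue it for
//                                          // in-place scanning
//   ...
//   remove_forwarding_pointers();          // after the failed scavenge:
//                                          // clear all forwarding state and
//                                          // restore the preserved marks so
//                                          // the following full GC sees
//                                          // clean headers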
void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(nullptr);
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
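  // Illustration (not code in this file): because the same bit pattern
  // encodes both states, a stale forwarding pointer would make the full GC
  // believe the object had already been marked:
  //
  //   obj->forward_to(somewhere);
  //   assert(obj->mark().is_marked(), "forwarded implies marked");
  //
  // hence every forwarded header is reset before marking starts.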
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_forwarded()) {
#ifdef _LP64
        if (UseCompactObjectHeaders) {
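          // With compact object headers the klass is encoded in the mark
          // word, so installing the forwarding pointer clobbered it. Recover
          // the narrow Klass from the forwardee's (possibly displaced) header
          // and rebuild a clean prototype mark that carries it.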
          oop forwardee = obj->forwardee();
          markWord header = forwardee->mark();
          if (header.has_displaced_mark_helper()) {
            header = header.displaced_mark_helper();
          }
          assert(UseCompressedClassPointers, "assume +UseCompressedClassPointers");
          narrowKlass nklass = header.narrow_klass();
          obj->set_mark(markWord::prototype().set_narrow_klass(nklass));
        } else
#endif
        {
          obj->init_mark();
        }
      }
    }
  } cl;
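  // Only eden and from-space are walked: objects sitting in to-space at this
  // point are fresh copies made by this scavenge and never carry forwarding
  // pointers themselves.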
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);

  restore_preserved_marks();
}
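
// handle_promotion_failure() saves the original header of every object whose
// mark word is about to be overwritten by a self-forwarding pointer (only
// "interesting" marks, e.g. locked or hashed ones, actually need saving;
// push_if_necessary() filters out the rest). Restoring with a null workers
// argument replays the saved (object, mark) pairs serially.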
void DefNewGeneration::restore_preserved_marks() {
  _preserved_marks_set.restore(nullptr);
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
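  // A self-forwarded object is the marker for "promotion failed here": later
  // visitors see obj->forwardee() == obj, leave the object in place, and the
  // failure branch in collect() knows the young generation must be left to
  // the upcoming full GC.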
  old->forward_to_self();
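
  // The object stays where it is, but its fields still have to be scanned
  // (they may reference other young objects that do need copying), so queue
  // it for the drain loop below instead of scanning it recursively.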
  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
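
// Copy the given object into to-space, or promote it to the old generation
// if it has aged past the tenuring threshold. The tail of this routine is
// not shown in this excerpt; the usual shape is: if the to-space allocation
// below fails, or the object is old enough, promotion into the old
// generation is attempted, and if that also fails the object is handed to
// handle_promotion_failure() above.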
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));