
src/hotspot/share/gc/serial/defNewGeneration.cpp


683   _gc_timer->register_gc_end();
684 
685   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
686 
687   return !_promotion_failed;
688 }
689 
690 void DefNewGeneration::init_assuming_no_promotion_failure() {
691   _promotion_failed = false;
692   _promotion_failed_info.reset();
693 }
694 
695 void DefNewGeneration::remove_forwarding_pointers() {
696   assert(_promotion_failed, "precondition");
697 
698   // Will enter Full GC soon due to failed promotion. Must reset the mark word
699   // of objs in young-gen so that no objs are marked (forwarded) when Full GC
700   // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
701   struct ResetForwardedMarkWord : ObjectClosure {
702     void do_object(oop obj) override {
703       if (obj->is_self_forwarded()) {
704         obj->unset_self_forwarded();
705       } else if (obj->is_forwarded()) {
706         // To restore the klass-bits in the header.
707         // Needed for object iteration to work properly.
708         obj->set_mark(obj->forwardee()->prototype_mark());
709       }
710     }
711   } cl;
712   eden()->object_iterate(&cl);
713   from()->object_iterate(&cl);
714 }
715 
716 void DefNewGeneration::handle_promotion_failure(oop old) {
717   log_debug(gc, promotion)("Promotion failure size = %zu", old->size());
718 
719   _promotion_failed = true;
720   _promotion_failed_info.register_copy_failure(old->size());
721 
722   ContinuationGCSupport::transform_stack_chunk(old);
723 
724   // forward to self
725   old->forward_to_self();
726 
727   _promo_failure_scan_stack.push(old);
728 
729   if (!_promo_failure_drain_in_progress) {
730     // prevent recursion in copy_to_survivor_space()
731     _promo_failure_drain_in_progress = true;
732     drain_promo_failure_scan_stack();
733     _promo_failure_drain_in_progress = false;
734   }
735 }
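
The _promo_failure_drain_in_progress flag above is a plain re-entrancy guard: draining the scan stack calls back into copy_to_survivor_space(), which may hit further promotion failures; those inner calls only push onto the stack and return, and the outer drain loop picks the new entries up. A minimal, standalone sketch of that pattern with hypothetical names (not HotSpot code):

#include <vector>

struct PromoFailureDrainSketch {
  std::vector<int> scan_stack;      // stands in for _promo_failure_scan_stack
  bool drain_in_progress = false;   // stands in for _promo_failure_drain_in_progress

  void handle_failure(int obj) {
    scan_stack.push_back(obj);
    if (!drain_in_progress) {
      drain_in_progress = true;     // block recursive drains
      while (!scan_stack.empty()) {
        int o = scan_stack.back();
        scan_stack.pop_back();
        scan(o);                    // may call handle_failure() again; it only pushes
      }
      drain_in_progress = false;
    }
  }

  void scan(int /*obj*/) { /* visiting references may report more failures */ }
};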
736 
737 oop DefNewGeneration::copy_to_survivor_space(oop old) {
738   assert(is_in_reserved(old) && !old->is_forwarded(),
739          "shouldn't be scavenging this oop");
740   size_t s = old->size();
741   oop obj = nullptr;
742 
743   // Try allocating obj in to-space (unless too old)
744   if (old->age() < tenuring_threshold()) {
745     obj = cast_to_oop(to()->allocate(s));
746   }
747 
748   bool new_obj_is_tenured = false;
749   // Otherwise try allocating obj tenured
750   if (obj == nullptr) {
751     obj = _old_gen->allocate_for_promotion(old, s);
752     if (obj == nullptr) {
753       handle_promotion_failure(old);
754       return old;
755     }
756 
757     new_obj_is_tenured = true;
758   }
759 
760   // Prefetch beyond obj
761   const intx interval = PrefetchCopyIntervalInBytes;
762   Prefetch::write(obj, interval);
763 
764   // Copy obj
765   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
766 
767   ContinuationGCSupport::transform_stack_chunk(obj);
768 
769   if (!new_obj_is_tenured) {
770     // Increment age if obj still in new generation
771     obj->incr_age();
772     age_table()->add(obj, s);
773   }
774 
775   // Done, insert forward pointer to obj in this header
776   old->forward_to(obj);
777 
778   if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
779     // Record old; request adds a new weak reference, which reference
780     // processing expects to refer to a from-space object.
781     _string_dedup_requests.add(old);
782   }
783   return obj;
784 }
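
The assert at the top of copy_to_survivor_space() spells out its contract: callers must not pass an object that is already forwarded, and on promotion failure the function returns old itself, which handle_promotion_failure() has just self-forwarded. A hedged sketch of the caller-side pattern this implies; the helper name is hypothetical and the real scanning closures live elsewhere in the Serial GC sources:

// Hypothetical helper, not actual HotSpot closure code.
oop evacuate_or_follow(DefNewGeneration* young_gen, oop obj) {
  if (obj->is_forwarded()) {
    // Already handled this cycle (copied, or self-forwarded after a failed
    // promotion); follow the forwarding pointer.
    return obj->forwardee();
  }
  // Returns the new copy, or obj itself after self-forwarding it when
  // promotion failed.
  return young_gen->copy_to_survivor_space(obj);
}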
785 
786 void DefNewGeneration::drain_promo_failure_scan_stack() {
787   PromoteFailureClosure cl{this};
788   while (!_promo_failure_scan_stack.is_empty()) {
789     oop obj = _promo_failure_scan_stack.pop();
790     obj->oop_iterate(&cl);
791   }
792 }
793 
794 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {

683   _gc_timer->register_gc_end();
684 
685   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
686 
687   return !_promotion_failed;
688 }
689 
690 void DefNewGeneration::init_assuming_no_promotion_failure() {
691   _promotion_failed = false;
692   _promotion_failed_info.reset();
693 }
694 
695 void DefNewGeneration::remove_forwarding_pointers() {
696   assert(_promotion_failed, "precondition");
697 
698   // Will enter Full GC soon due to failed promotion. Must reset the mark word
699   // of objs in young-gen so that no objs are marked (forwarded) when Full GC
700   // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
701   struct ResetForwardedMarkWord : ObjectClosure {
702     void do_object(oop obj) override {
703       obj->reset_forwarded();
704     }
705   } cl;
706   eden()->object_iterate(&cl);
707   from()->object_iterate(&cl);
708 }
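
Compared with the old version above, the two cases the ResetForwardedMarkWord closure handled inline (a self-forwarded object versus one forwarded to a real copy) are now folded into a single reset_forwarded() call on the object. A sketch of what that helper presumably does, reconstructed from the removed code; this is an assumption, not the actual oopDesc implementation:

// Assumed shape of the helper, mirroring the removed closure logic.
void oopDesc::reset_forwarded() {
  if (is_self_forwarded()) {
    unset_self_forwarded();
  } else if (is_forwarded()) {
    // Restore the klass bits in the header so object iteration keeps working.
    set_mark(forwardee()->prototype_mark());
  }
}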
709 
710 void DefNewGeneration::handle_promotion_failure(oop old) {
711   log_debug(gc, promotion)("Promotion failure size = %zu", old->size());
712 
713   _promotion_failed = true;
714   _promotion_failed_info.register_copy_failure(old->size());
715 
716   ContinuationGCSupport::transform_stack_chunk(old);
717 
718   // forward to self
719   old->forward_to_self();
720 
721   _promo_failure_scan_stack.push(old);
722 
723   if (!_promo_failure_drain_in_progress) {
724     // prevent recursion in copy_to_survivor_space()
725     _promo_failure_drain_in_progress = true;
726     drain_promo_failure_scan_stack();
727     _promo_failure_drain_in_progress = false;
728   }
729 }
730 
731 oop DefNewGeneration::copy_to_survivor_space(oop old) {
732   assert(is_in_reserved(old) && !old->is_forwarded(),
733          "shouldn't be scavenging this oop");
734   size_t old_size = old->size();
735   size_t s = old->copy_size(old_size, old->mark());
736 
737   oop obj = nullptr;
738 
739   // Try allocating obj in to-space (unless too old)
740   if (old->age() < tenuring_threshold()) {
741     obj = cast_to_oop(to()->allocate(s));
742   }
743 
744   bool new_obj_is_tenured = false;
745   // Otherwise try allocating obj tenured
746   if (obj == nullptr) {
747     obj = _old_gen->allocate_for_promotion(old, s);
748     if (obj == nullptr) {
749       handle_promotion_failure(old);
750       return old;
751     }
752 
753     new_obj_is_tenured = true;
754   }
755 
756   // Prefetch beyond obj
757   const intx interval = PrefetchCopyIntervalInBytes;
758   Prefetch::write(obj, interval);
759 
760   // Copy obj
761   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), old_size);
762 
763   ContinuationGCSupport::transform_stack_chunk(obj);
764 
765   if (!new_obj_is_tenured) {
766     // Increment age if obj still in new generation
767     obj->incr_age();
768     age_table()->add(obj, s);
769   }
770 
771   obj->initialize_hash_if_necessary(old);
772 
773   // Done, insert forward pointer to obj in this header
774   old->forward_to(obj);
775 
776   if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
777     // Record old; request adds a new weak reference, which reference
778     // processing expects to refer to a from-space object.
779     _string_dedup_requests.add(old);
780   }
781   return obj;
782 }
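
New in this version: the size used for the allocation (s, from copy_size()) is kept separate from the size that is actually copied (old_size), and initialize_hash_if_necessary(old) runs on the copy afterwards. The copy loop only transfers old_size words, so the code relies on s >= old_size; presumably any extra space holds data that exists only in the destination, such as an expanded identity hash, which initialize_hash_if_necessary() then fills in. A sketch of that assumed relationship; the predicate name is hypothetical and the real rule lives in oopDesc::copy_size():

// Hypothetical illustration of the invariant assumed above.
size_t copy_size_sketch(size_t old_size, markWord m) {
  // Assumed: some objects need extra destination space (e.g. a word for the
  // identity hash); everything else is copied at its original size.
  size_t extra = needs_hash_expansion(m) ? 1 : 0;  // hypothetical predicate
  return old_size + extra;                         // never less than old_size
}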
783 
784 void DefNewGeneration::drain_promo_failure_scan_stack() {
785   PromoteFailureClosure cl{this};
786   while (!_promo_failure_scan_stack.is_empty()) {
787     oop obj = _promo_failure_scan_stack.pop();
788     obj->oop_iterate(&cl);
789   }
790 }
791 
792 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {