
src/hotspot/share/gc/serial/defNewGeneration.cpp

678   _gc_timer->register_gc_end();
679 
680   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
681 
682   return !_promotion_failed;
683 }
684 
685 void DefNewGeneration::init_assuming_no_promotion_failure() {
686   _promotion_failed = false;
687   _promotion_failed_info.reset();
688 }
689 
690 void DefNewGeneration::remove_forwarding_pointers() {
691   assert(_promotion_failed, "precondition");
692 
693   // Will enter Full GC soon due to failed promotion. Must reset the mark word
694   // of objs in young-gen so that no objs are marked (forwarded) when Full GC
695   // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
696   struct ResetForwardedMarkWord : ObjectClosure {
697     void do_object(oop obj) override {
698       if (obj->is_self_forwarded()) {
699         obj->unset_self_forwarded();
700       } else if (obj->is_forwarded()) {
701         // To restore the klass-bits in the header.
702         // Needed for object iteration to work properly.
703         obj->set_mark(obj->forwardee()->prototype_mark());
704       }
705     }
706   } cl;
707   eden()->object_iterate(&cl);
708   from()->object_iterate(&cl);
709 }
710 
711 void DefNewGeneration::handle_promotion_failure(oop old) {
712   log_debug(gc, promotion)("Promotion failure size = %zu", old->size());
713 
714   _promotion_failed = true;
715   _promotion_failed_info.register_copy_failure(old->size());
716 
717   ContinuationGCSupport::transform_stack_chunk(old);
718 
719   // forward to self
720   old->forward_to_self();
721 
722   _promo_failure_scan_stack.push(old);
723 
724   if (!_promo_failure_drain_in_progress) {
725     // prevent recursion in copy_to_survivor_space()
726     _promo_failure_drain_in_progress = true;
727     drain_promo_failure_scan_stack();
728     _promo_failure_drain_in_progress = false;
729   }
730 }
731 
732 oop DefNewGeneration::copy_to_survivor_space(oop old) {
733   assert(is_in_reserved(old) && !old->is_forwarded(),
734          "shouldn't be scavenging this oop");
735   size_t s = old->size();
736   oop obj = nullptr;
737 
738   // Try allocating obj in to-space (unless too old)
739   if (old->age() < tenuring_threshold()) {
740     obj = cast_to_oop(to()->allocate(s));
741   }
742 
743   bool new_obj_is_tenured = false;
744   // Otherwise try allocating obj tenured
745   if (obj == nullptr) {
746     obj = _old_gen->allocate_for_promotion(old, s);
747     if (obj == nullptr) {
748       handle_promotion_failure(old);
749       return old;
750     }
751 
752     new_obj_is_tenured = true;
753   }
754 
755   // Prefetch beyond obj
756   const intx interval = PrefetchCopyIntervalInBytes;
757   Prefetch::write(obj, interval);
758 
759   // Copy obj
760   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
761 
762   ContinuationGCSupport::transform_stack_chunk(obj);
763 
764   if (!new_obj_is_tenured) {
765     // Increment age if obj still in new generation
766     obj->incr_age();
767     age_table()->add(obj, s);
768   }
769 
770   // Done, insert forward pointer to obj in this header
771   old->forward_to(obj);
772 
773   if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
774     // Record old; request adds a new weak reference, which reference
775     // processing expects to refer to a from-space object.
776     _string_dedup_requests.add(old);
777   }
778   return obj;
779 }
780 
781 void DefNewGeneration::drain_promo_failure_scan_stack() {
782   PromoteFailureClosure cl{this};
783   while (!_promo_failure_scan_stack.is_empty()) {
784      oop obj = _promo_failure_scan_stack.pop();
785      obj->oop_iterate(&cl);
786   }
787 }
788 
789 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {

678   _gc_timer->register_gc_end();
679 
680   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
681 
682   return !_promotion_failed;
683 }
684 
685 void DefNewGeneration::init_assuming_no_promotion_failure() {
686   _promotion_failed = false;
687   _promotion_failed_info.reset();
688 }
689 
690 void DefNewGeneration::remove_forwarding_pointers() {
691   assert(_promotion_failed, "precondition");
692 
693   // Will enter Full GC soon due to failed promotion. Must reset the mark word
694   // of objs in young-gen so that no objs are marked (forwarded) when Full GC
695   // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
696   struct ResetForwardedMarkWord : ObjectClosure {
697     void do_object(oop obj) override {
698       obj->reset_forwarded();
699     }
700   } cl;
701   eden()->object_iterate(&cl);
702   from()->object_iterate(&cl);
703 }
704 
705 void DefNewGeneration::handle_promotion_failure(oop old) {
706   log_debug(gc, promotion)("Promotion failure size = %zu", old->size());
707 
708   _promotion_failed = true;
709   _promotion_failed_info.register_copy_failure(old->size());
710 
711   ContinuationGCSupport::transform_stack_chunk(old);
712 
713   // forward to self
714   old->forward_to_self();
715 
716   _promo_failure_scan_stack.push(old);
717 
718   if (!_promo_failure_drain_in_progress) {
719     // prevent recursion in copy_to_survivor_space()
720     _promo_failure_drain_in_progress = true;
721     drain_promo_failure_scan_stack();
722     _promo_failure_drain_in_progress = false;
723   }
724 }
725 
726 oop DefNewGeneration::copy_to_survivor_space(oop old) {
727   assert(is_in_reserved(old) && !old->is_forwarded(),
728          "shouldn't be scavenging this oop");
729   size_t old_size = old->size();
730   size_t s = old->copy_size(old_size, old->mark());
731 
732   oop obj = nullptr;
733 
734   // Try allocating obj in to-space (unless too old)
735   if (old->age() < tenuring_threshold()) {
736     obj = cast_to_oop(to()->allocate(s));
737   }
738 
739   bool new_obj_is_tenured = false;
740   // Otherwise try allocating obj tenured
741   if (obj == nullptr) {
742     obj = _old_gen->allocate_for_promotion(old, s);
743     if (obj == nullptr) {
744       handle_promotion_failure(old);
745       return old;
746     }
747 
748     new_obj_is_tenured = true;
749   }
750 
751   // Prefetch beyond obj
752   const intx interval = PrefetchCopyIntervalInBytes;
753   Prefetch::write(obj, interval);
754 
755   // Copy obj
756   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), old_size);
757 
758   ContinuationGCSupport::transform_stack_chunk(obj);
759 
760   if (!new_obj_is_tenured) {
761     // Increment age if obj still in new generation
762     obj->incr_age();
763     age_table()->add(obj, s);
764   }
765 
766   obj->initialize_hash_if_necessary(old);
767 
768   // Done, insert forward pointer to obj in this header
769   old->forward_to(obj);
770 
771   if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
772     // Record old; request adds a new weak reference, which reference
773     // processing expects to refer to a from-space object.
774     _string_dedup_requests.add(old);
775   }
776   return obj;
777 }
778 
779 void DefNewGeneration::drain_promo_failure_scan_stack() {
780   PromoteFailureClosure cl{this};
781   while (!_promo_failure_scan_stack.is_empty()) {
782      oop obj = _promo_failure_scan_stack.pop();
783      obj->oop_iterate(&cl);
784   }
785 }
786 
787 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {