src/hotspot/share/gc/serial/serialFullGC.cpp

 34 #include "compiler/compileBroker.hpp"
 35 #include "compiler/oopMap.hpp"
 36 #include "gc/serial/cardTableRS.hpp"
 37 #include "gc/serial/defNewGeneration.hpp"
 38 #include "gc/serial/serialFullGC.hpp"
 39 #include "gc/serial/serialGcRefProcProxyTask.hpp"
 40 #include "gc/serial/serialHeap.hpp"
 41 #include "gc/serial/serialStringDedup.hpp"
 42 #include "gc/shared/classUnloadingContext.hpp"
 43 #include "gc/shared/collectedHeap.inline.hpp"
 44 #include "gc/shared/continuationGCSupport.inline.hpp"
 45 #include "gc/shared/gcHeapSummary.hpp"
 46 #include "gc/shared/gcTimer.hpp"
 47 #include "gc/shared/gcTrace.hpp"
 48 #include "gc/shared/gcTraceTime.inline.hpp"
 49 #include "gc/shared/gc_globals.hpp"
 50 #include "gc/shared/modRefBarrierSet.hpp"
 51 #include "gc/shared/preservedMarks.inline.hpp"
 52 #include "gc/shared/referencePolicy.hpp"
 53 #include "gc/shared/referenceProcessorPhaseTimes.hpp"

 54 #include "gc/shared/space.inline.hpp"
 55 #include "gc/shared/strongRootsScope.hpp"
 56 #include "gc/shared/weakProcessor.hpp"
 57 #include "memory/iterator.inline.hpp"
 58 #include "memory/universe.hpp"
 59 #include "oops/access.inline.hpp"
 60 #include "oops/compressedOops.inline.hpp"
 61 #include "oops/instanceRefKlass.hpp"
 62 #include "oops/markWord.hpp"
 63 #include "oops/methodData.hpp"
 64 #include "oops/objArrayKlass.inline.hpp"
 65 #include "oops/oop.inline.hpp"
 66 #include "oops/typeArrayOop.inline.hpp"
 67 #include "runtime/prefetch.inline.hpp"
 68 #include "utilities/align.hpp"
 69 #include "utilities/copy.hpp"
 70 #include "utilities/events.hpp"
 71 #include "utilities/stack.inline.hpp"
 72 #if INCLUDE_JVMCI
 73 #include "jvmci/jvmci.hpp"

211     if (PrefetchScanIntervalInBytes >= 0) {
212       Prefetch::read(p, PrefetchScanIntervalInBytes);
213     }
214   }
215 
216   static void prefetch_write_scan(void* p) {
217     if (PrefetchScanIntervalInBytes >= 0) {
218       Prefetch::write(p, PrefetchScanIntervalInBytes);
219     }
220   }
221 
222   static void prefetch_write_copy(void* p) {
223     if (PrefetchCopyIntervalInBytes >= 0) {
224       Prefetch::write(p, PrefetchCopyIntervalInBytes);
225     }
226   }
227 
228   static void forward_obj(oop obj, HeapWord* new_addr) {
229     prefetch_write_scan(obj);
230     if (cast_from_oop<HeapWord*>(obj) != new_addr) {
231       obj->forward_to(cast_to_oop(new_addr));
232     } else {
233       assert(obj->is_gc_marked(), "inv");
234       // This obj will stay in-place. Fix the markword.
235       obj->init_mark();
236     }
237   }
238 
239   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
240     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
241       prefetch_read_scan(i_addr);
242       oop obj = cast_to_oop(i_addr);
243       if (obj->is_gc_marked()) {
244         return i_addr;
245       }
246       i_addr += obj->size();
247     }
248     return end;
249   };
250 
251   static size_t relocate(HeapWord* addr) {
252     // Prefetch source and destination
253     prefetch_read_scan(addr);
254 
255     oop obj = cast_to_oop(addr);
256     oop new_obj = obj->forwardee();
257     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
258     assert(addr != new_addr, "inv");
259     prefetch_write_copy(new_addr);
260 
261     size_t obj_size = obj->size();
262     Copy::aligned_conjoint_words(addr, new_addr, obj_size);
263     new_obj->init_mark();
264 
265     return obj_size;
266   }
267 
268 public:
269   explicit Compacter(SerialHeap* heap) {
270     // In this order so that heap is compacted towards old-gen.
271     _spaces[0].init(heap->old_gen()->space());
272     _spaces[1].init(heap->young_gen()->eden());
273     _spaces[2].init(heap->young_gen()->from());
274 
275     bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
276     if (is_promotion_failed) {

332       while (cur_addr < top) {
333         prefetch_write_scan(cur_addr);
334         if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
335           size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
336           cur_addr += size;
337         } else {
338           assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
339           cur_addr = *(HeapWord**)cur_addr;
340         }
341       }
342     }
343   }
344 
345   void phase4_compact() {
346     for (uint i = 0; i < _num_spaces; ++i) {
347       ContiguousSpace* space = get_space(i);
348       HeapWord* cur_addr = space->bottom();
349       HeapWord* top = space->top();
350 
351       // Check if the first obj inside this space is forwarded.
352       if (!cast_to_oop(cur_addr)->is_forwarded()) {
353         // Jump over consecutive (in-place) live-objs-chunk
354         cur_addr = get_first_dead(i);
355       }
356 
357       while (cur_addr < top) {
358         if (!cast_to_oop(cur_addr)->is_forwarded()) {
359           cur_addr = *(HeapWord**) cur_addr;
360           continue;
361         }
362         cur_addr += relocate(cur_addr);
363       }
364 
365       // Reset top and unused memory
366       space->set_top(get_compaction_top(i));
367       if (ZapUnusedHeapArea) {
368         space->mangle_unused_area();
369       }
370     }
371   }
372 };
373 
374 template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
375   mark_and_push(p);
376 }
377 
378 void SerialFullGC::push_objarray(oop obj, size_t index) {

569 }
570 
571 void SerialFullGC::deallocate_stacks() {
572   if (_preserved_count_max != 0) {
573     DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
574     young_gen->reset_scratch();
575   }
576 
577   _preserved_overflow_stack_set.reclaim();
578   _marking_stack.clear();
579   _objarray_stack.clear(true);
580 }
581 
582 void SerialFullGC::mark_object(oop obj) {
583   if (StringDedup::is_enabled() &&
584       java_lang_String::is_instance(obj) &&
585       SerialStringDedup::is_candidate_from_mark(obj)) {
586     _string_dedup_requests->add(obj);
587   }
588 




589   // some marks may contain information we need to preserve so we store them away
590   // and overwrite the mark.  We'll restore it at the end of serial full GC.
591   markWord mark = obj->mark();
592   obj->set_mark(markWord::prototype().set_marked());
593 
594   ContinuationGCSupport::transform_stack_chunk(obj);
595 
596   if (obj->mark_must_be_preserved(mark)) {
597     preserve_mark(obj, mark);
598   }
599 }
600 
601 template <class T> void SerialFullGC::mark_and_push(T* p) {
602   T heap_oop = RawAccess<>::oop_load(p);
603   if (!CompressedOops::is_null(heap_oop)) {
604     oop obj = CompressedOops::decode_not_null(heap_oop);
605     if (!obj->mark().is_marked()) {
606       mark_object(obj);
607       _marking_stack.push(obj);
608     }
609   }
610 }
611 
612 template <typename T>
613 void MarkAndPushClosure::do_oop_work(T* p)            { SerialFullGC::mark_and_push(p); }
614 void MarkAndPushClosure::do_oop(      oop* p)         { do_oop_work(p); }
615 void MarkAndPushClosure::do_oop(narrowOop* p)         { do_oop_work(p); }
616 
617 template <class T> void SerialFullGC::adjust_pointer(T* p) {
618   T heap_oop = RawAccess<>::oop_load(p);
619   if (!CompressedOops::is_null(heap_oop)) {
620     oop obj = CompressedOops::decode_not_null(heap_oop);
621     assert(Universe::heap()->is_in(obj), "should be in heap");
622 
623     if (obj->is_forwarded()) {
624       oop new_obj = obj->forwardee();
625       assert(is_object_aligned(new_obj), "oop must be aligned");
626       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
627     }
628   }
629 }
630 
631 template <typename T>
632 void AdjustPointerClosure::do_oop_work(T* p)           { SerialFullGC::adjust_pointer(p); }
633 inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
634 inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
635 
636 AdjustPointerClosure SerialFullGC::adjust_pointer_closure;
637 
638 void SerialFullGC::adjust_marks() {
639   // adjust the oops we saved earlier
640   for (size_t i = 0; i < _preserved_count; i++) {
641     PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
642   }
643 
644   // deal with the overflow stack

683   SerialHeap* gch = SerialHeap::heap();
684 #ifdef ASSERT
685   if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
686     assert(clear_all_softrefs, "Policy should have been checked earlier");
687   }
688 #endif
689 
690   gch->trace_heap_before_gc(_gc_tracer);
691 
692   // Increment the invocation count
693   _total_invocations++;
694 
695   // Capture used regions for old-gen to reestablish old-to-young invariant
696   // after full-gc.
697   gch->old_gen()->save_used_region();
698 
699   allocate_stacks();
700 
701   phase1_mark(clear_all_softrefs);
702 


703   Compacter compacter{gch};
704 
705   {
706     // Now all live objects are marked, compute the new object addresses.
707     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
708 
709     compacter.phase2_calculate_new_addr();
710   }
711 
712   // Don't add any more derived pointers during phase3
713 #if COMPILER2_OR_JVMCI
714   assert(DerivedPointerTable::is_active(), "Sanity");
715   DerivedPointerTable::set_active(false);
716 #endif
717 
718   {
719     // Adjust the pointers to reflect the new locations
720     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
721 
722     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

730 
731     WeakProcessor::oops_do(&adjust_pointer_closure);
732 
733     adjust_marks();
734     compacter.phase3_adjust_pointers();
735   }
736 
737   {
738     // All pointers are now adjusted, move objects accordingly
739     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
740 
741     compacter.phase4_compact();
742   }
743 
744   restore_marks();
745 
746   // Set saved marks for allocation profiler (and other things? -- dld)
747   // (Should this be in general part?)
748   gch->save_marks();
749 


750   deallocate_stacks();
751 
752   SerialFullGC::_string_dedup_requests->flush();
753 
754   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
755   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
756 
757   gch->prune_scavengable_nmethods();
758 
759   // Update heap occupancy information which is used as
760   // input to soft ref clearing policy at the next gc.
761   Universe::heap()->update_capacity_and_used_at_gc();
762 
763   // Signal that we have completed a visit to all live objects.
764   Universe::heap()->record_whole_heap_examined_timestamp();
765 
766   gch->trace_heap_after_gc(_gc_tracer);
767 }
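The frame above is the version of serialFullGC.cpp that forwards objects through the object header itself: phase 2 records each destination with obj->forward_to(), and phases 3 and 4 read it back via obj->is_forwarded() and obj->forwardee(). Below is a minimal, self-contained sketch of that header-based forwarding scheme; the FakeObj type and MARKED constant are invented for illustration only and are not the real HotSpot oopDesc/markWord API. The frame that follows shows the same file with those calls routed through SlidingForwarding instead.

// Hedged sketch of header-word forwarding in the style of the frame above.
// FakeObj and MARKED are illustrative stand-ins, not HotSpot types.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct FakeObj {
  // A single header word: normally holds the mark, but during compaction it is
  // overwritten with the destination address (which is why "interesting" marks
  // must be preserved separately, as preserve_mark()/restore_marks() do above).
  uintptr_t header;
  static constexpr uintptr_t MARKED = 0x3;  // stand-in for a marked mark word

  bool is_forwarded() const { return header != MARKED; }

  void forward_to(FakeObj* new_addr) {       // phase 2: record the destination
    header = reinterpret_cast<uintptr_t>(new_addr);
  }

  FakeObj* forwardee() const {               // phases 3/4: read the destination back
    assert(is_forwarded() && "object was never forwarded");
    return reinterpret_cast<FakeObj*>(header);
  }
};

int main() {
  FakeObj obj{FakeObj::MARKED};
  FakeObj dest{FakeObj::MARKED};
  obj.forward_to(&dest);
  std::printf("forwarded correctly: %d\n", obj.forwardee() == &dest);
  return 0;
}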

 34 #include "compiler/compileBroker.hpp"
 35 #include "compiler/oopMap.hpp"
 36 #include "gc/serial/cardTableRS.hpp"
 37 #include "gc/serial/defNewGeneration.hpp"
 38 #include "gc/serial/serialFullGC.hpp"
 39 #include "gc/serial/serialGcRefProcProxyTask.hpp"
 40 #include "gc/serial/serialHeap.hpp"
 41 #include "gc/serial/serialStringDedup.hpp"
 42 #include "gc/shared/classUnloadingContext.hpp"
 43 #include "gc/shared/collectedHeap.inline.hpp"
 44 #include "gc/shared/continuationGCSupport.inline.hpp"
 45 #include "gc/shared/gcHeapSummary.hpp"
 46 #include "gc/shared/gcTimer.hpp"
 47 #include "gc/shared/gcTrace.hpp"
 48 #include "gc/shared/gcTraceTime.inline.hpp"
 49 #include "gc/shared/gc_globals.hpp"
 50 #include "gc/shared/modRefBarrierSet.hpp"
 51 #include "gc/shared/preservedMarks.inline.hpp"
 52 #include "gc/shared/referencePolicy.hpp"
 53 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 54 #include "gc/shared/slidingForwarding.inline.hpp"
 55 #include "gc/shared/space.inline.hpp"
 56 #include "gc/shared/strongRootsScope.hpp"
 57 #include "gc/shared/weakProcessor.hpp"
 58 #include "memory/iterator.inline.hpp"
 59 #include "memory/universe.hpp"
 60 #include "oops/access.inline.hpp"
 61 #include "oops/compressedOops.inline.hpp"
 62 #include "oops/instanceRefKlass.hpp"
 63 #include "oops/markWord.hpp"
 64 #include "oops/methodData.hpp"
 65 #include "oops/objArrayKlass.inline.hpp"
 66 #include "oops/oop.inline.hpp"
 67 #include "oops/typeArrayOop.inline.hpp"
 68 #include "runtime/prefetch.inline.hpp"
 69 #include "utilities/align.hpp"
 70 #include "utilities/copy.hpp"
 71 #include "utilities/events.hpp"
 72 #include "utilities/stack.inline.hpp"
 73 #if INCLUDE_JVMCI
 74 #include "jvmci/jvmci.hpp"

212     if (PrefetchScanIntervalInBytes >= 0) {
213       Prefetch::read(p, PrefetchScanIntervalInBytes);
214     }
215   }
216 
217   static void prefetch_write_scan(void* p) {
218     if (PrefetchScanIntervalInBytes >= 0) {
219       Prefetch::write(p, PrefetchScanIntervalInBytes);
220     }
221   }
222 
223   static void prefetch_write_copy(void* p) {
224     if (PrefetchCopyIntervalInBytes >= 0) {
225       Prefetch::write(p, PrefetchCopyIntervalInBytes);
226     }
227   }
228 
229   static void forward_obj(oop obj, HeapWord* new_addr) {
230     prefetch_write_scan(obj);
231     if (cast_from_oop<HeapWord*>(obj) != new_addr) {
232       SlidingForwarding::forward_to(obj, cast_to_oop(new_addr));
233     } else {
234       assert(obj->is_gc_marked(), "inv");
235       // This obj will stay in-place. Fix the markword.
236       obj->init_mark();
237     }
238   }
239 
240   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
241     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
242       prefetch_read_scan(i_addr);
243       oop obj = cast_to_oop(i_addr);
244       if (obj->is_gc_marked()) {
245         return i_addr;
246       }
247       i_addr += obj->size();
248     }
249     return end;
250   };
251 
252   static size_t relocate(HeapWord* addr) {
253     // Prefetch source and destination
254     prefetch_read_scan(addr);
255 
256     oop obj = cast_to_oop(addr);
257     oop new_obj = SlidingForwarding::forwardee(obj);
258     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
259     assert(addr != new_addr, "inv");
260     prefetch_write_copy(new_addr);
261 
262     size_t obj_size = obj->size();
263     Copy::aligned_conjoint_words(addr, new_addr, obj_size);
264     new_obj->init_mark();
265 
266     return obj_size;
267   }
268 
269 public:
270   explicit Compacter(SerialHeap* heap) {
271     // In this order so that heap is compacted towards old-gen.
272     _spaces[0].init(heap->old_gen()->space());
273     _spaces[1].init(heap->young_gen()->eden());
274     _spaces[2].init(heap->young_gen()->from());
275 
276     bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
277     if (is_promotion_failed) {

333       while (cur_addr < top) {
334         prefetch_write_scan(cur_addr);
335         if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
336           size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
337           cur_addr += size;
338         } else {
339           assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
340           cur_addr = *(HeapWord**)cur_addr;
341         }
342       }
343     }
344   }
345 
346   void phase4_compact() {
347     for (uint i = 0; i < _num_spaces; ++i) {
348       ContiguousSpace* space = get_space(i);
349       HeapWord* cur_addr = space->bottom();
350       HeapWord* top = space->top();
351 
352       // Check if the first obj inside this space is forwarded.
353       if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
354         // Jump over consecutive (in-place) live-objs-chunk
355         cur_addr = get_first_dead(i);
356       }
357 
358       while (cur_addr < top) {
359         if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
360           cur_addr = *(HeapWord**) cur_addr;
361           continue;
362         }
363         cur_addr += relocate(cur_addr);
364       }
365 
366       // Reset top and unused memory
367       space->set_top(get_compaction_top(i));
368       if (ZapUnusedHeapArea) {
369         space->mangle_unused_area();
370       }
371     }
372   }
373 };
374 
375 template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
376   mark_and_push(p);
377 }
378 
379 void SerialFullGC::push_objarray(oop obj, size_t index) {

570 }
571 
572 void SerialFullGC::deallocate_stacks() {
573   if (_preserved_count_max != 0) {
574     DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
575     young_gen->reset_scratch();
576   }
577 
578   _preserved_overflow_stack_set.reclaim();
579   _marking_stack.clear();
580   _objarray_stack.clear(true);
581 }
582 
583 void SerialFullGC::mark_object(oop obj) {
584   if (StringDedup::is_enabled() &&
585       java_lang_String::is_instance(obj) &&
586       SerialStringDedup::is_candidate_from_mark(obj)) {
587     _string_dedup_requests->add(obj);
588   }
589 
590   // Do the transform while we still have the header intact,
591   // which might include important class information.
592   ContinuationGCSupport::transform_stack_chunk(obj);
593 
594   // some marks may contain information we need to preserve so we store them away
595   // and overwrite the mark.  We'll restore it at the end of serial full GC.
596   markWord mark = obj->mark();
597   obj->set_mark(obj->prototype_mark().set_marked());


598 
599   if (obj->mark_must_be_preserved(mark)) {
600     preserve_mark(obj, mark);
601   }
602 }
603 
604 template <class T> void SerialFullGC::mark_and_push(T* p) {
605   T heap_oop = RawAccess<>::oop_load(p);
606   if (!CompressedOops::is_null(heap_oop)) {
607     oop obj = CompressedOops::decode_not_null(heap_oop);
608     if (!obj->mark().is_marked()) {
609       mark_object(obj);
610       _marking_stack.push(obj);
611     }
612   }
613 }
614 
615 template <typename T>
616 void MarkAndPushClosure::do_oop_work(T* p)            { SerialFullGC::mark_and_push(p); }
617 void MarkAndPushClosure::do_oop(      oop* p)         { do_oop_work(p); }
618 void MarkAndPushClosure::do_oop(narrowOop* p)         { do_oop_work(p); }
619 
620 template <class T> void SerialFullGC::adjust_pointer(T* p) {
621   T heap_oop = RawAccess<>::oop_load(p);
622   if (!CompressedOops::is_null(heap_oop)) {
623     oop obj = CompressedOops::decode_not_null(heap_oop);
624     assert(Universe::heap()->is_in(obj), "should be in heap");
625 
626     if (SlidingForwarding::is_forwarded(obj)) {
627       oop new_obj = SlidingForwarding::forwardee(obj);
628       assert(is_object_aligned(new_obj), "oop must be aligned");
629       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
630     }
631   }
632 }
633 
634 template <typename T>
635 void AdjustPointerClosure::do_oop_work(T* p)           { SerialFullGC::adjust_pointer(p); }
636 inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
637 inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
638 
639 AdjustPointerClosure SerialFullGC::adjust_pointer_closure;
640 
641 void SerialFullGC::adjust_marks() {
642   // adjust the oops we saved earlier
643   for (size_t i = 0; i < _preserved_count; i++) {
644     PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
645   }
646 
647   // deal with the overflow stack

686   SerialHeap* gch = SerialHeap::heap();
687 #ifdef ASSERT
688   if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
689     assert(clear_all_softrefs, "Policy should have been checked earlier");
690   }
691 #endif
692 
693   gch->trace_heap_before_gc(_gc_tracer);
694 
695   // Increment the invocation count
696   _total_invocations++;
697 
698   // Capture used regions for old-gen to reestablish old-to-young invariant
699   // after full-gc.
700   gch->old_gen()->save_used_region();
701 
702   allocate_stacks();
703 
704   phase1_mark(clear_all_softrefs);
705 
706   SlidingForwarding::begin();
707 
708   Compacter compacter{gch};
709 
710   {
711     // Now all live objects are marked, compute the new object addresses.
712     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
713 
714     compacter.phase2_calculate_new_addr();
715   }
716 
717   // Don't add any more derived pointers during phase3
718 #if COMPILER2_OR_JVMCI
719   assert(DerivedPointerTable::is_active(), "Sanity");
720   DerivedPointerTable::set_active(false);
721 #endif
722 
723   {
724     // Adjust the pointers to reflect the new locations
725     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
726 
727     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

735 
736     WeakProcessor::oops_do(&adjust_pointer_closure);
737 
738     adjust_marks();
739     compacter.phase3_adjust_pointers();
740   }
741 
742   {
743     // All pointers are now adjusted, move objects accordingly
744     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
745 
746     compacter.phase4_compact();
747   }
748 
749   restore_marks();
750 
751   // Set saved marks for allocation profiler (and other things? -- dld)
752   // (Should this be in general part?)
753   gch->save_marks();
754 
755   SlidingForwarding::end();
756 
757   deallocate_stacks();
758 
759   SerialFullGC::_string_dedup_requests->flush();
760 
761   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
762   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
763 
764   gch->prune_scavengable_nmethods();
765 
766   // Update heap occupancy information which is used as
767   // input to soft ref clearing policy at the next gc.
768   Universe::heap()->update_capacity_and_used_at_gc();
769 
770   // Signal that we have completed a visit to all live objects.
771   Universe::heap()->record_whole_heap_examined_timestamp();
772 
773   gch->trace_heap_after_gc(_gc_tracer);
774 }
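
In this second frame, the compactor no longer reads or writes the forwarding pointer through the oop directly: SlidingForwarding::begin() and SlidingForwarding::end() bracket the compaction, phase 2 records destinations with SlidingForwarding::forward_to(), and phases 3 and 4 query SlidingForwarding::is_forwarded(), is_not_forwarded(), and forwardee(). Below is a rough sketch of a facade exposing that call shape; the hash-map storage and the ForwardingFacade/Obj names are assumptions made purely for illustration and do not reflect how slidingForwarding.inline.hpp actually stores or encodes forwarding information.

// Hedged sketch: a begin/forward_to/forwardee/end facade mirroring the calls above.
#include <cassert>
#include <cstdio>
#include <unordered_map>

struct Obj { int payload; };

class ForwardingFacade {
  // Live only between begin() and end(); maps an object to its new location.
  static std::unordered_map<const Obj*, Obj*>* _table;
public:
  static void begin() { _table = new std::unordered_map<const Obj*, Obj*>(); }
  static void end()   { delete _table; _table = nullptr; }

  static void forward_to(const Obj* obj, Obj* new_addr) { (*_table)[obj] = new_addr; }
  static bool is_forwarded(const Obj* obj)     { return _table->count(obj) != 0; }
  static bool is_not_forwarded(const Obj* obj) { return !is_forwarded(obj); }

  static Obj* forwardee(const Obj* obj) {
    assert(is_forwarded(obj) && "queried before forward_to");
    return _table->at(obj);
  }
};

std::unordered_map<const Obj*, Obj*>* ForwardingFacade::_table = nullptr;

int main() {
  Obj obj{1}, dest{2};
  ForwardingFacade::begin();                  // like SlidingForwarding::begin()
  ForwardingFacade::forward_to(&obj, &dest);  // phase 2
  std::printf("forwarded correctly: %d\n", ForwardingFacade::forwardee(&obj) == &dest);
  ForwardingFacade::end();                    // like SlidingForwarding::end()
  return 0;
}

A design point visible in the diff itself: all forwarding reads and writes now flow through a single class, so the underlying encoding can change without altering the structure of phases 2 through 4 in the collector.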