35 #include "compiler/oopMap.hpp"
36 #include "gc/serial/cardTableRS.hpp"
37 #include "gc/serial/defNewGeneration.hpp"
38 #include "gc/serial/serialFullGC.hpp"
39 #include "gc/serial/serialGcRefProcProxyTask.hpp"
40 #include "gc/serial/serialHeap.hpp"
41 #include "gc/serial/serialStringDedup.hpp"
42 #include "gc/serial/tenuredGeneration.inline.hpp"
43 #include "gc/shared/classUnloadingContext.hpp"
44 #include "gc/shared/collectedHeap.inline.hpp"
45 #include "gc/shared/continuationGCSupport.inline.hpp"
46 #include "gc/shared/gcHeapSummary.hpp"
47 #include "gc/shared/gcTimer.hpp"
48 #include "gc/shared/gcTrace.hpp"
49 #include "gc/shared/gcTraceTime.inline.hpp"
50 #include "gc/shared/gc_globals.hpp"
51 #include "gc/shared/modRefBarrierSet.hpp"
52 #include "gc/shared/preservedMarks.inline.hpp"
53 #include "gc/shared/referencePolicy.hpp"
54 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
55 #include "gc/shared/space.hpp"
56 #include "gc/shared/strongRootsScope.hpp"
57 #include "gc/shared/weakProcessor.hpp"
58 #include "memory/iterator.inline.hpp"
59 #include "memory/universe.hpp"
60 #include "oops/access.inline.hpp"
61 #include "oops/compressedOops.inline.hpp"
62 #include "oops/instanceRefKlass.hpp"
63 #include "oops/markWord.hpp"
64 #include "oops/methodData.hpp"
65 #include "oops/objArrayKlass.inline.hpp"
66 #include "oops/oop.inline.hpp"
67 #include "oops/typeArrayOop.inline.hpp"
68 #include "runtime/prefetch.inline.hpp"
69 #include "utilities/align.hpp"
70 #include "utilities/copy.hpp"
71 #include "utilities/events.hpp"
72 #include "utilities/stack.inline.hpp"
73 #if INCLUDE_JVMCI
74 #include "jvmci/jvmci.hpp"
213 if (PrefetchScanIntervalInBytes >= 0) {
214 Prefetch::read(p, PrefetchScanIntervalInBytes);
215 }
216 }
217
218 static void prefetch_write_scan(void* p) {
219 if (PrefetchScanIntervalInBytes >= 0) {
220 Prefetch::write(p, PrefetchScanIntervalInBytes);
221 }
222 }
223
224 static void prefetch_write_copy(void* p) {
225 if (PrefetchCopyIntervalInBytes >= 0) {
226 Prefetch::write(p, PrefetchCopyIntervalInBytes);
227 }
228 }
229
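// Install the forwarding pointer for a live obj. Objs that will not move keep
// their current address; only their mark word is reset to the prototype.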
230 static void forward_obj(oop obj, HeapWord* new_addr) {
231 prefetch_write_scan(obj);
232 if (cast_from_oop<HeapWord*>(obj) != new_addr) {
233 obj->forward_to(cast_to_oop(new_addr));
234 } else {
235 assert(obj->is_gc_marked(), "inv");
236 // This obj will stay in place. Fix the mark word.
237 obj->init_mark();
238 }
239 }
240
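// Scan [start, end) and return the address of the first marked (live) obj,
// or end if no live obj remains in the range.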
241 static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
242 for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
243 prefetch_read_scan(i_addr);
244 oop obj = cast_to_oop(i_addr);
245 if (obj->is_gc_marked()) {
246 return i_addr;
247 }
248 i_addr += obj->size();
249 }
250 return end;
251 }
252
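// Copy a forwarded obj to its new location, reinitialize the mark word of the
// copy, and return the obj size so the caller can advance past the source.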
253 static size_t relocate(HeapWord* addr) {
254 // Prefetch source and destination
255 prefetch_read_scan(addr);
256
257 oop obj = cast_to_oop(addr);
258 oop new_obj = obj->forwardee();
259 HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
260 assert(addr != new_addr, "inv");
261 prefetch_write_copy(new_addr);
262
263 size_t obj_size = obj->size();
264 Copy::aligned_conjoint_words(addr, new_addr, obj_size);
265 new_obj->init_mark();
266
267 return obj_size;
268 }
269
270 public:
271 explicit Compacter(SerialHeap* heap) {
272 // In this order so that the heap is compacted towards the old-gen.
273 _spaces[0].init(heap->old_gen()->space());
274 _spaces[1].init(heap->young_gen()->eden());
275 _spaces[2].init(heap->young_gen()->from());
276
277 bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
278 if (is_promotion_failed) {
335 while (cur_addr < top) {
336 prefetch_write_scan(cur_addr);
337 if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
338 size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
339 cur_addr += size;
340 } else {
341 assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
342 cur_addr = *(HeapWord**)cur_addr;
343 }
344 }
345 }
346 }
347
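// Phase 4: slide each forwarded obj to its new address, skipping dead ranges
// via the next-live address stored in their first word, then reset each
// space's top.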
348 void phase4_compact() {
349 for (uint i = 0; i < _num_spaces; ++i) {
350 ContiguousSpace* space = get_space(i);
351 HeapWord* cur_addr = space->bottom();
352 HeapWord* top = space->top();
353
354 // Check if the first obj inside this space is forwarded.
355 if (!cast_to_oop(cur_addr)->is_forwarded()) {
356 // Jump over the consecutive chunk of in-place live objs.
357 cur_addr = get_first_dead(i);
358 }
359
360 while (cur_addr < top) {
361 if (!cast_to_oop(cur_addr)->is_forwarded()) {
362 cur_addr = *(HeapWord**) cur_addr;
363 continue;
364 }
365 cur_addr += relocate(cur_addr);
366 }
367
368 // Reset top; mangle the now-unused memory if requested.
369 HeapWord* new_top = get_compaction_top(i);
370 space->set_top(new_top);
371 if (ZapUnusedHeapArea && new_top < top) {
372 space->mangle_unused_area(MemRegion(new_top, top));
373 }
374 }
375 }
376 };
377
378 template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
379 mark_and_push(p);
380 }
381
573 }
574
575 void SerialFullGC::deallocate_stacks() {
576 if (_preserved_count_max != 0) {
577 DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
578 young_gen->reset_scratch();
579 }
580
581 _preserved_overflow_stack_set.reclaim();
582 _marking_stack.clear();
583 _objarray_stack.clear(true);
584 }
585
586 void SerialFullGC::mark_object(oop obj) {
587 if (StringDedup::is_enabled() &&
588 java_lang_String::is_instance(obj) &&
589 SerialStringDedup::is_candidate_from_mark(obj)) {
590 _string_dedup_requests->add(obj);
591 }
592
593 // Some marks may contain information we need to preserve, so we store them away
594 // and overwrite the mark. We'll restore them at the end of the serial full GC.
595 markWord mark = obj->mark();
596 obj->set_mark(markWord::prototype().set_marked());
597
598 ContinuationGCSupport::transform_stack_chunk(obj);
599
600 if (obj->mark_must_be_preserved(mark)) {
601 preserve_mark(obj, mark);
602 }
603 }
604
605 template <class T> void SerialFullGC::mark_and_push(T* p) {
606 T heap_oop = RawAccess<>::oop_load(p);
607 if (!CompressedOops::is_null(heap_oop)) {
608 oop obj = CompressedOops::decode_not_null(heap_oop);
609 if (!obj->mark().is_marked()) {
610 mark_object(obj);
611 _marking_stack.push(obj);
612 }
613 }
614 }
615
616 template <typename T>
617 void MarkAndPushClosure::do_oop_work(T* p) { SerialFullGC::mark_and_push(p); }
618 void MarkAndPushClosure::do_oop( oop* p) { do_oop_work(p); }
619 void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }
620
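// Pointer-adjustment primitive for phase 3: if the obj referenced by *p has
// been forwarded, update the reference in place to the new address.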
621 template <class T> void SerialFullGC::adjust_pointer(T* p) {
622 T heap_oop = RawAccess<>::oop_load(p);
623 if (!CompressedOops::is_null(heap_oop)) {
624 oop obj = CompressedOops::decode_not_null(heap_oop);
625 assert(Universe::heap()->is_in(obj), "should be in heap");
626
627 if (obj->is_forwarded()) {
628 oop new_obj = obj->forwardee();
629 assert(is_object_aligned(new_obj), "oop must be aligned");
630 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
631 }
632 }
633 }
634
635 template <typename T>
636 void AdjustPointerClosure::do_oop_work(T* p) { SerialFullGC::adjust_pointer(p); }
637 inline void AdjustPointerClosure::do_oop(oop* p) { do_oop_work(p); }
638 inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
639
640 AdjustPointerClosure SerialFullGC::adjust_pointer_closure;
641
642 void SerialFullGC::adjust_marks() {
643 // adjust the oops we saved earlier
644 for (size_t i = 0; i < _preserved_count; i++) {
645 PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
646 }
647
648 // deal with the overflow stack
679 // to discovery, hence the _always_true_closure.
680 SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
681 mark_and_push_closure.set_ref_discoverer(_ref_processor);
682 }
683
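// Entry point for the serial full GC. Runs the four phases below: mark live
// objs (phase 1), compute new addresses (phase 2), adjust pointers (phase 3),
// and move objs (phase 4).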
684 void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
685 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
686
687 SerialHeap* gch = SerialHeap::heap();
688
689 gch->trace_heap_before_gc(_gc_tracer);
690
691 // Capture the used regions of the old-gen so we can reestablish the
692 // old-to-young invariant after the full GC.
693 gch->old_gen()->save_used_region();
694
695 allocate_stacks();
696
697 phase1_mark(clear_all_softrefs);
698
699 Compacter compacter{gch};
700
701 {
702 // Now all live objects are marked, compute the new object addresses.
703 GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
704
705 compacter.phase2_calculate_new_addr();
706 }
707
708 // Don't add any more derived pointers during phase 3
709 #if COMPILER2_OR_JVMCI
710 assert(DerivedPointerTable::is_active(), "Sanity");
711 DerivedPointerTable::set_active(false);
712 #endif
713
714 {
715 // Adjust the pointers to reflect the new locations
716 GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
717
718 ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
722 &adjust_pointer_closure,
723 &adjust_cld_closure,
724 &adjust_cld_closure,
725 &code_closure);
726
727 WeakProcessor::oops_do(&adjust_pointer_closure);
728
729 adjust_marks();
730 compacter.phase3_adjust_pointers();
731 }
732
733 {
734 // All pointers are now adjusted, move objects accordingly
735 GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
736
737 compacter.phase4_compact();
738 }
739
740 restore_marks();
741
742 deallocate_stacks();
743
744 SerialFullGC::_string_dedup_requests->flush();
745
746 bool is_young_gen_empty = (gch->young_gen()->used() == 0);
747 gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
748
749 gch->prune_scavengable_nmethods();
750
751 // Update heap occupancy information, which is used as
752 // input to the soft-ref clearing policy at the next GC.
753 Universe::heap()->update_capacity_and_used_at_gc();
754
755 // Signal that we have completed a visit to all live objects.
756 Universe::heap()->record_whole_heap_examined_timestamp();
757
758 gch->trace_heap_after_gc(_gc_tracer);
759 }
|
35 #include "compiler/oopMap.hpp"
36 #include "gc/serial/cardTableRS.hpp"
37 #include "gc/serial/defNewGeneration.hpp"
38 #include "gc/serial/serialFullGC.hpp"
39 #include "gc/serial/serialGcRefProcProxyTask.hpp"
40 #include "gc/serial/serialHeap.hpp"
41 #include "gc/serial/serialStringDedup.hpp"
42 #include "gc/serial/tenuredGeneration.inline.hpp"
43 #include "gc/shared/classUnloadingContext.hpp"
44 #include "gc/shared/collectedHeap.inline.hpp"
45 #include "gc/shared/continuationGCSupport.inline.hpp"
46 #include "gc/shared/gcHeapSummary.hpp"
47 #include "gc/shared/gcTimer.hpp"
48 #include "gc/shared/gcTrace.hpp"
49 #include "gc/shared/gcTraceTime.inline.hpp"
50 #include "gc/shared/gc_globals.hpp"
51 #include "gc/shared/modRefBarrierSet.hpp"
52 #include "gc/shared/preservedMarks.inline.hpp"
53 #include "gc/shared/referencePolicy.hpp"
54 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
55 #include "gc/shared/slidingForwarding.inline.hpp"
56 #include "gc/shared/space.hpp"
57 #include "gc/shared/strongRootsScope.hpp"
58 #include "gc/shared/weakProcessor.hpp"
59 #include "memory/iterator.inline.hpp"
60 #include "memory/universe.hpp"
61 #include "oops/access.inline.hpp"
62 #include "oops/compressedOops.inline.hpp"
63 #include "oops/instanceRefKlass.hpp"
64 #include "oops/markWord.hpp"
65 #include "oops/methodData.hpp"
66 #include "oops/objArrayKlass.inline.hpp"
67 #include "oops/oop.inline.hpp"
68 #include "oops/typeArrayOop.inline.hpp"
69 #include "runtime/prefetch.inline.hpp"
70 #include "utilities/align.hpp"
71 #include "utilities/copy.hpp"
72 #include "utilities/events.hpp"
73 #include "utilities/stack.inline.hpp"
74 #if INCLUDE_JVMCI
75 #include "jvmci/jvmci.hpp"
214 if (PrefetchScanIntervalInBytes >= 0) {
215 Prefetch::read(p, PrefetchScanIntervalInBytes);
216 }
217 }
218
219 static void prefetch_write_scan(void* p) {
220 if (PrefetchScanIntervalInBytes >= 0) {
221 Prefetch::write(p, PrefetchScanIntervalInBytes);
222 }
223 }
224
225 static void prefetch_write_copy(void* p) {
226 if (PrefetchCopyIntervalInBytes >= 0) {
227 Prefetch::write(p, PrefetchCopyIntervalInBytes);
228 }
229 }
230
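// Record the new location of a live obj through SlidingForwarding. Objs that
// will not move only have their mark word reset to the prototype.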
231 static void forward_obj(oop obj, HeapWord* new_addr) {
232 prefetch_write_scan(obj);
233 if (cast_from_oop<HeapWord*>(obj) != new_addr) {
234 SlidingForwarding::forward_to(obj, cast_to_oop(new_addr));
235 } else {
236 assert(obj->is_gc_marked(), "inv");
237 // This obj will stay in place. Fix the mark word.
238 obj->init_mark();
239 }
240 }
241
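// Scan [start, end) and return the address of the first marked (live) obj,
// or end if no live obj remains in the range.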
242 static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
243 for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
244 prefetch_read_scan(i_addr);
245 oop obj = cast_to_oop(i_addr);
246 if (obj->is_gc_marked()) {
247 return i_addr;
248 }
249 i_addr += obj->size();
250 }
251 return end;
252 }
253
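// Copy a forwarded obj to its new location, reinitialize the mark word of the
// copy, and return the obj size so the caller can advance past the source.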
254 static size_t relocate(HeapWord* addr) {
255 // Prefetch source and destination
256 prefetch_read_scan(addr);
257
258 oop obj = cast_to_oop(addr);
259 oop new_obj = SlidingForwarding::forwardee(obj);
260 HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
261 assert(addr != new_addr, "inv");
262 prefetch_write_copy(new_addr);
263
264 size_t obj_size = obj->size();
265 Copy::aligned_conjoint_words(addr, new_addr, obj_size);
266 new_obj->init_mark();
267
268 return obj_size;
269 }
270
271 public:
272 explicit Compacter(SerialHeap* heap) {
274 // In this order so that the heap is compacted towards the old-gen.
274 _spaces[0].init(heap->old_gen()->space());
275 _spaces[1].init(heap->young_gen()->eden());
276 _spaces[2].init(heap->young_gen()->from());
277
278 bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
279 if (is_promotion_failed) {
336 while (cur_addr < top) {
337 prefetch_write_scan(cur_addr);
338 if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
339 size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
340 cur_addr += size;
341 } else {
342 assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
343 cur_addr = *(HeapWord**)cur_addr;
344 }
345 }
346 }
347 }
348
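// Phase 4: slide each forwarded obj to its new address, skipping dead ranges
// via the next-live address stored in their first word, then reset each
// space's top.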
349 void phase4_compact() {
350 for (uint i = 0; i < _num_spaces; ++i) {
351 ContiguousSpace* space = get_space(i);
352 HeapWord* cur_addr = space->bottom();
353 HeapWord* top = space->top();
354
355 // Check if the first obj inside this space is forwarded.
356 if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
357 // Jump over the consecutive chunk of in-place live objs.
358 cur_addr = get_first_dead(i);
359 }
360
361 while (cur_addr < top) {
362 if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
363 cur_addr = *(HeapWord**) cur_addr;
364 continue;
365 }
366 cur_addr += relocate(cur_addr);
367 }
368
369 // Reset top; mangle the now-unused memory if requested.
370 HeapWord* new_top = get_compaction_top(i);
371 space->set_top(new_top);
372 if (ZapUnusedHeapArea && new_top < top) {
373 space->mangle_unused_area(MemRegion(new_top, top));
374 }
375 }
376 }
377 };
378
379 template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
380 mark_and_push(p);
381 }
382
574 }
575
576 void SerialFullGC::deallocate_stacks() {
577 if (_preserved_count_max != 0) {
578 DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
579 young_gen->reset_scratch();
580 }
581
582 _preserved_overflow_stack_set.reclaim();
583 _marking_stack.clear();
584 _objarray_stack.clear(true);
585 }
586
587 void SerialFullGC::mark_object(oop obj) {
588 if (StringDedup::is_enabled() &&
589 java_lang_String::is_instance(obj) &&
590 SerialStringDedup::is_candidate_from_mark(obj)) {
591 _string_dedup_requests->add(obj);
592 }
593
594 // Do the transform while we still have the header intact;
595 // the header might include important class information.
596 ContinuationGCSupport::transform_stack_chunk(obj);
597
598 // Some marks may contain information we need to preserve, so we store them away
599 // and overwrite the mark. We'll restore them at the end of the serial full GC.
600 markWord mark = obj->mark();
601 obj->set_mark(obj->prototype_mark().set_marked());
602
603 if (obj->mark_must_be_preserved(mark)) {
604 preserve_mark(obj, mark);
605 }
606 }
607
608 template <class T> void SerialFullGC::mark_and_push(T* p) {
609 T heap_oop = RawAccess<>::oop_load(p);
610 if (!CompressedOops::is_null(heap_oop)) {
611 oop obj = CompressedOops::decode_not_null(heap_oop);
612 if (!obj->mark().is_marked()) {
613 mark_object(obj);
614 _marking_stack.push(obj);
615 }
616 }
617 }
618
619 template <typename T>
620 void MarkAndPushClosure::do_oop_work(T* p) { SerialFullGC::mark_and_push(p); }
621 void MarkAndPushClosure::do_oop( oop* p) { do_oop_work(p); }
622 void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }
623
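// Pointer-adjustment primitive for phase 3: if the obj referenced by *p has
// been forwarded, update the reference in place to the new address.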
624 template <class T> void SerialFullGC::adjust_pointer(T* p) {
625 T heap_oop = RawAccess<>::oop_load(p);
626 if (!CompressedOops::is_null(heap_oop)) {
627 oop obj = CompressedOops::decode_not_null(heap_oop);
628 assert(Universe::heap()->is_in(obj), "should be in heap");
629
630 if (SlidingForwarding::is_forwarded(obj)) {
631 oop new_obj = SlidingForwarding::forwardee(obj);
632 assert(is_object_aligned(new_obj), "oop must be aligned");
633 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
634 }
635 }
636 }
637
638 template <typename T>
639 void AdjustPointerClosure::do_oop_work(T* p) { SerialFullGC::adjust_pointer(p); }
640 inline void AdjustPointerClosure::do_oop(oop* p) { do_oop_work(p); }
641 inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
642
643 AdjustPointerClosure SerialFullGC::adjust_pointer_closure;
644
645 void SerialFullGC::adjust_marks() {
646 // adjust the oops we saved earlier
647 for (size_t i = 0; i < _preserved_count; i++) {
648 PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
649 }
650
651 // deal with the overflow stack
682 // to discovery, hence the _always_true_closure.
683 SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
684 mark_and_push_closure.set_ref_discoverer(_ref_processor);
685 }
686
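// Entry point for the serial full GC. Runs the four phases below: mark live
// objs (phase 1), compute new addresses (phase 2), adjust pointers (phase 3),
// and move objs (phase 4).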
687 void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
688 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
689
690 SerialHeap* gch = SerialHeap::heap();
691
692 gch->trace_heap_before_gc(_gc_tracer);
693
694 // Capture the used regions of the old-gen so we can reestablish the
695 // old-to-young invariant after the full GC.
696 gch->old_gen()->save_used_region();
697
698 allocate_stacks();
699
700 phase1_mark(clear_all_softrefs);
701
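// Begin sliding forwarding before any forwarding pointers are installed;
// matched by SlidingForwarding::end() after the preserved marks are restored.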
702 SlidingForwarding::begin();
703
704 Compacter compacter{gch};
705
706 {
707 // Now all live objects are marked, compute the new object addresses.
708 GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
709
710 compacter.phase2_calculate_new_addr();
711 }
712
713 // Don't add any more derived pointers during phase 3
714 #if COMPILER2_OR_JVMCI
715 assert(DerivedPointerTable::is_active(), "Sanity");
716 DerivedPointerTable::set_active(false);
717 #endif
718
719 {
720 // Adjust the pointers to reflect the new locations
721 GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
722
723 ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
727 &adjust_pointer_closure,
728 &adjust_cld_closure,
729 &adjust_cld_closure,
730 &code_closure);
731
732 WeakProcessor::oops_do(&adjust_pointer_closure);
733
734 adjust_marks();
735 compacter.phase3_adjust_pointers();
736 }
737
738 {
739 // All pointers are now adjusted, move objects accordingly
740 GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
741
742 compacter.phase4_compact();
743 }
744
745 restore_marks();
746
747 SlidingForwarding::end();
748
749 deallocate_stacks();
750
751 SerialFullGC::_string_dedup_requests->flush();
752
753 bool is_young_gen_empty = (gch->young_gen()->used() == 0);
754 gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
755
756 gch->prune_scavengable_nmethods();
757
758 // Update heap occupancy information, which is used as
759 // input to the soft-ref clearing policy at the next GC.
760 Universe::heap()->update_capacity_and_used_at_gc();
761
762 // Signal that we have completed a visit to all live objects.
763 Universe::heap()->record_whole_heap_examined_timestamp();
764
765 gch->trace_heap_after_gc(_gc_tracer);
766 }
|