  // Used for BOT update
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

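  // Reserve `words` words at the current compaction point, advancing to the
  // next destination space when the current one is exhausted. Allocations that
  // land in old-gen also update the block offset table (BOT).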
  HeapWord* alloc(size_t words) {
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  // ...

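  // Scan [start, end) and return the address of the first marked (live) object,
  // or end if none is found.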
  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  };

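  // Copy one marked object to its forwarding address and reset its mark word;
  // returns the object's size in words.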
  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
    assert(addr != new_addr, "inv");
    prefetch_write_copy(new_addr);

    size_t obj_size = obj->size();
    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    new_obj->init_mark();

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

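  // Phase 2: walk each space, forward every marked object to its new address,
  // and record dead ranges so the later phases can skip them quickly.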
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size);
          forward_obj(obj, new_addr);
          cur_addr += obj_size;
        } else {
          // Skipping the current known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            alloc(pointer_delta(next_live_addr, cur_addr));
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

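  // Phase 3: visit each space and update references in live objects so that
  // they point at the forwarded locations.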
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);

// ...

  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Some marks may contain information we need to preserve, so we store them away
  // and overwrite the mark. We'll restore the saved marks at the end of the serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(obj->prototype_mark().set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>

// ...

  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

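// Serial full GC entry point: runs marking (phase 1), new-address computation
// (phase 2), pointer adjustment (phase 3) and compaction (phase 4), all inside a safepoint.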
void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    // ...
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  restore_marks();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}
  // Used for BOT update
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

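  // Reserve space for one object at the current compaction point. An object that
  // stays at its current address keeps its original size; one that moves is given
  // its (possibly different) copy size. Falls through to the next destination space
  // when the current one is exhausted; old-gen allocations also update the BOT.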
  HeapWord* alloc(size_t old_size, size_t new_size, HeapWord* old_obj) {
    size_t words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
      words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  // ...

  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  };

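  // Copy one marked object to its forwarding address (unless it stays in place),
  // reset its mark word, and for moved objects call initialize_hash_if_necessary()
  // on the new copy; returns the object's size in words.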
  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);

    size_t obj_size = obj->size();
    if (addr != new_addr) {
      prefetch_write_copy(new_addr);
      Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    }
    new_obj->init_mark();
    if (addr != new_addr) {
      new_obj->initialize_hash_if_necessary(obj);
    }

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

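  // Phase 2: walk each space, compute each marked object's copy size, forward it
  // to its new address, and record dead ranges for the later phases.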
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        size_t new_size = obj->copy_size(obj_size, obj->mark());
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size, new_size, cur_addr);
          forward_obj(obj, new_addr);
          assert(obj->size() == obj_size, "size must not change after forwarding");
          cur_addr += obj_size;
        } else {
          // Skipping the current known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            size_t size = pointer_delta(next_live_addr, cur_addr);
            alloc(size, size, cur_addr);
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

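  // Phase 3: visit each space and update references in live objects so that
  // they point at the forwarded locations.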
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);

// ...

  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Some marks may contain information we need to preserve, so we store them away
  // and overwrite the mark. We'll restore the saved marks at the end of the serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(mark.set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>

// ...

  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

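  // Forwarding support is set up here and released by FullGCForwarding::end()
  // after the marks have been restored below.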
  FullGCForwarding::begin();

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    // ...
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  restore_marks();

  FullGCForwarding::end();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}