  // Used for BOT update
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

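  // Bump-pointer allocation into the current destination space, spilling over
  // to the next space when full. Two sizes are passed because an object that
  // does not move keeps its current size (old_size), while a moved object may
  // need extra room (new_size), e.g. for an identity hash preserved under
  // compact object headers (see copy_size and initialize_hash_if_necessary).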
  HeapWord* alloc(size_t old_size, size_t new_size, HeapWord* old_obj) {
    // The object stays in place iff it would be allocated at its own address.
    size_t words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // Out of memory in this space; advance to the next one.
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
      words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    }
  }

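  // Prefetch helpers; a negative interval flag disables prefetching.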
  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

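  // Record the forwarding of obj to new_addr in its markword. In-place objects
  // before the first dead word are not visited by the relocation pass, so
  // their markword is restored immediately; in-place objects after that point
  // are self-forwarded so that relocation still sees them.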
  static void forward_obj(oop obj, HeapWord* new_addr, bool after_first_dead) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      if (!after_first_dead) {
        // This obj will stay in-place and we'll not see it during relocation.
        // Fix the markword.
        obj->init_mark();
      } else {
        FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
      }
    }
  }

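  // Linear scan for the next marked object in [start, end); returns end if
  // none is found.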
  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  }

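  // Copy one object to its forwardee and reinitialize its markword. A moved
  // object may additionally need its identity hash installed, which is
  // handled by initialize_hash_if_necessary.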
  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);

    size_t obj_size = obj->size();
    if (addr != new_addr) {
      prefetch_write_copy(new_addr);
      Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    }
    new_obj->init_mark();
    if (addr != new_addr) {
      new_obj->initialize_hash_if_necessary(obj);
    }

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    // to-space is only non-empty if a preceding scavenge failed to promote
    // everything; in that case it must be compacted as well.
    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

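  // Phase 2: assign a new address to every live object. Short dead runs may
  // be kept as filler objects (see DeadSpacer) so long stretches of live data
  // need not move; otherwise the first word of a dead run records the address
  // of the next live object so later passes can skip the run.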
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        size_t new_size = obj->copy_size(obj_size, obj->mark());
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size, new_size, cur_addr);
          forward_obj(obj, new_addr, record_first_dead_done);
          assert(obj->size() == obj_size, "size must not change after forwarding");
          cur_addr += obj_size;
        } else {
          // Skipping the current known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            size_t size = pointer_delta(next_live_addr, cur_addr);
            alloc(size, size, cur_addr);
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            // Record the next live address in the first word of the dead
            // range, so later passes can skip the whole run in one step.
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

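  // Phase 3: visit the live objects in each space and adjust their fields to
  // point at the forwardees.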
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);

// ... (intervening lines elided in this excerpt) ...

  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

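// Mark a single object: queue it for string deduplication if it is a
// candidate, set the marked bit pattern in its header, and preserve the
// original markword when it carries state that must survive the GC.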
void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Some marks may contain information we need to preserve, so we store them
  // away and overwrite the mark. We'll restore it at the end of serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(mark.set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

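// Load the (possibly compressed) oop at p; if it refers to an object that is
// not yet marked, mark it and push it on the marking stack for tracing.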
template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>
// ... (intervening lines elided in this excerpt) ...

  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

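// Entry point for a serial full GC; must run at a safepoint. Marks live
// objects (phase 1), computes new addresses (phase 2), adjusts references
// (phase 3), and finally slides objects to their new locations (phase 4).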
void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

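  // Prepare the forwarding mechanism for this collection; paired with
  // FullGCForwarding::end() after compaction.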
  FullGCForwarding::begin();

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    // ... (intervening lines elided in this excerpt) ...
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  restore_marks();

  FullGCForwarding::end();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}