
src/hotspot/share/gc/serial/serialFullGC.cpp


173   // Used for BOT update
174   TenuredGeneration* _old_gen;
175 
176   HeapWord* get_compaction_top(uint index) const {
177     return _spaces[index]._compaction_top;
178   }
179 
180   HeapWord* get_first_dead(uint index) const {
181     return _spaces[index]._first_dead;
182   }
183 
184   ContiguousSpace* get_space(uint index) const {
185     return _spaces[index]._space;
186   }
187 
188   void record_first_dead(uint index, HeapWord* first_dead) {
189     assert(_spaces[index]._first_dead == nullptr, "should write only once");
190     _spaces[index]._first_dead = first_dead;
191   }
192 
193   HeapWord* alloc(size_t words) {

194     while (true) {
195       if (words <= pointer_delta(_spaces[_index]._space->end(),
196                                  _spaces[_index]._compaction_top)) {
197         HeapWord* result = _spaces[_index]._compaction_top;
198         _spaces[_index]._compaction_top += words;
199         if (_index == 0) {
200           // old-gen requires BOT update
201           _old_gen->update_for_block(result, result + words);
202         }
203         return result;
204       }
205 
206       // out-of-memory in this space
207       _index++;
208       assert(_index < max_num_spaces - 1, "the last space should not be used");

209     }
210   }
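The allocator above is a plain bump pointer that spills into the next space in compaction order when the current one runs out of room. A minimal standalone sketch of the same scheme, using plain C++ types instead of HotSpot's HeapWord/ContiguousSpace (all names here are illustrative, not HotSpot API):

    #include <cassert>
    #include <cstddef>

    struct SpaceSketch {
      char* compaction_top;  // next free byte in this space
      char* end;             // one past the last usable byte
    };

    struct CompacterSketch {
      static const unsigned max_num_spaces = 4;
      SpaceSketch spaces[max_num_spaces];
      unsigned index = 0;

      char* alloc(size_t bytes) {
        while (true) {
          SpaceSketch& s = spaces[index];
          if (bytes <= size_t(s.end - s.compaction_top)) {
            char* result = s.compaction_top;
            s.compaction_top += bytes;
            return result;
          }
          index++;  // out of room here; fall through to the next space
          // Compaction never overflows into the last space, mirroring the
          // assert in the code above.
          assert(index < max_num_spaces - 1);
        }
      }
    };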
211 
212   static void prefetch_read_scan(void* p) {
213     if (PrefetchScanIntervalInBytes >= 0) {
214       Prefetch::read(p, PrefetchScanIntervalInBytes);
215     }
216   }
217 
218   static void prefetch_write_scan(void* p) {
219     if (PrefetchScanIntervalInBytes >= 0) {
220       Prefetch::write(p, PrefetchScanIntervalInBytes);
221     }
222   }
223 
224   static void prefetch_write_copy(void* p) {
225     if (PrefetchCopyIntervalInBytes >= 0) {
226       Prefetch::write(p, PrefetchCopyIntervalInBytes);
227     }
228   }
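All three helpers gate the prefetch hint on a flag where a negative value disables it, so `>= 0` is the "enabled" test rather than an off-by-one. A hedged stand-in using the GCC/Clang builtin, only to show the gating pattern (HotSpot's real Prefetch::read/write are platform-specific):

    // Illustrative only; -1 mirrors the disabled sentinel of the real flags.
    static long scan_interval_bytes = -1;

    static void prefetch_read_sketch(void* p) {
      if (scan_interval_bytes >= 0) {
        __builtin_prefetch((char*)p + scan_interval_bytes, /*rw=*/0);
      }
    }

    static void prefetch_write_sketch(void* p) {
      if (scan_interval_bytes >= 0) {
        __builtin_prefetch((char*)p + scan_interval_bytes, /*rw=*/1);
      }
    }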

240 
241   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
242     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
243       prefetch_read_scan(i_addr);
244       oop obj = cast_to_oop(i_addr);
245       if (obj->is_gc_marked()) {
246         return i_addr;
247       }
248       i_addr += obj->size();
249     }
250     return end;
251   };
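find_next_live_addr relies on the heap being parsable: every object, live or dead, reports its size, so the scan can hop header-to-header until it finds a marked object. The same loop over a toy layout, where objects sit in an array and record their length in array slots (illustrative types, not HotSpot's heap layout):

    #include <cstddef>

    struct ObjSketch {
      bool   marked;      // set by the marking phase
      size_t size_slots;  // object length in ObjSketch slots
    };

    // Scan [start, end) and return the first marked object, or end.
    static ObjSketch* find_next_live(ObjSketch* start, ObjSketch* end) {
      for (ObjSketch* cur = start; cur < end; /* advance below */) {
        if (cur->marked) {
          return cur;
        }
        cur += cur->size_slots;  // dead object: skip it wholesale
      }
      return end;
    }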
252 
253   static size_t relocate(HeapWord* addr) {
254     // Prefetch source and destination
255     prefetch_read_scan(addr);
256 
257     oop obj = cast_to_oop(addr);
258     oop new_obj = FullGCForwarding::forwardee(obj);
259     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
260     assert(addr != new_addr, "inv");
261     prefetch_write_copy(new_addr);
262 
263     size_t obj_size = obj->size();
264     Copy::aligned_conjoint_words(addr, new_addr, obj_size);



265     new_obj->init_mark();



266 
267     return obj_size;
268   }
269 
270 public:
271   explicit Compacter(SerialHeap* heap) {
272     // In this order so that the heap is compacted towards old-gen.
273     _spaces[0].init(heap->old_gen()->space());
274     _spaces[1].init(heap->young_gen()->eden());
275     _spaces[2].init(heap->young_gen()->from());
276 
277     bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
278     if (is_promotion_failed) {
279       _spaces[3].init(heap->young_gen()->to());
280       _num_spaces = 4;
281     } else {
282       _num_spaces = 3;
283     }
284     _index = 0;
285     _old_gen = heap->old_gen();
286   }
287 
288   void phase2_calculate_new_addr() {
289     for (uint i = 0; i < _num_spaces; ++i) {
290       ContiguousSpace* space = get_space(i);
291       HeapWord* cur_addr = space->bottom();
292       HeapWord* top = space->top();
293 
294       bool record_first_dead_done = false;
295 
296       DeadSpacer dead_spacer(space);
297 
298       while (cur_addr < top) {
299         oop obj = cast_to_oop(cur_addr);
300         size_t obj_size = obj->size();

301         if (obj->is_gc_marked()) {
302           HeapWord* new_addr = alloc(obj_size);
303           forward_obj(obj, new_addr);

304           cur_addr += obj_size;
305         } else {
306           // Skipping the current known-unmarked obj
307           HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
308           if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
309             // Register space for the filler obj
310             alloc(pointer_delta(next_live_addr, cur_addr));

311           } else {
312             if (!record_first_dead_done) {
313               record_first_dead(i, cur_addr);
314               record_first_dead_done = true;
315             }
316             *(HeapWord**)cur_addr = next_live_addr;
317           }
318           cur_addr = next_live_addr;
319         }
320       }
321 
322       if (!record_first_dead_done) {
323         record_first_dead(i, top);
324       }
325     }
326   }
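Two details of the loop above are worth spelling out: small dead runs can be turned into filler objects by the DeadSpacer, so nearby live objects are not all shifted for a tiny gain, and any dead run that is not filled gets its first word overwritten with the address of the next live object. A sketch of that skip record, with illustrative pointer types in place of HeapWord:

    #include <cstdint>

    // The first word of a dead run is reused as a skip pointer, so later
    // phases cross the whole run in one step; this mirrors the
    // `*(HeapWord**)cur_addr = next_live_addr` store above.
    static void record_skip(uintptr_t* dead_run_start, uintptr_t* next_live) {
      *dead_run_start = (uintptr_t)next_live;
    }

    static uintptr_t* read_skip(uintptr_t* dead_run_start) {
      return (uintptr_t*)*dead_run_start;
    }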
327 
328   void phase3_adjust_pointers() {
329     for (uint i = 0; i < _num_spaces; ++i) {
330       ContiguousSpace* space = get_space(i);

576   if (_preserved_count_max != 0) {
577     DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
578     young_gen->reset_scratch();
579   }
580 
581   _preserved_overflow_stack_set.reclaim();
582   _marking_stack.clear();
583   _objarray_stack.clear(true);
584 }
585 
586 void SerialFullGC::mark_object(oop obj) {
587   if (StringDedup::is_enabled() &&
588       java_lang_String::is_instance(obj) &&
589       SerialStringDedup::is_candidate_from_mark(obj)) {
590     _string_dedup_requests->add(obj);
591   }
592 
593   // Some marks may contain information we need to preserve, so we store them
594   // away and overwrite the mark. We'll restore them at the end of serial full GC.
595   markWord mark = obj->mark();
596   obj->set_mark(obj->prototype_mark().set_marked());
597 
598   ContinuationGCSupport::transform_stack_chunk(obj);
599 
600   if (obj->mark_must_be_preserved(mark)) {
601     preserve_mark(obj, mark);
602   }
603 }
604 
605 template <class T> void SerialFullGC::mark_and_push(T* p) {
606   T heap_oop = RawAccess<>::oop_load(p);
607   if (!CompressedOops::is_null(heap_oop)) {
608     oop obj = CompressedOops::decode_not_null(heap_oop);
609     if (!obj->mark().is_marked()) {
610       mark_object(obj);
611       _marking_stack.push(obj);
612     }
613   }
614 }
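mark_and_push is templated on the slot type so one definition serves both narrowOop and oop fields; after the null check and decode, an unmarked object is marked immediately and queued rather than scanned recursively, keeping marking iterative. A toy sketch of that push-don't-recurse step (types invented for illustration):

    #include <vector>

    struct NodeSketch {
      bool marked = false;
      // children omitted; a separate drain loop would scan them
    };

    static void mark_and_push_sketch(NodeSketch* obj,
                                     std::vector<NodeSketch*>& marking_stack) {
      if (obj != nullptr && !obj->marked) {
        obj->marked = true;            // mark on first sight
        marking_stack.push_back(obj);  // defer scanning to the drain loop
      }
    }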
615 
616 template <typename T>

679   // to discovery, hence the _always_true_closure.
680   SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
681   mark_and_push_closure.set_ref_discoverer(_ref_processor);
682 }
683 
684 void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
685   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
686 
687   SerialHeap* gch = SerialHeap::heap();
688 
689   gch->trace_heap_before_gc(_gc_tracer);
690 
691   // Capture used regions for old-gen to reestablish the old-to-young
692   // invariant after the full GC.
693   gch->old_gen()->save_used_region();
694 
695   allocate_stacks();
696 
697   phase1_mark(clear_all_softrefs);
698 


699   Compacter compacter{gch};
700 
701   {
702     // Now that all live objects are marked, compute the new object addresses.
703     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
704 
705     compacter.phase2_calculate_new_addr();
706   }
707 
708   // Don't add any more derived pointers during phase 3
709 #if COMPILER2_OR_JVMCI
710   assert(DerivedPointerTable::is_active(), "Sanity");
711   DerivedPointerTable::set_active(false);
712 #endif
713 
714   {
715     // Adjust the pointers to reflect the new locations
716     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
717 
718     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

722                        &adjust_pointer_closure,
723                        &adjust_cld_closure,
724                        &adjust_cld_closure,
725                        &code_closure);
726 
727     WeakProcessor::oops_do(&adjust_pointer_closure);
728 
729     adjust_marks();
730     compacter.phase3_adjust_pointers();
731   }
732 
733   {
734     // All pointers are now adjusted, move objects accordingly
735     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
736 
737     compacter.phase4_compact();
738   }
739 
740   restore_marks();
741 


742   deallocate_stacks();
743 
744   SerialFullGC::_string_dedup_requests->flush();
745 
746   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
747   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
748 
749   gch->prune_scavengable_nmethods();
750 
751   // Update heap occupancy information, which is used as
752   // input to the soft ref clearing policy at the next GC.
753   Universe::heap()->update_capacity_and_used_at_gc();
754 
755   // Signal that we have completed a visit to all live objects.
756   Universe::heap()->record_whole_heap_examined_timestamp();
757 
758   gch->trace_heap_after_gc(_gc_tracer);
759 }

The patched version of the same code follows.

173   // Used for BOT update
174   TenuredGeneration* _old_gen;
175 
176   HeapWord* get_compaction_top(uint index) const {
177     return _spaces[index]._compaction_top;
178   }
179 
180   HeapWord* get_first_dead(uint index) const {
181     return _spaces[index]._first_dead;
182   }
183 
184   ContiguousSpace* get_space(uint index) const {
185     return _spaces[index]._space;
186   }
187 
188   void record_first_dead(uint index, HeapWord* first_dead) {
189     assert(_spaces[index]._first_dead == nullptr, "should write only once");
190     _spaces[index]._first_dead = first_dead;
191   }
192 
193   HeapWord* alloc(size_t old_size, size_t new_size, HeapWord* old_obj) {
194     size_t words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
195     while (true) {
196       if (words <= pointer_delta(_spaces[_index]._space->end(),
197                                  _spaces[_index]._compaction_top)) {
198         HeapWord* result = _spaces[_index]._compaction_top;
199         _spaces[_index]._compaction_top += words;
200         if (_index == 0) {
201           // old-gen requires BOT update
202           _old_gen->update_for_block(result, result + words);
203         }
204         return result;
205       }
206 
207       // out-of-memory in this space
208       _index++;
209       assert(_index < max_num_spaces - 1, "the last space should not be used");
210       words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
211     }
212   }
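This three-argument alloc is the core of the sizing change in this diff: an object that stays exactly where it is (old_obj equals the current compaction top) keeps its current size, while an object that moves reserves the possibly larger copy size; note how `words` is re-evaluated after `_index++`, since whether the object moves depends on the space finally chosen. Fillers pass the same value for both sizes, as the phase-2 call site below shows. A condensed sketch of just the size choice (names illustrative):

    #include <cstddef>

    // Reserve old_size only when the object will not move; a moved object
    // reserves the (possibly larger) copy size.
    static size_t words_to_reserve(const char* old_obj, const char* compaction_top,
                                   size_t old_size, size_t new_size) {
      return (old_obj == compaction_top) ? old_size : new_size;
    }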
213 
214   static void prefetch_read_scan(void* p) {
215     if (PrefetchScanIntervalInBytes >= 0) {
216       Prefetch::read(p, PrefetchScanIntervalInBytes);
217     }
218   }
219 
220   static void prefetch_write_scan(void* p) {
221     if (PrefetchScanIntervalInBytes >= 0) {
222       Prefetch::write(p, PrefetchScanIntervalInBytes);
223     }
224   }
225 
226   static void prefetch_write_copy(void* p) {
227     if (PrefetchCopyIntervalInBytes >= 0) {
228       Prefetch::write(p, PrefetchCopyIntervalInBytes);
229     }
230   }

242 
243   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
244     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
245       prefetch_read_scan(i_addr);
246       oop obj = cast_to_oop(i_addr);
247       if (obj->is_gc_marked()) {
248         return i_addr;
249       }
250       i_addr += obj->size();
251     }
252     return end;
253   };
254 
255   static size_t relocate(HeapWord* addr) {
256     // Prefetch source and destination
257     prefetch_read_scan(addr);
258 
259     oop obj = cast_to_oop(addr);
260     oop new_obj = FullGCForwarding::forwardee(obj);
261     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);


262 
263     size_t obj_size = obj->size();
264     if (addr != new_addr) {
265       prefetch_write_copy(new_addr);
266       Copy::aligned_conjoint_words(addr, new_addr, obj_size);
267     }
268     new_obj->init_mark();
269     if (addr != new_addr) {
270       new_obj->initialize_hash_if_necessary(obj);
271     }
272 
273     return obj_size;
274   }
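Where the earlier version asserted `addr != new_addr`, this relocate must now handle objects that stay in place: only a moved object is copied, and only a moved object may need its identity hash materialized in the (possibly larger) copy. A self-contained sketch with a toy header standing in for the HotSpot calls (all names illustrative):

    #include <cstddef>
    #include <cstring>

    struct HeaderSketch {
      bool   marked;        // GC mark; cleared below like init_mark()
      bool   hash_pending;  // identity hash to materialize on move
      size_t size_bytes;
    };

    static size_t relocate_sketch(char* addr, char* new_addr) {
      HeaderSketch* h = (HeaderSketch*)addr;
      size_t size = h->size_bytes;
      if (addr != new_addr) {
        memmove(new_addr, addr, size);  // ranges may overlap; memmove is safe
      }
      HeaderSketch* nh = (HeaderSketch*)new_addr;
      nh->marked = false;               // init_mark() analogue
      if (addr != new_addr && nh->hash_pending) {
        // initialize_hash_if_necessary() analogue: install the saved
        // identity hash in the moved copy.
        nh->hash_pending = false;
      }
      return size;
    }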
275 
276 public:
277   explicit Compacter(SerialHeap* heap) {
278     // In this order so that the heap is compacted towards old-gen.
279     _spaces[0].init(heap->old_gen()->space());
280     _spaces[1].init(heap->young_gen()->eden());
281     _spaces[2].init(heap->young_gen()->from());
282 
283     bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
284     if (is_promotion_failed) {
285       _spaces[3].init(heap->young_gen()->to());
286       _num_spaces = 4;
287     } else {
288       _num_spaces = 3;
289     }
290     _index = 0;
291     _old_gen = heap->old_gen();
292   }
293 
294   void phase2_calculate_new_addr() {
295     for (uint i = 0; i < _num_spaces; ++i) {
296       ContiguousSpace* space = get_space(i);
297       HeapWord* cur_addr = space->bottom();
298       HeapWord* top = space->top();
299 
300       bool record_first_dead_done = false;
301 
302       DeadSpacer dead_spacer(space);
303 
304       while (cur_addr < top) {
305         oop obj = cast_to_oop(cur_addr);
306         size_t obj_size = obj->size();
307         size_t new_size = obj->copy_size(obj_size, obj->mark());
308         if (obj->is_gc_marked()) {
309           HeapWord* new_addr = alloc(obj_size, new_size, cur_addr);
310           forward_obj(obj, new_addr);
311           assert(obj->size() == obj_size, "size must not change after forwarding");
312           cur_addr += obj_size;
313         } else {
314           // Skipping the current known-unmarked obj
315           HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
316           if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
317             // Register space for the filler obj
318             size_t size = pointer_delta(next_live_addr, cur_addr);
319             alloc(size, size, cur_addr);
320           } else {
321             if (!record_first_dead_done) {
322               record_first_dead(i, cur_addr);
323               record_first_dead_done = true;
324             }
325             *(HeapWord**)cur_addr = next_live_addr;
326           }
327           cur_addr = next_live_addr;
328         }
329       }
330 
331       if (!record_first_dead_done) {
332         record_first_dead(i, top);
333       }
334     }
335   }
336 
337   void phase3_adjust_pointers() {
338     for (uint i = 0; i < _num_spaces; ++i) {
339       ContiguousSpace* space = get_space(i);

585   if (_preserved_count_max != 0) {
586     DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
587     young_gen->reset_scratch();
588   }
589 
590   _preserved_overflow_stack_set.reclaim();
591   _marking_stack.clear();
592   _objarray_stack.clear(true);
593 }
594 
595 void SerialFullGC::mark_object(oop obj) {
596   if (StringDedup::is_enabled() &&
597       java_lang_String::is_instance(obj) &&
598       SerialStringDedup::is_candidate_from_mark(obj)) {
599     _string_dedup_requests->add(obj);
600   }
601 
602   // Some marks may contain information we need to preserve, so we store them
603   // away and overwrite the mark. We'll restore them at the end of serial full GC.
604   markWord mark = obj->mark();
605   obj->set_mark(mark.set_marked());
606 
607   ContinuationGCSupport::transform_stack_chunk(obj);
608 
609   if (obj->mark_must_be_preserved(mark)) {
610     preserve_mark(obj, mark);
611   }
612 }
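The one-line change here is easy to miss but is the point of the hunk: the old code installed a fresh prototype mark via obj->prototype_mark().set_marked(), while the new code applies set_marked() to the object's current mark, preserving whatever payload the mark word carries across marking. A toy illustration of preserve-and-flag with an invented bit layout (HotSpot's real mark word is richer than shown):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Invented layout: low two bits 0b11 mean "GC-marked".
    static const uint64_t MARKED = 0x3;

    static uint64_t set_marked_sketch(uint64_t mark) {
      return mark | MARKED;  // keep payload bits, flag the object
    }

    // Marks whose payload cannot be recomputed (locks, installed identity
    // hashes) are saved and reinstalled after the GC, mirroring
    // preserve_mark()/restore_marks().
    static std::vector<std::pair<uint64_t*, uint64_t>> preserved_marks;

    static void mark_header_sketch(uint64_t* header, bool must_preserve) {
      uint64_t old_mark = *header;
      *header = set_marked_sketch(old_mark);
      if (must_preserve) {
        preserved_marks.emplace_back(header, old_mark);
      }
    }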
613 
614 template <class T> void SerialFullGC::mark_and_push(T* p) {
615   T heap_oop = RawAccess<>::oop_load(p);
616   if (!CompressedOops::is_null(heap_oop)) {
617     oop obj = CompressedOops::decode_not_null(heap_oop);
618     if (!obj->mark().is_marked()) {
619       mark_object(obj);
620       _marking_stack.push(obj);
621     }
622   }
623 }
624 
625 template <typename T>

688   // to discovery, hence the _always_true_closure.
689   SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
690   mark_and_push_closure.set_ref_discoverer(_ref_processor);
691 }
692 
693 void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
694   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
695 
696   SerialHeap* gch = SerialHeap::heap();
697 
698   gch->trace_heap_before_gc(_gc_tracer);
699 
700   // Capture used regions for old-gen to reestablish the old-to-young
701   // invariant after the full GC.
702   gch->old_gen()->save_used_region();
703 
704   allocate_stacks();
705 
706   phase1_mark(clear_all_softrefs);
707 
708   FullGCForwarding::begin();
709 
710   Compacter compacter{gch};
711 
712   {
713     // Now that all live objects are marked, compute the new object addresses.
714     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
715 
716     compacter.phase2_calculate_new_addr();
717   }
718 
719   // Don't add any more derived pointers during phase 3
720 #if COMPILER2_OR_JVMCI
721   assert(DerivedPointerTable::is_active(), "Sanity");
722   DerivedPointerTable::set_active(false);
723 #endif
724 
725   {
726     // Adjust the pointers to reflect the new locations
727     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
728 
729     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

733                        &adjust_pointer_closure,
734                        &adjust_cld_closure,
735                        &adjust_cld_closure,
736                        &code_closure);
737 
738     WeakProcessor::oops_do(&adjust_pointer_closure);
739 
740     adjust_marks();
741     compacter.phase3_adjust_pointers();
742   }
743 
744   {
745     // All pointers are now adjusted, move objects accordingly
746     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
747 
748     compacter.phase4_compact();
749   }
750 
751   restore_marks();
752 
753   FullGCForwarding::end();
754 
755   deallocate_stacks();
756 
757   SerialFullGC::_string_dedup_requests->flush();
758 
759   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
760   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
761 
762   gch->prune_scavengable_nmethods();
763 
764   // Update heap occupancy information, which is used as
765   // input to the soft ref clearing policy at the next GC.
766   Universe::heap()->update_capacity_and_used_at_gc();
767 
768   // Signal that we have completed a visit to all live objects.
769   Universe::heap()->record_whole_heap_examined_timestamp();
770 
771   gch->trace_heap_after_gc(_gc_tracer);
772 }
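Stepping back, the new FullGCForwarding::begin()/end() pair brackets the window (phases 2 through 4, plus restore_marks()) during which object headers may encode forwarding information. A condensed, hypothetical shape of the driver above, with free functions standing in for the real phase calls:

    static void mark_live_objects()       {}  // phase 1
    static void begin_forwarding()        {}  // FullGCForwarding::begin() analogue
    static void compute_new_addresses()   {}  // phase 2
    static void adjust_pointers()         {}  // phase 3
    static void compact()                 {}  // phase 4
    static void restore_preserved_marks() {}
    static void end_forwarding()          {}  // FullGCForwarding::end() analogue

    static void full_gc_sketch() {
      mark_live_objects();
      begin_forwarding();        // headers may now hold forwarding state
      compute_new_addresses();
      adjust_pointers();
      compact();
      restore_preserved_marks();
      end_forwarding();          // headers are plain marks again
    }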