
src/hotspot/share/gc/serial/serialFullGC.cpp

173   // Used for BOT (block offset table) updates
174   TenuredGeneration* _old_gen;
175 
176   HeapWord* get_compaction_top(uint index) const {
177     return _spaces[index]._compaction_top;
178   }
179 
180   HeapWord* get_first_dead(uint index) const {
181     return _spaces[index]._first_dead;
182   }
183 
184   ContiguousSpace* get_space(uint index) const {
185     return _spaces[index]._space;
186   }
187 
188   void record_first_dead(uint index, HeapWord* first_dead) {
189     assert(_spaces[index]._first_dead == nullptr, "should write only once");
190     _spaces[index]._first_dead = first_dead;
191   }
192 
193   HeapWord* alloc(size_t words) {

194     while (true) {
195       if (words <= pointer_delta(_spaces[_index]._space->end(),
196                                  _spaces[_index]._compaction_top)) {
197         HeapWord* result = _spaces[_index]._compaction_top;
198         _spaces[_index]._compaction_top += words;
199         if (_index == 0) {
200           // old-gen requires BOT update
201           _old_gen->update_for_block(result, result + words);
202         }
203         return result;
204       }
205 
206       // out-of-memory in this space
207       _index++;
208       assert(_index < max_num_spaces - 1, "the last space should not be used");

209     }
210   }
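
For context, alloc() above is a plain bump-pointer cursor over the ordered spaces: it carves the request out of the current space and spills into the next space only when the remainder no longer fits. A minimal standalone sketch of the same idea, with illustrative names rather than the HotSpot API:

    #include <cassert>
    #include <cstddef>

    struct SpaceCursor {
      char* top;  // next free byte
      char* end;  // one past the last usable byte
    };

    // Bump-allocate 'bytes', spilling into the next space on overflow.
    // Full GC guarantees the compacted data fits, so the loop terminates
    // before running off the end of the space array.
    char* bump_alloc(SpaceCursor* spaces, size_t num_spaces,
                     size_t& index, size_t bytes) {
      while (true) {
        if (bytes <= static_cast<size_t>(spaces[index].end - spaces[index].top)) {
          char* result = spaces[index].top;
          spaces[index].top += bytes;
          return result;
        }
        ++index;  // out of memory in this space
        assert(index < num_spaces && "compacted data must fit");
      }
    }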
211 
212   static void prefetch_read_scan(void* p) {
213     if (PrefetchScanIntervalInBytes >= 0) {
214       Prefetch::read(p, PrefetchScanIntervalInBytes);
215     }
216   }
217 
218   static void prefetch_write_scan(void* p) {
219     if (PrefetchScanIntervalInBytes >= 0) {
220       Prefetch::write(p, PrefetchScanIntervalInBytes);
221     }
222   }
223 
224   static void prefetch_write_copy(void* p) {
225     if (PrefetchCopyIntervalInBytes >= 0) {
226       Prefetch::write(p, PrefetchCopyIntervalInBytes);
227     }
228   }
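
A negative interval disables these helpers, which is why each guard tests >= 0. The Prefetch wrappers themselves are per-platform HotSpot internals; as a rough approximation, on GCC/Clang-style compilers a similar effect could be written with the portable builtin (a sketch, not the actual implementation):

    #include <cstdint>

    // Approximate equivalents of Prefetch::read/write: hint the cache to
    // fetch the line 'interval' bytes ahead of the scan/copy cursor.
    static inline void prefetch_read(void* p, intptr_t interval) {
      __builtin_prefetch(static_cast<const char*>(p) + interval, /*rw=*/0, /*locality=*/3);
    }
    static inline void prefetch_write(void* p, intptr_t interval) {
      __builtin_prefetch(static_cast<const char*>(p) + interval, /*rw=*/1, /*locality=*/3);
    }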
229 
230   static void forward_obj(oop obj, HeapWord* new_addr) {
231     prefetch_write_scan(obj);
232     if (cast_from_oop<HeapWord*>(obj) != new_addr) {
233       FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
234     } else {
235       assert(obj->is_gc_marked(), "inv");
236       // This obj will stay in-place. Fix the markword.
237       obj->init_mark();





238     }
239   }
240 
241   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
242     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
243       prefetch_read_scan(i_addr);
244       oop obj = cast_to_oop(i_addr);
245       if (obj->is_gc_marked()) {
246         return i_addr;
247       }
248       i_addr += obj->size();
249     }
250     return end;
251   }
252 
253   static size_t relocate(HeapWord* addr) {
254     // Prefetch source and destination
255     prefetch_read_scan(addr);
256 
257     oop obj = cast_to_oop(addr);
258     oop new_obj = FullGCForwarding::forwardee(obj);
259     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
260     assert(addr != new_addr, "inv");
261     prefetch_write_copy(new_addr);
262 
263     size_t obj_size = obj->size();
264     Copy::aligned_conjoint_words(addr, new_addr, obj_size);



265     new_obj->init_mark();



266 
267     return obj_size;
268   }
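
One detail worth noting: because compaction slides objects toward lower addresses, the destination range can overlap the source, so the copy must be overlap-safe in the memmove sense; that is the contract Copy::aligned_conjoint_words provides for word-aligned ranges. A portable sketch of the required behavior (illustrative, not the HotSpot implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Overlap-safe, word-granular copy: what relocate() relies on.
    static void conjoint_words(const uintptr_t* from, uintptr_t* to, size_t count) {
      std::memmove(to, from, count * sizeof(uintptr_t));  // tolerates overlap
    }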
269 
270 public:
271   explicit Compacter(SerialHeap* heap) {
272     // In this order so that the heap is compacted towards old-gen.
273     _spaces[0].init(heap->old_gen()->space());
274     _spaces[1].init(heap->young_gen()->eden());
275     _spaces[2].init(heap->young_gen()->from());
276 
277     bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
278     if (is_promotion_failed) {
279       _spaces[3].init(heap->young_gen()->to());
280       _num_spaces = 4;
281     } else {
282       _num_spaces = 3;
283     }
284     _index = 0;
285     _old_gen = heap->old_gen();
286   }
287 
288   void phase2_calculate_new_addr() {
289     for (uint i = 0; i < _num_spaces; ++i) {
290       ContiguousSpace* space = get_space(i);
291       HeapWord* cur_addr = space->bottom();
292       HeapWord* top = space->top();
293 
294       bool record_first_dead_done = false;
295 
296       DeadSpacer dead_spacer(space);
297 
298       while (cur_addr < top) {
299         oop obj = cast_to_oop(cur_addr);
300         size_t obj_size = obj->size();

301         if (obj->is_gc_marked()) {
302           HeapWord* new_addr = alloc(obj_size);
303           forward_obj(obj, new_addr);

304           cur_addr += obj_size;
305         } else {
306           // Skipping the current known-unmarked obj
307           HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
308           if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
309             // Register space for the filler obj
310             alloc(pointer_delta(next_live_addr, cur_addr));

311           } else {
312             if (!record_first_dead_done) {
313               record_first_dead(i, cur_addr);
314               record_first_dead_done = true;
315             }
316             *(HeapWord**)cur_addr = next_live_addr;
317           }
318           cur_addr = next_live_addr;
319         }
320       }
321 
322       if (!record_first_dead_done) {
323         record_first_dead(i, top);
324       }
325     }
326   }
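
The store `*(HeapWord**)cur_addr = next_live_addr` above is the key encoding of this phase: when a dead gap is not turned into filler, its first word is overwritten with the address of the next live object, so later passes can hop over the whole gap in one step instead of re-walking each dead object (everything below the recorded first_dead is known to be live and in place). A minimal sketch of reading that record back, with a hypothetical helper name:

    #include <cstdint>

    typedef uintptr_t* HeapWordPtr;  // stand-in for HotSpot's HeapWord*

    // Jump from the start of a dead gap to the next live object (or top),
    // using the next-live address that phase 2 stored in the gap's first word.
    static inline HeapWordPtr skip_dead_gap(HeapWordPtr gap_start) {
      return *reinterpret_cast<HeapWordPtr*>(gap_start);
    }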
327 
328   void phase3_adjust_pointers() {
329     for (uint i = 0; i < _num_spaces; ++i) {
330       ContiguousSpace* space = get_space(i);

586   if (_preserved_count_max != 0) {
587     DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
588     young_gen->reset_scratch();
589   }
590 
591   _preserved_overflow_stack_set.reclaim();
592   _marking_stack.clear();
593   _objarray_stack.clear(true);
594 }
595 
596 void SerialFullGC::mark_object(oop obj) {
597   if (StringDedup::is_enabled() &&
598       java_lang_String::is_instance(obj) &&
599       SerialStringDedup::is_candidate_from_mark(obj)) {
600     _string_dedup_requests->add(obj);
601   }
602 
603   // Some marks may contain information we need to preserve, so we store them
604   // away and overwrite the mark. We restore them at the end of serial full GC.
605   markWord mark = obj->mark();
606   obj->set_mark(obj->prototype_mark().set_marked());
607 
608   ContinuationGCSupport::transform_stack_chunk(obj);
609 
610   if (obj->mark_must_be_preserved(mark)) {
611     preserve_mark(obj, mark);
612   }
613 }
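
Setting the mark bit clobbers whatever the header held before, such as an installed identity hash or a pointer to a displaced lock record, so headers that carry such state are saved as (object, mark) pairs and written back by restore_marks() after compaction; adjust_marks() in phase 3 updates the saved object references to their new locations. A simplified sketch of that bookkeeping, with illustrative types:

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Obj { uintptr_t header; };  // stand-in for an object with a markWord

    static std::vector<std::pair<Obj*, uintptr_t>> preserved;

    static void preserve_mark(Obj* obj, uintptr_t mark) {
      preserved.emplace_back(obj, mark);  // saved before being overwritten
    }

    static void restore_marks() {
      for (auto& e : preserved) {
        e.first->header = e.second;       // write the original mark back
      }
      preserved.clear();
    }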
614 
615 template <class T> void SerialFullGC::mark_and_push(T* p) {
616   T heap_oop = RawAccess<>::oop_load(p);
617   if (!CompressedOops::is_null(heap_oop)) {
618     oop obj = CompressedOops::decode_not_null(heap_oop);
619     if (!obj->mark().is_marked()) {
620       mark_object(obj);
621       _marking_stack.push(obj);
622     }
623   }
624 }
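
mark_and_push is instantiated for both narrow (compressed) and full-width oop slots; the template parameter T selects the slot representation, and the load/decode pair normalizes the slot to a full pointer before the mark test. A generic sketch of that pattern, with stand-ins for the HotSpot access layer:

    #include <cstdint>
    #include <vector>

    struct Obj { bool marked = false; };

    static Obj* g_heap_base = nullptr;  // assumed base for compressed slots

    // Normalize either slot representation to a plain pointer.
    static Obj* decode(uint32_t narrow) { return narrow ? g_heap_base + narrow : nullptr; }
    static Obj* decode(Obj* wide)       { return wide; }

    template <class T>
    void mark_and_push(T* slot, std::vector<Obj*>& stack) {
      Obj* obj = decode(*slot);
      if (obj != nullptr && !obj->marked) {
        obj->marked = true;    // analogue of mark_object()
        stack.push_back(obj);  // defer scanning of its fields
      }
    }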
625 
626 template <typename T>

699   gch->trace_heap_before_gc(_gc_tracer);
700 
701   // Capture the used region of old-gen so that the old-to-young invariant
702   // can be reestablished after full-gc.
703   gch->old_gen()->save_used_region();
704 
705   allocate_stacks();
706 
707   // Usually, all class unloading work occurs at the end of phase 1, but Serial
708   // full-gc reads dead objects' klasses (to compute their sizes) while scanning
709   // for the start of the next live object during phase 2, so klasses of dead
710   // objects must be kept loaded. Therefore, we declare ClassUnloadingContext at
711   // the same level as the full-gc phases, and purge dead classes (invoking
712   // ClassLoaderDataGraph::purge) only after all phases of full-gc.
713   ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
714                             false /* unregister_nmethods_during_purge */,
715                             false /* lock_nmethod_free_separately */);
716 
717   phase1_mark(clear_all_softrefs);
718 


719   Compacter compacter{gch};
720 
721   {
722     // Now all live objects are marked, compute the new object addresses.
723     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
724 
725     compacter.phase2_calculate_new_addr();
726   }
727 
728   // Don't add any more derived pointers during phase3
729 #if COMPILER2_OR_JVMCI
730   assert(DerivedPointerTable::is_active(), "Sanity");
731   DerivedPointerTable::set_active(false);
732 #endif
733 
734   {
735     // Adjust the pointers to reflect the new locations
736     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
737 
738     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

756     adjust_marks();
757     compacter.phase3_adjust_pointers();
758   }
759 
760   {
761     // All pointers are now adjusted, move objects accordingly
762     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
763 
764     compacter.phase4_compact();
765   }
766 
767   // Delete metaspaces for unloaded class loaders and clean up CLDG.
768   ClassLoaderDataGraph::purge(true /* at_safepoint */);
769   DEBUG_ONLY(MetaspaceUtils::verify();)
770 
771   // Need to clear claim bits for the next full-gc (specifically phases 1 and 3).
772   ClassLoaderDataGraph::clear_claimed_marks();
773 
774   restore_marks();
775 


776   deallocate_stacks();
777 
778   SerialFullGC::_string_dedup_requests->flush();
779 
780   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
781   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
782 
783   gch->prune_scavengable_nmethods();
784 
785   // Update heap occupancy information, which is used as input to the
786   // soft-ref clearing policy at the next GC.
787   Universe::heap()->update_capacity_and_used_at_gc();
788 
789   // Signal that we have completed a visit to all live objects.
790   Universe::heap()->record_whole_heap_examined_timestamp();
791 
792   gch->trace_heap_after_gc(_gc_tracer);
793 }

173   // Used for BOT (block offset table) updates
174   TenuredGeneration* _old_gen;
175 
176   HeapWord* get_compaction_top(uint index) const {
177     return _spaces[index]._compaction_top;
178   }
179 
180   HeapWord* get_first_dead(uint index) const {
181     return _spaces[index]._first_dead;
182   }
183 
184   ContiguousSpace* get_space(uint index) const {
185     return _spaces[index]._space;
186   }
187 
188   void record_first_dead(uint index, HeapWord* first_dead) {
189     assert(_spaces[index]._first_dead == nullptr, "should write only once");
190     _spaces[index]._first_dead = first_dead;
191   }
192 
193   HeapWord* alloc(size_t old_size, size_t new_size, HeapWord* old_obj) {
194     size_t words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
195     while (true) {
196       if (words <= pointer_delta(_spaces[_index]._space->end(),
197                                  _spaces[_index]._compaction_top)) {
198         HeapWord* result = _spaces[_index]._compaction_top;
199         _spaces[_index]._compaction_top += words;
200         if (_index == 0) {
201           // old-gen requires BOT update
202           _old_gen->update_for_block(result, result + words);
203         }
204         return result;
205       }
206 
207       // out-of-memory in this space
208       _index++;
209       assert(_index < max_num_spaces - 1, "the last space should not be used");
210       words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
211     }
212   }
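
The extra parameters in this version of alloc() deal with objects whose destination size may differ from their current size. The test `old_obj == _spaces[_index]._compaction_top` detects an object that would compact to exactly its own address: such an object is never copied and therefore keeps old_size, while an object that actually moves must reserve new_size. The choice is recomputed after spilling into the next space because the cursor, and hence the in-place test, changes. A sketch of the selection rule, assuming the grow-on-move behavior that copy_size models:

    #include <cstddef>

    // Words to reserve at the destination: an in-place object cannot change
    // size; a moved object may need extra room (e.g. to materialize a
    // pending identity hash under compact object headers).
    static size_t reserve_words(bool stays_in_place,
                                size_t old_size, size_t new_size) {
      return stays_in_place ? old_size : new_size;
    }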
213 
214   static void prefetch_read_scan(void* p) {
215     if (PrefetchScanIntervalInBytes >= 0) {
216       Prefetch::read(p, PrefetchScanIntervalInBytes);
217     }
218   }
219 
220   static void prefetch_write_scan(void* p) {
221     if (PrefetchScanIntervalInBytes >= 0) {
222       Prefetch::write(p, PrefetchScanIntervalInBytes);
223     }
224   }
225 
226   static void prefetch_write_copy(void* p) {
227     if (PrefetchCopyIntervalInBytes >= 0) {
228       Prefetch::write(p, PrefetchCopyIntervalInBytes);
229     }
230   }
231 
232   static void forward_obj(oop obj, HeapWord* new_addr, bool after_first_dead) {
233     prefetch_write_scan(obj);
234     if (cast_from_oop<HeapWord*>(obj) != new_addr) {
235       FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
236     } else {
237       assert(obj->is_gc_marked(), "inv");
238       if (!after_first_dead) {
239         // This obj will stay in-place and we'll not see it during relocation.
240         // Fix the markword.
241         obj->init_mark();
242       } else {
243         FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
244       }
245     }
246   }
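
The after_first_dead flag separates two kinds of in-place survivors. Objects below the space's first_dead are never visited by the relocation pass, so their headers must be repaired here with init_mark(). An in-place object above first_dead will be visited by relocate(), which re-derives its (unchanged) address through FullGCForwarding::forwardee(), so it receives an explicit self-forwarding instead. A compilable sketch of the rule:

    // Which in-place objects need a self-forwarding entry so that the
    // relocation pass can look them up (sketch of the invariant above):
    static bool needs_self_forwarding(bool in_place, bool after_first_dead) {
      return in_place && after_first_dead;
    }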
247 
248   static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
249     for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
250       prefetch_read_scan(i_addr);
251       oop obj = cast_to_oop(i_addr);
252       if (obj->is_gc_marked()) {
253         return i_addr;
254       }
255       i_addr += obj->size();
256     }
257     return end;
258   }
259 
260   static size_t relocate(HeapWord* addr) {
261     // Prefetch source and destination
262     prefetch_read_scan(addr);
263 
264     oop obj = cast_to_oop(addr);
265     oop new_obj = FullGCForwarding::forwardee(obj);
266     HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);


267 
268     size_t obj_size = obj->size();
269     if (addr != new_addr) {
270       prefetch_write_copy(new_addr);
271       Copy::aligned_conjoint_words(addr, new_addr, obj_size);
272     }
273     new_obj->init_mark();
274     if (addr != new_addr) {
275       new_obj->initialize_hash_if_necessary(obj);
276     }
277 
278     return obj_size;
279   }
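
Unlike the earlier version, relocate() can now encounter in-place (self-forwarded) objects, so both the copy and the hash fix-up are guarded by addr != new_addr, while init_mark() runs unconditionally to reset the GC-marked header. A standalone sketch of that decision structure over raw words, with the header and hash details elided:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static size_t relocate_sketch(uintptr_t* addr, uintptr_t* new_addr,
                                  size_t size_words) {
      const bool moved = (addr != new_addr);
      if (moved) {
        std::memmove(new_addr, addr, size_words * sizeof(uintptr_t));  // body copy
      }
      new_addr[0] = 0;  // stand-in for init_mark(): reset the header word
      if (moved) {
        // here the real code installs a pending identity hash
        // (initialize_hash_if_necessary)
      }
      return size_words;
    }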
280 
281 public:
282   explicit Compacter(SerialHeap* heap) {
283     // In this order so that the heap is compacted towards old-gen.
284     _spaces[0].init(heap->old_gen()->space());
285     _spaces[1].init(heap->young_gen()->eden());
286     _spaces[2].init(heap->young_gen()->from());
287 
288     bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
289     if (is_promotion_failed) {
290       _spaces[3].init(heap->young_gen()->to());
291       _num_spaces = 4;
292     } else {
293       _num_spaces = 3;
294     }
295     _index = 0;
296     _old_gen = heap->old_gen();
297   }
298 
299   void phase2_calculate_new_addr() {
300     for (uint i = 0; i < _num_spaces; ++i) {
301       ContiguousSpace* space = get_space(i);
302       HeapWord* cur_addr = space->bottom();
303       HeapWord* top = space->top();
304 
305       bool record_first_dead_done = false;
306 
307       DeadSpacer dead_spacer(space);
308 
309       while (cur_addr < top) {
310         oop obj = cast_to_oop(cur_addr);
311         size_t obj_size = obj->size();
312         size_t new_size = obj->copy_size(obj_size, obj->mark());
313         if (obj->is_gc_marked()) {
314           HeapWord* new_addr = alloc(obj_size, new_size, cur_addr);
315           forward_obj(obj, new_addr, record_first_dead_done);
316           assert(obj->size() == obj_size, "size must not change after forwarding");
317           cur_addr += obj_size;
318         } else {
319           // Skipping the current known-unmarked obj
320           HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
321           if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
322             // Register space for the filler obj
323             size_t size = pointer_delta(next_live_addr, cur_addr);
324             alloc(size, size, cur_addr);
325           } else {
326             if (!record_first_dead_done) {
327               record_first_dead(i, cur_addr);
328               record_first_dead_done = true;
329             }
330             *(HeapWord**)cur_addr = next_live_addr;
331           }
332           cur_addr = next_live_addr;
333         }
334       }
335 
336       if (!record_first_dead_done) {
337         record_first_dead(i, top);
338       }
339     }
340   }
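
copy_size() computes an object's size at its destination from its current size and mark, and the assert after forward_obj() pins down the complementary invariant: the source object itself never changes size during phase 2. Under compact object headers, the case where new_size can exceed obj_size is an object whose identity hash has been requested but not yet stored, which needs room to materialize the hash once it moves. A toy model of that rule, stated as an assumption rather than the real markWord computation:

    #include <cstddef>

    // Toy model: a moved object grows by one word when a pending identity
    // hash must be materialized (assumed compact-header behavior; the real
    // amount comes from the markWord/layout code).
    static size_t copy_size_model(size_t cur_size_words, bool hash_pending) {
      return cur_size_words + (hash_pending ? 1 : 0);
    }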
341 
342   void phase3_adjust_pointers() {
343     for (uint i = 0; i < _num_spaces; ++i) {
344       ContiguousSpace* space = get_space(i);

600   if (_preserved_count_max != 0) {
601     DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
602     young_gen->reset_scratch();
603   }
604 
605   _preserved_overflow_stack_set.reclaim();
606   _marking_stack.clear();
607   _objarray_stack.clear(true);
608 }
609 
610 void SerialFullGC::mark_object(oop obj) {
611   if (StringDedup::is_enabled() &&
612       java_lang_String::is_instance(obj) &&
613       SerialStringDedup::is_candidate_from_mark(obj)) {
614     _string_dedup_requests->add(obj);
615   }
616 
617   // Some marks may contain information we need to preserve, so we store them
618   // away and overwrite the mark. We restore them at the end of serial full GC.
619   markWord mark = obj->mark();
620   obj->set_mark(mark.set_marked());
621 
622   ContinuationGCSupport::transform_stack_chunk(obj);
623 
624   if (obj->mark_must_be_preserved(mark)) {
625     preserve_mark(obj, mark);
626   }
627 }
628 
629 template <class T> void SerialFullGC::mark_and_push(T* p) {
630   T heap_oop = RawAccess<>::oop_load(p);
631   if (!CompressedOops::is_null(heap_oop)) {
632     oop obj = CompressedOops::decode_not_null(heap_oop);
633     if (!obj->mark().is_marked()) {
634       mark_object(obj);
635       _marking_stack.push(obj);
636     }
637   }
638 }
639 
640 template <typename T>

713   gch->trace_heap_before_gc(_gc_tracer);
714 
715   // Capture the used region of old-gen so that the old-to-young invariant
716   // can be reestablished after full-gc.
717   gch->old_gen()->save_used_region();
718 
719   allocate_stacks();
720 
721   // Usually, all class unloading work occurs at the end of phase 1, but Serial
722   // full-gc reads dead objects' klasses (to compute their sizes) while scanning
723   // for the start of the next live object during phase 2, so klasses of dead
724   // objects must be kept loaded. Therefore, we declare ClassUnloadingContext at
725   // the same level as the full-gc phases, and purge dead classes (invoking
726   // ClassLoaderDataGraph::purge) only after all phases of full-gc.
727   ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
728                             false /* unregister_nmethods_during_purge */,
729                             false /* lock_nmethod_free_separately */);
730 
731   phase1_mark(clear_all_softrefs);
732 
733   FullGCForwarding::begin();
734 
735   Compacter compacter{gch};
736 
737   {
738     // Now all live objects are marked, compute the new object addresses.
739     GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
740 
741     compacter.phase2_calculate_new_addr();
742   }
743 
744   // Don't add any more derived pointers during phase3
745 #if COMPILER2_OR_JVMCI
746   assert(DerivedPointerTable::is_active(), "Sanity");
747   DerivedPointerTable::set_active(false);
748 #endif
749 
750   {
751     // Adjust the pointers to reflect the new locations
752     GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
753 
754     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

772     adjust_marks();
773     compacter.phase3_adjust_pointers();
774   }
775 
776   {
777     // All pointers are now adjusted, move objects accordingly
778     GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
779 
780     compacter.phase4_compact();
781   }
782 
783   // Delete metaspaces for unloaded class loaders and clean up CLDG.
784   ClassLoaderDataGraph::purge(true /* at_safepoint */);
785   DEBUG_ONLY(MetaspaceUtils::verify();)
786 
787   // Need to clear claim bits for the next full-gc (specifically phases 1 and 3).
788   ClassLoaderDataGraph::clear_claimed_marks();
789 
790   restore_marks();
791 
792   FullGCForwarding::end();
793 
794   deallocate_stacks();
795 
796   SerialFullGC::_string_dedup_requests->flush();
797 
798   bool is_young_gen_empty = (gch->young_gen()->used() == 0);
799   gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
800 
801   gch->prune_scavengable_nmethods();
802 
803   // Update heap occupancy information, which is used as input to the
804   // soft-ref clearing policy at the next GC.
805   Universe::heap()->update_capacity_and_used_at_gc();
806 
807   // Signal that we have completed a visit to all live objects.
808   Universe::heap()->record_whole_heap_examined_timestamp();
809 
810   gch->trace_heap_after_gc(_gc_tracer);
811 }