/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialStringDedup.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

Stack<oop, mtGC>              SerialFullGC::_marking_stack;
Stack<ObjArrayTask, mtGC>     SerialFullGC::_objarray_stack;

PreservedMarksSet       SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
size_t                  SerialFullGC::_preserved_count = 0;
size_t                  SerialFullGC::_preserved_count_max = 0;
PreservedMark*          SerialFullGC::_preserved_marks = nullptr;
STWGCTimer*             SerialFullGC::_gc_timer        = nullptr;
SerialOldTracer*        SerialFullGC::_gc_tracer       = nullptr;

AlwaysTrueClosure   SerialFullGC::_always_true_closure;
ReferenceProcessor* SerialFullGC::_ref_processor;

StringDedup::Requests*  SerialFullGC::_string_dedup_requests = nullptr;

SerialFullGC::FollowRootClosure  SerialFullGC::follow_root_closure;

MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure    SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure    SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);

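// DeadSpacer tolerates a limited amount of dead space near the bottom of the
// old generation: instead of compacting it away, dead gaps are turned into
// filler objects so that the surrounding live objects can stay in place. The
// budget is MarkSweepDeadRatio percent of the space capacity, and every
// MarkSweepAlwaysCompactCount-th full collection disables it to force a full
// compaction.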
class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  ContiguousSpace* _space;

public:
  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
                   ? MarkSweepDeadRatio : 0;
    _active = ratio > 0;

    if (_active) {
      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((SerialHeap::heap()->total_full_collections() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      // obj->set_mark(obj->mark().set_marked());

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", %zub",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

// Implement the "compaction" part of the mark-compact GC algorithm.
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compaction. In other words, old-gen and eden/from must be able to hold all
  // live objs.
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It's an optimization to
    // skip a large chunk of live objects at the beginning.
    HeapWord* _first_dead;

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The number of spaces to be compacted, i.e. those containing live objs.
  uint _num_spaces;

  uint _index;

  // Used for BOT update
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

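  // Bump-allocate 'words' words at the current compaction point, advancing to
  // the next compaction space when the current one is full. Allocations in the
  // old generation (index 0) also update its block-offset table.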
  HeapWord* alloc(size_t words) {
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  static void forward_obj(oop obj, HeapWord* new_addr) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      // This obj will stay in-place. Fix the markword.
      obj->init_mark();
    }
  }

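  // Scan forward from start and return the address of the first live
  // (gc-marked) object, or end if there is none.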
  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  }

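  // Copy the object at addr to its forwarding destination and reinitialize the
  // copy's mark word. Returns the size of the object in words.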
  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
    assert(addr != new_addr, "inv");
    prefetch_write_copy(new_addr);

    size_t obj_size = obj->size();
    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    new_obj->init_mark();

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that the heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

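  // Phase 2: walk each space in compaction order and assign every live object
  // its post-compaction address, recorded as a forwarding pointer. Dead ranges
  // are either turned into filler objects (see DeadSpacer) or get the address
  // of the next live object stored in their first word so that later phases
  // can skip over them.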
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size);
          forward_obj(obj, new_addr);
          cur_addr += obj_size;
        } else {
          // Skip over the current, known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            alloc(pointer_delta(next_live_addr, cur_addr));
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
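            // Store the address of the next live object in the first word of
            // this dead range so that phases 3 and 4 can skip straight over it.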
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

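  // Phase 3: walk each space and apply adjust_pointer_closure to every object
  // that will survive (plus the fillers below _first_dead), updating each
  // reference it contains to the referent's new location. Dead ranges are
  // skipped via the next-live pointer stored during phase 2.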
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
          cur_addr += size;
        } else {
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }

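  // Phase 4: copy every forwarded object to its new location, then reset each
  // space's top and optionally mangle the now-unused tail of the space.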
  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
      if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
        // Jump over the chunk of consecutive in-place live objs at the bottom.
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
          cur_addr = *(HeapWord**) cur_addr;
          continue;
        }
        cur_addr += relocate(cur_addr);
      }

      // Reset top and unused memory
      HeapWord* new_top = get_compaction_top(i);
      space->set_top(new_top);
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_unused_area(MemRegion(new_top, top));
      }
    }
  }
};

template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}

void SerialFullGC::push_objarray(oop obj, size_t index) {
  assert(obj->is_refArray(), "Must be");
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}

void SerialFullGC::follow_array(objArrayOop array) {
  mark_and_push_closure.do_klass(array->klass());
  // Don't push empty arrays to avoid unnecessary work.
  if (array->length() > 0) {
    SerialFullGC::push_objarray(array, 0);
  }
}

void SerialFullGC::follow_object(oop obj) {
  assert(obj->is_gc_marked(), "should be marked");
  if (obj->is_refArray()) {
    // Handle object arrays explicitly to allow them to
    // be split into chunks if needed.
    SerialFullGC::follow_array((objArrayOop)obj);
  } else {
    obj->oop_iterate(&mark_and_push_closure);
  }
}

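// Mark and push up to ObjArrayMarkingStride elements of the array starting at
// index; if elements remain, push a continuation task so that very large
// arrays are processed in bounded chunks.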
void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
  const int len = array->length();
  const int beg_index = index;
  assert(beg_index < len || len == 0, "index too large");

  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
  const int end_index = beg_index + stride;

  refArrayOop(array)->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);

  if (end_index < len) {
    SerialFullGC::push_objarray(array, end_index); // Push the continuation.
  }
}

void SerialFullGC::follow_stack() {
  do {
    while (!_marking_stack.is_empty()) {
      oop obj = _marking_stack.pop();
      assert(obj->is_gc_marked(), "obj must be marked");
      follow_object(obj);
    }
    // Process ObjArrays one at a time to avoid marking stack bloat.
    if (!_objarray_stack.is_empty()) {
      ObjArrayTask task = _objarray_stack.pop();
      follow_array_chunk(objArrayOop(task.obj()), task.index());
    }
  } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;

void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }

template <class T> void SerialFullGC::follow_root(T* p) {
  assert(!Universe::heap()->is_in(p),
         "roots shouldn't be things within the heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}

void SerialFullGC::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

// We preserve the mark that must be restored at the end of GC and the location
// it will go to. Note that the object this markWord belongs to isn't at that
// address yet, but it will be after phase 4.
void SerialFullGC::preserve_mark(oop obj, markWord mark) {
  // We try to store preserved marks in the to space of the new generation since
  // this is storage which should be available.  Most of the time this should be
  // sufficient space for the marks we need to preserve but if it isn't we fall
  // back to using Stacks to keep track of the overflow.
  if (_preserved_count < _preserved_count_max) {
    _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
  } else {
    _preserved_overflow_stack_set.get()->push_always(obj, mark);
  }
}

void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

    MarkingNMethodClosure mark_code_closure(&follow_root_closure,
                                            !NMethodToOopClosure::FixRelocations,
                                            true);

    // Start tracing from roots; there are three kinds of roots in full-gc.
    //
    // 1. CLDs. This method internally takes care of whether class unloading is
    // enabled or not, applying the closure either to strong CLDs only or to
    // all CLDs.
    ClassLoaderDataGraph::always_strong_cld_do(&follow_cld_closure);

    // 2. Thread stack frames and the active nmethods in them.
    Threads::oops_do(&follow_root_closure, &mark_code_closure);

    // 3. VM internal roots.
    OopStorageSet::strong_oops_do(&follow_root_closure);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, nullptr, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethods' memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}

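// Preserved marks are stored in the young generation's scratch area (its
// to-space), which is otherwise unused during a full GC; anything that doesn't
// fit there overflows into _preserved_overflow_stack_set.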
void SerialFullGC::allocate_stacks() {
  void* scratch = nullptr;
  size_t num_words;
  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
  young_gen->contribute_scratch(scratch, num_words);

  if (scratch != nullptr) {
    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

  _preserved_overflow_stack_set.init(1);
}

void SerialFullGC::deallocate_stacks() {
  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Some marks may contain information we need to preserve, so we store them
  // away and overwrite the mark. They will be restored at the end of the
  // serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(obj->prototype_mark().set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>
void MarkAndPushClosure::do_oop_work(T* p)            { SerialFullGC::mark_and_push(p); }
void MarkAndPushClosure::do_oop(      oop* p)         { do_oop_work(p); }
void MarkAndPushClosure::do_oop(narrowOop* p)         { do_oop_work(p); }

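// If *p refers to a forwarded object, rewrite the reference to the object's
// new location. Objects that stay in place were never forwarded, so their
// incoming references are left as they are.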
template <class T> void SerialFullGC::adjust_pointer(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(Universe::heap()->is_in(obj), "should be in heap");

    if (FullGCForwarding::is_forwarded(obj)) {
      oop new_obj = FullGCForwarding::forwardee(obj);
      assert(is_object_aligned(new_obj), "oop must be aligned");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
}

template <typename T>
void AdjustPointerClosure::do_oop_work(T* p)           { SerialFullGC::adjust_pointer(p); }
inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

AdjustPointerClosure SerialFullGC::adjust_pointer_closure;

void SerialFullGC::adjust_marks() {
  // adjust the oops we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
  }

  // deal with the overflow stack
  _preserved_overflow_stack_set.get()->adjust_during_full_gc();
}

void SerialFullGC::restore_marks() {
  log_trace(gc)("Restoring %zu marks", _preserved_count + _preserved_overflow_stack_set.get()->size());

  // restore the marks we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    _preserved_marks[i].set_mark();
  }

  // deal with the overflow
  _preserved_overflow_stack_set.restore(nullptr);
}

SerialFullGC::IsAliveClosure   SerialFullGC::is_alive;

bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }

SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;

void SerialFullGC::KeepAliveClosure::do_oop(oop* p)       { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }

void SerialFullGC::initialize() {
  SerialFullGC::_gc_timer = new STWGCTimer();
  SerialFullGC::_gc_tracer = new SerialOldTracer();
  SerialFullGC::_string_dedup_requests = new StringDedup::Requests();

  // The Full GC operates on the entire heap so all objects should be subject
  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

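// Entry point for the serial full GC: a stop-the-world, four-phase sliding
// mark-compact collection over the whole heap. Phase 1 marks live objects,
// phase 2 computes new addresses, phase 3 adjusts references, and phase 4
// moves the objects.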
void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish the old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  // Usually, all class unloading work occurs at the end of phase 1, but the
  // Serial full-gc accesses dead objs' klasses to find the start of the next
  // live obj during phase 2. This requires the klasses of dead objs to be kept
  // loaded. Therefore, we declare the ClassUnloadingContext at the same level
  // as the full-gc phases, and purge dead classes (invoking
  // ClassLoaderDataGraph::purge) after all phases of full-gc.
  ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                            false /* unregister_nmethods_during_purge */,
                            false /* lock_nmethod_free_separately */);

  phase1_mark(clear_all_softrefs);

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase 3.
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    // Remap strong and weak roots in the adjust phase.
    // 1. All (strong and weak) CLDs.
    ClassLoaderDataGraph::cld_do(&adjust_cld_closure);

    // 2. Thread stack frames. No need to visit on-stack nmethods here, because
    // all nmethods are visited in one go via CodeCache::nmethods_do.
    Threads::oops_do(&adjust_pointer_closure, nullptr);
    NMethodToOopClosure nmethod_cl(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
    CodeCache::nmethods_do(&nmethod_cl);

    // 3. VM internal roots
    OopStorageSet::strong_oops_do(&adjust_pointer_closure);

    // 4. VM internal weak roots
    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  // Delete metaspaces for unloaded class loaders and clean up CLDG.
  ClassLoaderDataGraph::purge(true /* at_safepoint */);
  DEBUG_ONLY(MetaspaceUtils::verify();)

  // Need to clear claim bits for the next full-gc (specifically phases 1 and 3).
  ClassLoaderDataGraph::clear_claimed_marks();

  restore_marks();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to the soft ref clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}