/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialStringDedup.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

Stack<oop, mtGC>              SerialFullGC::_marking_stack;
Stack<ObjArrayTask, mtGC>     SerialFullGC::_objarray_stack;

PreservedMarksSet       SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
size_t                  SerialFullGC::_preserved_count = 0;
size_t                  SerialFullGC::_preserved_count_max = 0;
PreservedMark*          SerialFullGC::_preserved_marks = nullptr;
STWGCTimer*             SerialFullGC::_gc_timer        = nullptr;
SerialOldTracer*        SerialFullGC::_gc_tracer       = nullptr;

AlwaysTrueClosure   SerialFullGC::_always_true_closure;
ReferenceProcessor* SerialFullGC::_ref_processor;

StringDedup::Requests*  SerialFullGC::_string_dedup_requests = nullptr;

SerialFullGC::FollowRootClosure  SerialFullGC::follow_root_closure;

MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure    SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure    SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);

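// Bounds how much dead space may be kept in place (instead of compacted away)
// near the bottom of the old generation, as controlled by MarkSweepDeadRatio.
// Every MarkSweepAlwaysCompactCount-th full collection the budget is zero, so a
// complete compaction happens.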
class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  ContiguousSpace* _space;

public:
  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
                   ? MarkSweepDeadRatio : 0;
    _active = ratio > 0;

    if (_active) {
      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((SerialHeap::heap()->total_full_collections() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

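  // Try to account the dead range [dead_start, dead_end) against the remaining
  // dead-space budget. On success the range is overwritten with a filler object
  // and may stay in place; otherwise dead space is no longer accepted in this
  // space and false is returned.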
  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      // obj->set_mark(obj->mark().set_marked());

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", %zub",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

// Implement the "compaction" part of the mark-compact GC algorithm.
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compaction; i.e. old-gen and eden/from must have enough room for all live objects.
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It's an optimization to
    // skip a large chunk of live objects at the beginning.
    HeapWord* _first_dead;

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The number of spaces to be compacted, i.e. those containing live objects.
  uint _num_spaces;

  uint _index;

  // Used for BOT update
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

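  // Bump-pointer style allocation within the current compaction space, advancing
  // to the next space when the current one is exhausted. old_size is used when the
  // object stays at its current address (old_obj is already at the compaction top),
  // new_size (its size after being copied) otherwise.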
  HeapWord* alloc(size_t old_size, size_t new_size, HeapWord* old_obj) {
    size_t words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
      words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  static void forward_obj(oop obj, HeapWord* new_addr) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      // This obj will stay in-place. Fix the markword.
      obj->init_mark();
    }
  }

  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  }

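  // Copy the object at addr to its forwardee (if it moves), reinitialize the
  // mark word of the copy, and return the object size in words.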
  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);

    size_t obj_size = obj->size();
    if (addr != new_addr) {
      prefetch_write_copy(new_addr);
      Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    }
    new_obj->init_mark();
    if (addr != new_addr) {
      new_obj->initialize_hash_if_necessary(obj);
    }

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

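  // Phase 2: compute the post-compaction address of every live object and record
  // it in the object's forwarding slot. Runs of dead objects are either turned
  // into filler objects (see DeadSpacer) or overwritten with the address of the
  // next live object so that later phases can skip them quickly.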
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        size_t new_size = obj->copy_size(obj_size, obj->mark());
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size, new_size, cur_addr);
          forward_obj(obj, new_addr);
          assert(obj->size() == obj_size, "size must not change after forwarding");
          cur_addr += obj_size;
        } else {
          // Skipping the current known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            size_t size = pointer_delta(next_live_addr, cur_addr);
            alloc(size, size, cur_addr);
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

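  // Phase 3 (per-space part): visit each live object and update the references it
  // contains to point to the forwardees computed in phase 2.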
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
          cur_addr += size;
        } else {
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }

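  // Phase 4: move every forwarded object to its new location, then reset each
  // space's top (and optionally mangle the now-unused tail).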
  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
      if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
        // Skip over the leading chunk of consecutive live objects that stay in place.
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
          cur_addr = *(HeapWord**) cur_addr;
          continue;
        }
        cur_addr += relocate(cur_addr);
      }

      // Reset top and unused memory
      HeapWord* new_top = get_compaction_top(i);
      space->set_top(new_top);
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_unused_area(MemRegion(new_top, top));
      }
    }
  }
};

template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}

void SerialFullGC::push_objarray(oop obj, size_t index) {
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}

void SerialFullGC::follow_array(objArrayOop array) {
  mark_and_push_closure.do_klass(array->klass());
  // Don't push empty arrays to avoid unnecessary work.
  if (array->length() > 0) {
    SerialFullGC::push_objarray(array, 0);
  }
}

void SerialFullGC::follow_object(oop obj) {
  assert(obj->is_gc_marked(), "should be marked");
  if (obj->is_objArray()) {
    // Handle object arrays explicitly to allow them to
    // be split into chunks if needed.
    SerialFullGC::follow_array((objArrayOop)obj);
  } else {
    obj->oop_iterate(&mark_and_push_closure);
  }
}

void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
  const int len = array->length();
  const int beg_index = index;
  assert(beg_index < len || len == 0, "index too large");

  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
  const int end_index = beg_index + stride;

  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);

  if (end_index < len) {
    SerialFullGC::push_objarray(array, end_index); // Push the continuation.
  }
}

void SerialFullGC::follow_stack() {
  do {
    while (!_marking_stack.is_empty()) {
      oop obj = _marking_stack.pop();
      assert(obj->is_gc_marked(), "obj must be marked");
      follow_object(obj);
    }
    // Process ObjArrays one at a time to avoid marking stack bloat.
    if (!_objarray_stack.is_empty()) {
      ObjArrayTask task = _objarray_stack.pop();
      follow_array_chunk(objArrayOop(task.obj()), task.index());
    }
  } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;

void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }

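// Roots are marked and traced eagerly: the root object is marked, its reachable
// graph is followed, and the marking stack is drained before returning.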
template <class T> void SerialFullGC::follow_root(T* p) {
  assert(!Universe::heap()->is_in(p),
         "roots shouldn't be things within the heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}

void SerialFullGC::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

// We preserve the mark that must be restored at the end of GC, together with the
// location it will go to. Note that the object this markWord belongs to isn't at
// that address yet; it will be there after phase 4 has moved it.
void SerialFullGC::preserve_mark(oop obj, markWord mark) {
  // We try to store preserved marks in the to space of the new generation since
  // this is storage which should be available.  Most of the time this should be
  // sufficient space for the marks we need to preserve but if it isn't we fall
  // back to using Stacks to keep track of the overflow.
  if (_preserved_count < _preserved_count_max) {
    _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
  } else {
    _preserved_overflow_stack_set.get()->push_always(obj, mark);
  }
}

void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

    CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
    MarkingNMethodClosure mark_code_closure(&follow_root_closure, !NMethodToOopClosure::FixRelocations, true);
    gch->process_roots(SerialHeap::SO_None,
                       &follow_root_closure,
                       &follow_cld_closure,
                       weak_cld_closure,
                       &mark_code_closure);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}

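// Borrow the young generation's scratch (to-space) memory as a buffer for
// preserved marks; anything beyond that capacity goes to
// _preserved_overflow_stack_set (see preserve_mark()).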
void SerialFullGC::allocate_stacks() {
  void* scratch = nullptr;
  size_t num_words;
  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
  young_gen->contribute_scratch(scratch, num_words);

  if (scratch != nullptr) {
    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

  _preserved_overflow_stack_set.init(1);
}

void SerialFullGC::deallocate_stacks() {
  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Some marks may contain information we need to preserve, so we store them away
  // and overwrite the mark. We'll restore the saved marks at the end of the serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(mark.set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

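// Mark the object referenced by p (if it is not already marked) and push it on
// the marking stack for later tracing.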
template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>
void MarkAndPushClosure::do_oop_work(T* p)            { SerialFullGC::mark_and_push(p); }
void MarkAndPushClosure::do_oop(      oop* p)         { do_oop_work(p); }
void MarkAndPushClosure::do_oop(narrowOop* p)         { do_oop_work(p); }

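// If *p refers to a forwarded object, rewrite the reference to point to the
// forwardee (the object's post-compaction address).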
template <class T> void SerialFullGC::adjust_pointer(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(Universe::heap()->is_in(obj), "should be in heap");

    if (FullGCForwarding::is_forwarded(obj)) {
      oop new_obj = FullGCForwarding::forwardee(obj);
      assert(is_object_aligned(new_obj), "oop must be aligned");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
}

template <typename T>
void AdjustPointerClosure::do_oop_work(T* p)           { SerialFullGC::adjust_pointer(p); }
inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

AdjustPointerClosure SerialFullGC::adjust_pointer_closure;

void SerialFullGC::adjust_marks() {
  // adjust the oops we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
  }

  // deal with the overflow stack
  _preserved_overflow_stack_set.get()->adjust_during_full_gc();
}

void SerialFullGC::restore_marks() {
  log_trace(gc)("Restoring %zu marks", _preserved_count + _preserved_overflow_stack_set.get()->size());

  // restore the marks we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    _preserved_marks[i].set_mark();
  }

  // deal with the overflow
  _preserved_overflow_stack_set.restore(nullptr);
}

SerialFullGC::IsAliveClosure   SerialFullGC::is_alive;

bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }

SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;

void SerialFullGC::KeepAliveClosure::do_oop(oop* p)       { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }

void SerialFullGC::initialize() {
  SerialFullGC::_gc_timer = new STWGCTimer();
  SerialFullGC::_gc_tracer = new SerialOldTracer();
  SerialFullGC::_string_dedup_requests = new StringDedup::Requests();

  // The Full GC operates on the entire heap so all objects should be subject
  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

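// Entry point for the serial full GC: mark live objects (phase 1), compute new
// addresses (phase 2), adjust pointers (phase 3) and move objects (phase 4).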
void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

  FullGCForwarding::begin();

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    NMethodToOopClosure code_closure(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
    gch->process_roots(SerialHeap::SO_AllCodeCache,
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  restore_marks();

  FullGCForwarding::end();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information, which is used as
  // input to the soft ref clearing policy at the next GC.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}