1 /*
  2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/classLoaderDataGraph.hpp"
 27 #include "classfile/classLoaderData.inline.hpp"
 28 #include "classfile/javaClasses.inline.hpp"
 29 #include "classfile/stringTable.hpp"
 30 #include "classfile/symbolTable.hpp"
 31 #include "classfile/systemDictionary.hpp"
 32 #include "classfile/vmSymbols.hpp"
 33 #include "code/codeCache.hpp"
 34 #include "compiler/compileBroker.hpp"
 35 #include "compiler/oopMap.hpp"
 36 #include "gc/serial/cardTableRS.hpp"
 37 #include "gc/serial/defNewGeneration.hpp"
 38 #include "gc/serial/serialFullGC.hpp"
 39 #include "gc/serial/serialGcRefProcProxyTask.hpp"
 40 #include "gc/serial/serialHeap.hpp"
 41 #include "gc/serial/serialStringDedup.hpp"
 42 #include "gc/shared/classUnloadingContext.hpp"
 43 #include "gc/shared/collectedHeap.inline.hpp"
 44 #include "gc/shared/continuationGCSupport.inline.hpp"
 45 #include "gc/shared/gcHeapSummary.hpp"
 46 #include "gc/shared/gcTimer.hpp"
 47 #include "gc/shared/gcTrace.hpp"
 48 #include "gc/shared/gcTraceTime.inline.hpp"
 49 #include "gc/shared/gc_globals.hpp"
 50 #include "gc/shared/modRefBarrierSet.hpp"
 51 #include "gc/shared/preservedMarks.inline.hpp"
 52 #include "gc/shared/referencePolicy.hpp"
 53 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 54 #include "gc/shared/space.inline.hpp"
 55 #include "gc/shared/strongRootsScope.hpp"
 56 #include "gc/shared/weakProcessor.hpp"
 57 #include "memory/iterator.inline.hpp"
 58 #include "memory/universe.hpp"
 59 #include "oops/access.inline.hpp"
 60 #include "oops/compressedOops.inline.hpp"
 61 #include "oops/instanceRefKlass.hpp"
 62 #include "oops/markWord.hpp"
 63 #include "oops/methodData.hpp"
 64 #include "oops/objArrayKlass.inline.hpp"
 65 #include "oops/oop.inline.hpp"
 66 #include "oops/typeArrayOop.inline.hpp"
 67 #include "runtime/prefetch.inline.hpp"
 68 #include "utilities/align.hpp"
 69 #include "utilities/copy.hpp"
 70 #include "utilities/events.hpp"
 71 #include "utilities/stack.inline.hpp"
 72 #if INCLUDE_JVMCI
 73 #include "jvmci/jvmci.hpp"
 74 #endif
 75 
// Number of full GCs performed so far; consulted by DeadSpacer to decide
// when a periodic full compaction (MarkSweepAlwaysCompactCount) is due.
uint                    SerialFullGC::_total_invocations = 0;

// Marking work stacks: plain objects, and partially-scanned object arrays
// (arrays are traced in chunks to bound marking-stack growth).
Stack<oop, mtGC>              SerialFullGC::_marking_stack;
Stack<ObjArrayTask, mtGC>     SerialFullGC::_objarray_stack;

// Storage for object headers that carry information and must survive being
// overwritten by mark/forwarding words: a fixed-size scratch array
// (_preserved_marks, capacity _preserved_count_max) plus an overflow stack set.
PreservedMarksSet       SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
size_t                  SerialFullGC::_preserved_count = 0;
size_t                  SerialFullGC::_preserved_count_max = 0;
PreservedMark*          SerialFullGC::_preserved_marks = nullptr;
STWGCTimer*             SerialFullGC::_gc_timer        = nullptr;
SerialOldTracer*        SerialFullGC::_gc_tracer       = nullptr;

// Full GC spans the whole heap, so reference discovery is unconditional.
AlwaysTrueClosure   SerialFullGC::_always_true_closure;
ReferenceProcessor* SerialFullGC::_ref_processor;

StringDedup::Requests*  SerialFullGC::_string_dedup_requests = nullptr;

SerialFullGC::FollowRootClosure  SerialFullGC::follow_root_closure;

// CLD closures use distinct claim tokens for the mark and adjust phases.
MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure    SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure    SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
 98 
 99 class DeadSpacer : StackObj {
100   size_t _allowed_deadspace_words;
101   bool _active;
102   ContiguousSpace* _space;
103 
104 public:
105   DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
106     size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
107                    ? MarkSweepDeadRatio : 0;
108     _active = ratio > 0;
109 
110     if (_active) {
111       // We allow some amount of garbage towards the bottom of the space, so
112       // we don't start compacting before there is a significant gain to be made.
113       // Occasionally, we want to ensure a full compaction, which is determined
114       // by the MarkSweepAlwaysCompactCount parameter.
115       if ((SerialFullGC::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
116         _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
117       } else {
118         _active = false;
119       }
120     }
121   }
122 
123   bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
124     if (!_active) {
125       return false;
126     }
127 
128     size_t dead_length = pointer_delta(dead_end, dead_start);
129     if (_allowed_deadspace_words >= dead_length) {
130       _allowed_deadspace_words -= dead_length;
131       CollectedHeap::fill_with_object(dead_start, dead_length);
132       oop obj = cast_to_oop(dead_start);
133       // obj->set_mark(obj->mark().set_marked());
134 
135       assert(dead_length == obj->size(), "bad filler object size");
136       log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
137                                         p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);
138 
139       return true;
140     } else {
141       _active = false;
142       return false;
143     }
144   }
145 };
146 
// Implement the "compaction" part of the mark-compact GC algorithm.
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compact. IOW, old and eden/from must be enough for all live objs
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It's an optimization to
    // skip large chunk of live objects at the beginning.
    HeapWord* _first_dead;

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The num of spaces to be compacted, i.e. containing live objs.
  uint _num_spaces;

  // Index of the space destination addresses are currently allocated from;
  // only ever advances.
  uint _index;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

  // Bump-pointer allocation of `words` words at the destination cursor,
  // advancing to the next space when the current one runs out of room.
  HeapWord* alloc(size_t words) {
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          static_cast<TenuredSpace*>(_spaces[0]._space)->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  // Install the forwarding pointer for `obj`, or, if it will not move,
  // restore its header to the default (unmarked) state.
  static void forward_obj(oop obj, HeapWord* new_addr) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      obj->forward_to(cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      // This obj will stay in-place. Fix the markword.
      obj->init_mark();
    }
  }

  // Linear scan of [start, end) for the next marked (live) object; returns
  // `end` if none is found.
  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  };

  // Copy the object at `addr` to its forwarding address and reinitialize the
  // header at the destination. Returns the object size in words.
  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = obj->forwardee();
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
    assert(addr != new_addr, "inv");
    prefetch_write_copy(new_addr);

    size_t obj_size = obj->size();
    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    new_obj->init_mark();

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    // to-space holds objects only after a promotion failure; in that case it
    // must be compacted as well.
    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
  }

  // Walk all live objects and record their post-compaction addresses as
  // forwarding pointers. Also records, per space, the first dead word and
  // links each dead gap to the next live object for fast skipping later.
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size);
          forward_obj(obj, new_addr);
          cur_addr += obj_size;
        } else {
          // Skipping the current known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            alloc(pointer_delta(next_live_addr, cur_addr));
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            // Store the address of the next live object in the first word of
            // the dead range, so phases 3 and 4 can skip the whole gap.
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

  // Visit every reference field of every live object and rewrite it to the
  // referent's new (forwarded) address.
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
          cur_addr += size;
        } else {
          // Dead range: its first word holds the address of the next live obj.
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }

  // Physically move each forwarded object to its new address, then reset each
  // space's top (and optionally mangle the now-unused tail).
  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
      if (!cast_to_oop(cur_addr)->is_forwarded()) {
        // Jump over consecutive (in-place) live-objs-chunk
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (!cast_to_oop(cur_addr)->is_forwarded()) {
          cur_addr = *(HeapWord**) cur_addr;
          continue;
        }
        cur_addr += relocate(cur_addr);
      }

      // Reset top and unused memory
      space->set_top(get_compaction_top(i));
      if (ZapUnusedHeapArea) {
        space->mangle_unused_area();
      }
    }
  }
};
373 
// Keep-alive closure used by reference processing: marks and pushes the
// object referenced through *p.
template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}
377 
// Record a partially-scanned object array; marking resumes at element `index`.
void SerialFullGC::push_objarray(oop obj, size_t index) {
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}
383 
384 void SerialFullGC::follow_array(objArrayOop array) {
385   mark_and_push_closure.do_klass(array->klass());
386   // Don't push empty arrays to avoid unnecessary work.
387   if (array->length() > 0) {
388     SerialFullGC::push_objarray(array, 0);
389   }
390 }
391 
392 void SerialFullGC::follow_object(oop obj) {
393   assert(obj->is_gc_marked(), "should be marked");
394   if (obj->is_objArray()) {
395     // Handle object arrays explicitly to allow them to
396     // be split into chunks if needed.
397     SerialFullGC::follow_array((objArrayOop)obj);
398   } else {
399     obj->oop_iterate(&mark_and_push_closure);
400   }
401 }
402 
403 void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
404   const int len = array->length();
405   const int beg_index = index;
406   assert(beg_index < len || len == 0, "index too large");
407 
408   const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
409   const int end_index = beg_index + stride;
410 
411   array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
412 
413   if (end_index < len) {
414     SerialFullGC::push_objarray(array, end_index); // Push the continuation.
415   }
416 }
417 
418 void SerialFullGC::follow_stack() {
419   do {
420     while (!_marking_stack.is_empty()) {
421       oop obj = _marking_stack.pop();
422       assert (obj->is_gc_marked(), "p must be marked");
423       follow_object(obj);
424     }
425     // Process ObjArrays one at a time to avoid marking stack bloat.
426     if (!_objarray_stack.is_empty()) {
427       ObjArrayTask task = _objarray_stack.pop();
428       follow_array_chunk(objArrayOop(task.obj()), task.index());
429     }
430   } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
431 }
432 
SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;

// VoidClosure wrapper around follow_stack(), handed to reference processing.
void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }
436 
// Trace a single root slot: mark and transitively follow the object it
// references, then drain the work stacks before returning.
template <class T> void SerialFullGC::follow_root(T* p) {
  assert(!Universe::heap()->is_in(p),
         "roots shouldn't be things within the heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      // Newly discovered object: mark it and trace it.
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}
450 
// Root-scanning entry points for both uncompressed and compressed oops.
void SerialFullGC::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
453 
454 // We preserve the mark which should be replaced at the end and the location
455 // that it will go.  Note that the object that this markWord belongs to isn't
456 // currently at that address but it will be after phase4
457 void SerialFullGC::preserve_mark(oop obj, markWord mark) {
458   // We try to store preserved marks in the to space of the new generation since
459   // this is storage which should be available.  Most of the time this should be
460   // sufficient space for the marks we need to preserve but if it isn't we fall
461   // back to using Stacks to keep track of the overflow.
462   if (_preserved_count < _preserved_count_max) {
463     _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
464   } else {
465     _preserved_overflow_stack_set.get()->push_always(obj, mark);
466   }
467 }
468 
void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  // No CLD may carry a stale claim for this phase's token.
  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

    // With class unloading enabled, weakly-reachable CLDs are not treated as
    // roots (nullptr closure), so that such classes can be unloaded below.
    CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
    MarkingNMethodClosure mark_code_closure(&follow_root_closure, !NMethodToOopClosure::FixRelocations, true);
    gch->process_roots(SerialHeap::SO_None,
                       &follow_root_closure,
                       &follow_cld_closure,
                       weak_cld_closure,
                       &mark_code_closure);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    // Clear weak roots whose referents did not survive marking.
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}
552 
553 void SerialFullGC::allocate_stacks() {
554   void* scratch = nullptr;
555   size_t num_words;
556   DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
557   young_gen->contribute_scratch(scratch, num_words);
558 
559   if (scratch != nullptr) {
560     _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
561   } else {
562     _preserved_count_max = 0;
563   }
564 
565   _preserved_marks = (PreservedMark*)scratch;
566   _preserved_count = 0;
567 
568   _preserved_overflow_stack_set.init(1);
569 }
570 
571 void SerialFullGC::deallocate_stacks() {
572   if (_preserved_count_max != 0) {
573     DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
574     young_gen->reset_scratch();
575   }
576 
577   _preserved_overflow_stack_set.reclaim();
578   _marking_stack.clear();
579   _objarray_stack.clear(true);
580 }
581 
582 void SerialFullGC::mark_object(oop obj) {
583   if (StringDedup::is_enabled() &&
584       java_lang_String::is_instance(obj) &&
585       SerialStringDedup::is_candidate_from_mark(obj)) {
586     _string_dedup_requests->add(obj);
587   }
588 
589   // some marks may contain information we need to preserve so we store them away
590   // and overwrite the mark.  We'll restore it at the end of serial full GC.
591   markWord mark = obj->mark();
592   obj->set_mark(markWord::prototype().set_marked());
593 
594   ContinuationGCSupport::transform_stack_chunk(obj);
595 
596   if (obj->mark_must_be_preserved(mark)) {
597     preserve_mark(obj, mark);
598   }
599 }
600 
601 template <class T> void SerialFullGC::mark_and_push(T* p) {
602   T heap_oop = RawAccess<>::oop_load(p);
603   if (!CompressedOops::is_null(heap_oop)) {
604     oop obj = CompressedOops::decode_not_null(heap_oop);
605     if (!obj->mark().is_marked()) {
606       mark_object(obj);
607       _marking_stack.push(obj);
608     }
609   }
610 }
611 
// Marking closure: delegates to SerialFullGC::mark_and_push for both wide
// and narrow oops.
template <typename T>
void MarkAndPushClosure::do_oop_work(T* p)            { SerialFullGC::mark_and_push(p); }
void MarkAndPushClosure::do_oop(      oop* p)         { do_oop_work(p); }
void MarkAndPushClosure::do_oop(narrowOop* p)         { do_oop_work(p); }
616 
617 template <class T> void SerialFullGC::adjust_pointer(T* p) {
618   T heap_oop = RawAccess<>::oop_load(p);
619   if (!CompressedOops::is_null(heap_oop)) {
620     oop obj = CompressedOops::decode_not_null(heap_oop);
621     assert(Universe::heap()->is_in(obj), "should be in heap");
622 
623     if (obj->is_forwarded()) {
624       oop new_obj = obj->forwardee();
625       assert(is_object_aligned(new_obj), "oop must be aligned");
626       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
627     }
628   }
629 }
630 
// Pointer-adjustment closure used during phase 3.
template <typename T>
void AdjustPointerClosure::do_oop_work(T* p)           { SerialFullGC::adjust_pointer(p); }
inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

AdjustPointerClosure SerialFullGC::adjust_pointer_closure;
637 
638 void SerialFullGC::adjust_marks() {
639   // adjust the oops we saved earlier
640   for (size_t i = 0; i < _preserved_count; i++) {
641     PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
642   }
643 
644   // deal with the overflow stack
645   _preserved_overflow_stack_set.get()->adjust_during_full_gc();
646 }
647 
648 void SerialFullGC::restore_marks() {
649   log_trace(gc)("Restoring " SIZE_FORMAT " marks", _preserved_count + _preserved_overflow_stack_set.get()->size());
650 
651   // restore the marks we saved earlier
652   for (size_t i = 0; i < _preserved_count; i++) {
653     _preserved_marks[i].set_mark();
654   }
655 
656   // deal with the overflow
657   _preserved_overflow_stack_set.restore(nullptr);
658 }
659 
SerialFullGC::IsAliveClosure   SerialFullGC::is_alive;

// An object is considered alive iff it was marked during phase 1.
bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }

SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;

void SerialFullGC::KeepAliveClosure::do_oop(oop* p)       { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
668 
669 void SerialFullGC::initialize() {
670   SerialFullGC::_gc_timer = new STWGCTimer();
671   SerialFullGC::_gc_tracer = new SerialOldTracer();
672   SerialFullGC::_string_dedup_requests = new StringDedup::Requests();
673 
674   // The Full GC operates on the entire heap so all objects should be subject
675   // to discovery, hence the _always_true_closure.
676   SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
677   mark_and_push_closure.set_ref_discoverer(_ref_processor);
678 }
679 
// Top-level driver for the serial mark-compact full GC: runs the four phases
// (mark, compute addresses, adjust pointers, move objects) and the
// surrounding bookkeeping. Must be called at a safepoint.
void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();
#ifdef ASSERT
  if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  gch->trace_heap_before_gc(_gc_tracer);

  // Increment the invocation count
  _total_invocations++;

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    // All code-cache roots are visited here (SO_AllCodeCache), fixing up
    // relocations as embedded oops are retargeted.
    NMethodToOopClosure code_closure(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
    gch->process_roots(SerialHeap::SO_AllCodeCache,
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    // Preserved-mark entries reference oops too; retarget them before the
    // objects actually move.
    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  // Reinstall the object headers saved away during marking.
  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}