1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/classLoaderData.inline.hpp"
26 #include "classfile/classLoaderDataGraph.hpp"
27 #include "classfile/javaClasses.inline.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/codeCache.hpp"
33 #include "compiler/compileBroker.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/serial/cardTableRS.hpp"
36 #include "gc/serial/defNewGeneration.hpp"
37 #include "gc/serial/serialFullGC.hpp"
38 #include "gc/serial/serialGcRefProcProxyTask.hpp"
39 #include "gc/serial/serialHeap.hpp"
40 #include "gc/serial/serialStringDedup.hpp"
41 #include "gc/serial/tenuredGeneration.inline.hpp"
42 #include "gc/shared/classUnloadingContext.hpp"
43 #include "gc/shared/collectedHeap.inline.hpp"
44 #include "gc/shared/continuationGCSupport.inline.hpp"
45 #include "gc/shared/fullGCForwarding.inline.hpp"
46 #include "gc/shared/gc_globals.hpp"
47 #include "gc/shared/gcHeapSummary.hpp"
48 #include "gc/shared/gcTimer.hpp"
49 #include "gc/shared/gcTrace.hpp"
50 #include "gc/shared/gcTraceTime.inline.hpp"
51 #include "gc/shared/modRefBarrierSet.hpp"
52 #include "gc/shared/oopStorageSet.inline.hpp"
53 #include "gc/shared/preservedMarks.inline.hpp"
54 #include "gc/shared/referencePolicy.hpp"
55 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
56 #include "gc/shared/space.hpp"
57 #include "gc/shared/weakProcessor.hpp"
58 #include "memory/iterator.inline.hpp"
59 #include "memory/universe.hpp"
60 #include "oops/access.inline.hpp"
61 #include "oops/compressedOops.inline.hpp"
62 #include "oops/instanceRefKlass.hpp"
63 #include "oops/markWord.hpp"
64 #include "oops/methodData.hpp"
65 #include "oops/objArrayKlass.inline.hpp"
66 #include "oops/oop.inline.hpp"
67 #include "oops/typeArrayOop.inline.hpp"
68 #include "runtime/prefetch.inline.hpp"
69 #include "runtime/threads.hpp"
70 #include "utilities/align.hpp"
71 #include "utilities/copy.hpp"
72 #include "utilities/events.hpp"
73 #include "utilities/stack.inline.hpp"
74 #if INCLUDE_JVMCI
75 #include "jvmci/jvmci.hpp"
76 #endif
77
78 Stack<oop, mtGC> SerialFullGC::_marking_stack;
79 Stack<ObjArrayTask, mtGC> SerialFullGC::_objarray_stack;
80
81 PreservedMarksSet SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
82 size_t SerialFullGC::_preserved_count = 0;
83 size_t SerialFullGC::_preserved_count_max = 0;
84 PreservedMark* SerialFullGC::_preserved_marks = nullptr;
85 STWGCTimer* SerialFullGC::_gc_timer = nullptr;
86 SerialOldTracer* SerialFullGC::_gc_tracer = nullptr;
87
88 AlwaysTrueClosure SerialFullGC::_always_true_closure;
89 ReferenceProcessor* SerialFullGC::_ref_processor;
90
91 StringDedup::Requests* SerialFullGC::_string_dedup_requests = nullptr;
92
93 SerialFullGC::FollowRootClosure SerialFullGC::follow_root_closure;
94
95 MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
96 CLDToOopClosure SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
97 CLDToOopClosure SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
98
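// Allows a bounded amount of dead space near the bottom of the old-gen space to
// be overwritten with filler objects and kept in place, so that compaction does
// not have to move every live object on every full GC. Only active for the
// old-gen space, and disabled on collections that must fully compact (see
// MarkSweepDeadRatio and MarkSweepAlwaysCompactCount).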
99 class DeadSpacer : StackObj {
100 size_t _allowed_deadspace_words;
101 bool _active;
102 ContiguousSpace* _space;
103
104 public:
105 DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
106 size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
107 ? MarkSweepDeadRatio : 0;
108 _active = ratio > 0;
109
110 if (_active) {
111 // We allow some amount of garbage towards the bottom of the space, so
112 // we don't start compacting before there is a significant gain to be made.
113 // Occasionally, we want to ensure a full compaction, which is determined
114 // by the MarkSweepAlwaysCompactCount parameter.
115 if ((SerialHeap::heap()->total_full_collections() % MarkSweepAlwaysCompactCount) != 0) {
116 _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
117 } else {
118 _active = false;
119 }
120 }
121 }
122
123 bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
124 if (!_active) {
125 return false;
126 }
127
128 size_t dead_length = pointer_delta(dead_end, dead_start);
129 if (_allowed_deadspace_words >= dead_length) {
130 _allowed_deadspace_words -= dead_length;
131 CollectedHeap::fill_with_object(dead_start, dead_length);
132 oop obj = cast_to_oop(dead_start);
133 // obj->set_mark(obj->mark().set_marked());
134
135 assert(dead_length == obj->size(), "bad filler object size");
136 log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", %zub",
137 p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);
138
139 return true;
140 } else {
141 _active = false;
142 return false;
143 }
144 }
145 };
146
147 // Implement the "compaction" part of the mark-compact GC algorithm.
148 class Compacter {
  // There are four spaces in total, but only the first three can be used as
  // compaction destinations. In other words, old-gen plus eden/from must have
  // enough room for all live objects.
151 static constexpr uint max_num_spaces = 4;
152
153 struct CompactionSpace {
154 ContiguousSpace* _space;
155 // Will be the new top after compaction is complete.
156 HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It is an optimization that
    // lets later phases skip a large chunk of live objects at the beginning.
159 HeapWord* _first_dead;
160
161 void init(ContiguousSpace* space) {
162 _space = space;
163 _compaction_top = space->bottom();
164 _first_dead = nullptr;
165 }
166 };
167
168 CompactionSpace _spaces[max_num_spaces];
  // The number of spaces to be compacted, i.e. those containing live objects.
170 uint _num_spaces;
171
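  // Index of the space currently being used as the compaction destination.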
172 uint _index;
173
174 // Used for BOT update
175 TenuredGeneration* _old_gen;
176
177 HeapWord* get_compaction_top(uint index) const {
178 return _spaces[index]._compaction_top;
179 }
180
181 HeapWord* get_first_dead(uint index) const {
182 return _spaces[index]._first_dead;
183 }
184
185 ContiguousSpace* get_space(uint index) const {
186 return _spaces[index]._space;
187 }
188
189 void record_first_dead(uint index, HeapWord* first_dead) {
190 assert(_spaces[index]._first_dead == nullptr, "should write only once");
191 _spaces[index]._first_dead = first_dead;
192 }
193
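  // Allocate `words` words in the current destination space, advancing to the
  // next space when the current one does not have enough room left. Returns
  // the destination (post-compaction) address.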
194 HeapWord* alloc(size_t words) {
195 while (true) {
196 if (words <= pointer_delta(_spaces[_index]._space->end(),
197 _spaces[_index]._compaction_top)) {
198 HeapWord* result = _spaces[_index]._compaction_top;
199 _spaces[_index]._compaction_top += words;
200 if (_index == 0) {
201 // old-gen requires BOT update
202 _old_gen->update_for_block(result, result + words);
203 }
204 return result;
205 }
206
207 // out-of-memory in this space
208 _index++;
209 assert(_index < max_num_spaces - 1, "the last space should not be used");
210 }
211 }
212
213 static void prefetch_read_scan(void* p) {
214 if (PrefetchScanIntervalInBytes >= 0) {
215 Prefetch::read(p, PrefetchScanIntervalInBytes);
216 }
217 }
218
219 static void prefetch_write_scan(void* p) {
220 if (PrefetchScanIntervalInBytes >= 0) {
221 Prefetch::write(p, PrefetchScanIntervalInBytes);
222 }
223 }
224
225 static void prefetch_write_copy(void* p) {
226 if (PrefetchCopyIntervalInBytes >= 0) {
227 Prefetch::write(p, PrefetchCopyIntervalInBytes);
228 }
229 }
230
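  // Record the new location of a live object: install a forwarding pointer if
  // it moves, otherwise just reinitialize its mark since it stays in place.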
231 static void forward_obj(oop obj, HeapWord* new_addr) {
232 prefetch_write_scan(obj);
233 if (cast_from_oop<HeapWord*>(obj) != new_addr) {
234 FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
235 } else {
236 assert(obj->is_gc_marked(), "inv");
237 // This obj will stay in-place. Fix the markword.
238 obj->init_mark();
239 }
240 }
241
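  // Return the address of the first marked (live) object in [start, end), or
  // end if there is none.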
242 static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
243 for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
244 prefetch_read_scan(i_addr);
245 oop obj = cast_to_oop(i_addr);
246 if (obj->is_gc_marked()) {
247 return i_addr;
248 }
249 i_addr += obj->size();
250 }
251 return end;
  }
253
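  // Copy the object at addr to its forwarding destination and reinitialize the
  // mark of the copy. Returns the size of the object in words.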
254 static size_t relocate(HeapWord* addr) {
255 // Prefetch source and destination
256 prefetch_read_scan(addr);
257
258 oop obj = cast_to_oop(addr);
259 oop new_obj = FullGCForwarding::forwardee(obj);
260 HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
261 assert(addr != new_addr, "inv");
262 prefetch_write_copy(new_addr);
263
264 size_t obj_size = obj->size();
265 Copy::aligned_conjoint_words(addr, new_addr, obj_size);
266 new_obj->init_mark();
267
268 return obj_size;
269 }
270
271 public:
272 explicit Compacter(SerialHeap* heap) {
273 // In this order so that heap is compacted towards old-gen.
274 _spaces[0].init(heap->old_gen()->space());
275 _spaces[1].init(heap->young_gen()->eden());
276 _spaces[2].init(heap->young_gen()->from());
277
278 bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
279 if (is_promotion_failed) {
280 _spaces[3].init(heap->young_gen()->to());
281 _num_spaces = 4;
282 } else {
283 _num_spaces = 3;
284 }
285 _index = 0;
286 _old_gen = heap->old_gen();
287 }
288
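  // Walk all objects in each space: compute and record the post-compaction
  // address of every live object, turn dead gaps either into filler objects
  // (DeadSpacer) or into skip records pointing to the next live object, and
  // remember the first dead address of each space.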
289 void phase2_calculate_new_addr() {
290 for (uint i = 0; i < _num_spaces; ++i) {
291 ContiguousSpace* space = get_space(i);
292 HeapWord* cur_addr = space->bottom();
293 HeapWord* top = space->top();
294
295 bool record_first_dead_done = false;
296
297 DeadSpacer dead_spacer(space);
298
299 while (cur_addr < top) {
300 oop obj = cast_to_oop(cur_addr);
301 size_t obj_size = obj->size();
302 if (obj->is_gc_marked()) {
303 HeapWord* new_addr = alloc(obj_size);
304 forward_obj(obj, new_addr);
305 cur_addr += obj_size;
306 } else {
307 // Skipping the current known-unmarked obj
308 HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
309 if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
310 // Register space for the filler obj
311 alloc(pointer_delta(next_live_addr, cur_addr));
312 } else {
313 if (!record_first_dead_done) {
314 record_first_dead(i, cur_addr);
315 record_first_dead_done = true;
316 }
317 *(HeapWord**)cur_addr = next_live_addr;
318 }
319 cur_addr = next_live_addr;
320 }
321 }
322
323 if (!record_first_dead_done) {
324 record_first_dead(i, top);
325 }
326 }
327 }
328
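  // Visit every surviving object and update each reference it contains to the
  // referent's new (post-compaction) address, using the skip records from
  // phase 2 to jump over dead gaps.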
329 void phase3_adjust_pointers() {
330 for (uint i = 0; i < _num_spaces; ++i) {
331 ContiguousSpace* space = get_space(i);
332 HeapWord* cur_addr = space->bottom();
333 HeapWord* const top = space->top();
334 HeapWord* const first_dead = get_first_dead(i);
335
336 while (cur_addr < top) {
337 prefetch_write_scan(cur_addr);
338 if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
339 size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
340 cur_addr += size;
341 } else {
342 assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
343 cur_addr = *(HeapWord**)cur_addr;
344 }
345 }
346 }
347 }
348
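  // Move each forwarded object to its new location, then reset each space's
  // top to the end of the compacted data (mangling the unused tail if
  // ZapUnusedHeapArea is set).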
349 void phase4_compact() {
350 for (uint i = 0; i < _num_spaces; ++i) {
351 ContiguousSpace* space = get_space(i);
352 HeapWord* cur_addr = space->bottom();
353 HeapWord* top = space->top();
354
355 // Check if the first obj inside this space is forwarded.
356 if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
        // Jump over the chunk of consecutive in-place live objects at the bottom.
358 cur_addr = get_first_dead(i);
359 }
360
361 while (cur_addr < top) {
362 if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
363 cur_addr = *(HeapWord**) cur_addr;
364 continue;
365 }
366 cur_addr += relocate(cur_addr);
367 }
368
369 // Reset top and unused memory
370 HeapWord* new_top = get_compaction_top(i);
371 space->set_top(new_top);
372 if (ZapUnusedHeapArea && new_top < top) {
373 space->mangle_unused_area(MemRegion(new_top, top));
374 }
375 }
376 }
377 };
378
379 template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
380 mark_and_push(p);
381 }
382
383 void SerialFullGC::push_objarray(oop obj, size_t index) {
384 assert(obj->is_refArray(), "Must be");
385 ObjArrayTask task(obj, index);
386 assert(task.is_valid(), "bad ObjArrayTask");
387 _objarray_stack.push(task);
388 }
389
390 void SerialFullGC::follow_array(objArrayOop array) {
391 mark_and_push_closure.do_klass(array->klass());
392 // Don't push empty arrays to avoid unnecessary work.
393 if (array->length() > 0) {
394 SerialFullGC::push_objarray(array, 0);
395 }
396 }
397
398 void SerialFullGC::follow_object(oop obj) {
399 assert(obj->is_gc_marked(), "should be marked");
400 if (obj->is_refArray()) {
401 // Handle object arrays explicitly to allow them to
402 // be split into chunks if needed.
403 SerialFullGC::follow_array((objArrayOop)obj);
404 } else {
405 obj->oop_iterate(&mark_and_push_closure);
406 }
407 }
408
409 void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
410 const int len = array->length();
411 const int beg_index = index;
412 assert(beg_index < len || len == 0, "index too large");
413
414 const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
415 const int end_index = beg_index + stride;
416
417 refArrayOop(array)->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
418
419 if (end_index < len) {
420 SerialFullGC::push_objarray(array, end_index); // Push the continuation.
421 }
422 }
423
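// Drain the marking stack, tracing through newly discovered objects. Object
// arrays are processed in ObjArrayMarkingStride-sized chunks via a separate
// stack to keep the marking stack small.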
424 void SerialFullGC::follow_stack() {
425 do {
426 while (!_marking_stack.is_empty()) {
427 oop obj = _marking_stack.pop();
      assert(obj->is_gc_marked(), "obj must be marked");
429 follow_object(obj);
430 }
431 // Process ObjArrays one at a time to avoid marking stack bloat.
432 if (!_objarray_stack.is_empty()) {
433 ObjArrayTask task = _objarray_stack.pop();
434 follow_array_chunk(objArrayOop(task.obj()), task.index());
435 }
436 } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
437 }
438
439 SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;
440
441 void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }
442
443 template <class T> void SerialFullGC::follow_root(T* p) {
444 assert(!Universe::heap()->is_in(p),
445 "roots shouldn't be things within the heap");
446 T heap_oop = RawAccess<>::oop_load(p);
447 if (!CompressedOops::is_null(heap_oop)) {
448 oop obj = CompressedOops::decode_not_null(heap_oop);
449 if (!obj->mark().is_marked()) {
450 mark_object(obj);
451 follow_object(obj);
452 }
453 }
454 follow_stack();
455 }
456
457 void SerialFullGC::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
458 void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
459
// We preserve the mark that must be restored at the end of GC, together with
// the location it will be restored to. Note that the object this markWord
// belongs to is not at that address yet; it will be there after phase 4.
463 void SerialFullGC::preserve_mark(oop obj, markWord mark) {
464 // We try to store preserved marks in the to space of the new generation since
465 // this is storage which should be available. Most of the time this should be
466 // sufficient space for the marks we need to preserve but if it isn't we fall
467 // back to using Stacks to keep track of the overflow.
468 if (_preserved_count < _preserved_count_max) {
469 _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
470 } else {
471 _preserved_overflow_stack_set.get()->push_always(obj, mark);
472 }
473 }
474
475 void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
476 // Recursively traverse all live objects and mark them
477 GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
478
479 SerialHeap* gch = SerialHeap::heap();
480
481 ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
482
483 ref_processor()->start_discovery(clear_all_softrefs);
484
485 {
486 GCTraceTime(Debug, gc, phases) tm_m("Marking From Roots", gc_timer());
487
    // Start tracing from roots; there are three kinds of roots in full-gc.
    //
    // 1. CLDs. This method internally takes care of whether class unloading is
    //    enabled or not, applying the closure either to both strong and weak
    //    CLDs or to strong CLDs only.
493 ClassLoaderDataGraph::always_strong_cld_do(&follow_cld_closure);
494
495 {
      // 2. Thread stack frames and the active nmethods in them.
497 NMethodMarkingScope nmethod_marking_scope;
498 MarkingNMethodClosure mark_code_closure(&follow_root_closure);
499
500 Threads::oops_do(&follow_root_closure, &mark_code_closure);
501 }
502
503 // 3. VM internal roots.
504 OopStorageSet::strong_oops_do(&follow_root_closure);
505 }
506
507 // Process reference objects found during marking
508 {
509 GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
510
511 ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
512 SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
513 const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, nullptr, pt);
514 pt.print_all_references();
515 gc_tracer()->report_gc_reference_stats(stats);
516 }
517
518 // This is the point where the entire marking should have completed.
519 assert(_marking_stack.is_empty(), "Marking should have completed");
520
521 {
522 GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
523 WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
524 }
525
526 {
527 GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());
528
529 ClassUnloadingContext* ctx = ClassUnloadingContext::context();
530
531 bool unloading_occurred;
532 {
533 CodeCache::UnlinkingScope scope(&is_alive);
534
535 // Unload classes and purge the SystemDictionary.
536 unloading_occurred = SystemDictionary::do_unloading(gc_timer());
537
538 // Unload nmethods.
539 CodeCache::do_unloading(unloading_occurred);
540 }
541
542 {
543 GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethods' memory.
545 ctx->purge_nmethods();
546 }
547 {
548 GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
549 gch->prune_unlinked_nmethods();
550 }
551 {
552 GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
553 ctx->free_nmethods();
554 }
555
556 // Prune dead klasses from subklass/sibling/implementor lists.
557 Klass::clean_weak_klass_links(unloading_occurred);
558
559 // Clean JVMCI metadata handles.
560 JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
561 }
562
563 {
564 GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
565 gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
566 }
567 }
568
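// Set up storage for preserved marks: borrow scratch space from the young
// generation when available; anything beyond that capacity overflows into
// _preserved_overflow_stack_set.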
569 void SerialFullGC::allocate_stacks() {
570 void* scratch = nullptr;
571 size_t num_words;
572 DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
573 young_gen->contribute_scratch(scratch, num_words);
574
575 if (scratch != nullptr) {
576 _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
577 } else {
578 _preserved_count_max = 0;
579 }
580
581 _preserved_marks = (PreservedMark*)scratch;
582 _preserved_count = 0;
583
584 _preserved_overflow_stack_set.init(1);
585 }
586
587 void SerialFullGC::deallocate_stacks() {
588 if (_preserved_count_max != 0) {
589 DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
590 young_gen->reset_scratch();
591 }
592
593 _preserved_overflow_stack_set.reclaim();
594 _marking_stack.clear();
595 _objarray_stack.clear(true);
596 }
597
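// Mark obj as live: possibly record it as a string-deduplication candidate,
// set the marked bit in its header, and preserve the original mark if it holds
// information that must survive the full GC.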
598 void SerialFullGC::mark_object(oop obj) {
599 if (StringDedup::is_enabled() &&
600 java_lang_String::is_instance(obj) &&
601 SerialStringDedup::is_candidate_from_mark(obj)) {
602 _string_dedup_requests->add(obj);
603 }
604
  // Some marks may contain information we need to preserve, so we store them
  // away and overwrite the mark. They are restored at the end of serial full GC.
607 markWord mark = obj->mark();
608 obj->set_mark(obj->prototype_mark().set_marked());
609
610 ContinuationGCSupport::transform_stack_chunk(obj);
611
612 if (obj->mark_must_be_preserved(mark)) {
613 preserve_mark(obj, mark);
614 }
615 }
616
617 template <class T> void SerialFullGC::mark_and_push(T* p) {
618 T heap_oop = RawAccess<>::oop_load(p);
619 if (!CompressedOops::is_null(heap_oop)) {
620 oop obj = CompressedOops::decode_not_null(heap_oop);
621 if (!obj->mark().is_marked()) {
622 mark_object(obj);
623 _marking_stack.push(obj);
624 }
625 }
626 }
627
628 template <typename T>
629 void MarkAndPushClosure::do_oop_work(T* p) { SerialFullGC::mark_and_push(p); }
630 void MarkAndPushClosure::do_oop( oop* p) { do_oop_work(p); }
631 void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }
632
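// If *p refers to an object that has been forwarded, update *p to point to the
// object's new location.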
633 template <class T> void SerialFullGC::adjust_pointer(T* p) {
634 T heap_oop = RawAccess<>::oop_load(p);
635 if (!CompressedOops::is_null(heap_oop)) {
636 oop obj = CompressedOops::decode_not_null(heap_oop);
637 assert(Universe::heap()->is_in(obj), "should be in heap");
638
639 if (FullGCForwarding::is_forwarded(obj)) {
640 oop new_obj = FullGCForwarding::forwardee(obj);
641 assert(is_object_aligned(new_obj), "oop must be aligned");
642 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
643 }
644 }
645 }
646
647 template <typename T>
648 void AdjustPointerClosure::do_oop_work(T* p) { SerialFullGC::adjust_pointer(p); }
649 inline void AdjustPointerClosure::do_oop(oop* p) { do_oop_work(p); }
650 inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
651
652 AdjustPointerClosure SerialFullGC::adjust_pointer_closure;
653
654 void SerialFullGC::adjust_marks() {
655 // adjust the oops we saved earlier
656 for (size_t i = 0; i < _preserved_count; i++) {
657 PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
658 }
659
660 // deal with the overflow stack
661 _preserved_overflow_stack_set.get()->adjust_during_full_gc();
662 }
663
664 void SerialFullGC::restore_marks() {
665 log_trace(gc)("Restoring %zu marks", _preserved_count + _preserved_overflow_stack_set.get()->size());
666
667 // restore the marks we saved earlier
668 for (size_t i = 0; i < _preserved_count; i++) {
669 _preserved_marks[i].set_mark();
670 }
671
672 // deal with the overflow
673 _preserved_overflow_stack_set.restore(nullptr);
674 }
675
676 SerialFullGC::IsAliveClosure SerialFullGC::is_alive;
677
678 bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
679
680 SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;
681
682 void SerialFullGC::KeepAliveClosure::do_oop(oop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
683 void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
684
685 void SerialFullGC::initialize() {
686 SerialFullGC::_gc_timer = new STWGCTimer();
687 SerialFullGC::_gc_tracer = new SerialOldTracer();
688 SerialFullGC::_string_dedup_requests = new StringDedup::Requests();
689
690 // The Full GC operates on the entire heap so all objects should be subject
691 // to discovery, hence the _always_true_closure.
692 SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
693 mark_and_push_closure.set_ref_discoverer(_ref_processor);
694 }
695
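// Entry point for the Serial full GC: runs the four mark-compact phases
// (mark, compute-new-addresses, adjust-pointers, compact) at a safepoint.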
696 void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
697 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
698
699 SerialHeap* gch = SerialHeap::heap();
700
701 gch->trace_heap_before_gc(_gc_tracer);
702
703 // Capture used regions for old-gen to reestablish old-to-young invariant
704 // after full-gc.
705 gch->old_gen()->save_used_region();
706
707 allocate_stacks();
708
  // Usually, all class unloading work occurs at the end of phase 1, but Serial
  // full-gc accesses dead objects' klasses during phase 2 to find the start of
  // the next live object, which requires those klasses to remain loaded.
  // Therefore, the ClassUnloadingContext is declared at the same level as the
  // full-gc phases, and dead classes are purged (via ClassLoaderDataGraph::purge)
  // only after all phases of full-gc have completed.
715 ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
716 false /* unregister_nmethods_during_purge */,
717 false /* lock_nmethod_free_separately */);
718
719 phase1_mark(clear_all_softrefs);
720
721 Compacter compacter{gch};
722
723 {
724 // Now all live objects are marked, compute the new object addresses.
725 GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
726
727 compacter.phase2_calculate_new_addr();
728 }
729
730 // Don't add any more derived pointers during phase3
731 #if COMPILER2_OR_JVMCI
732 assert(DerivedPointerTable::is_active(), "Sanity");
733 DerivedPointerTable::set_active(false);
734 #endif
735
736 {
737 // Adjust the pointers to reflect the new locations
738 GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
739
740 ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
741
742 // Remap strong and weak roots in adjust phase.
743 // 1. All (strong and weak) CLDs.
744 ClassLoaderDataGraph::cld_do(&adjust_cld_closure);
745
    // 2. Thread stack frames. No need to visit on-stack nmethods, because all
747 // nmethods are visited in one go via CodeCache::nmethods_do.
748 Threads::oops_do(&adjust_pointer_closure, nullptr);
749 NMethodToOopClosure nmethod_cl(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
750 CodeCache::nmethods_do(&nmethod_cl);
751
752 // 3. VM internal roots
753 OopStorageSet::strong_oops_do(&adjust_pointer_closure);
754
755 // 4. VM internal weak roots
756 WeakProcessor::oops_do(&adjust_pointer_closure);
757
758 adjust_marks();
759 compacter.phase3_adjust_pointers();
760 }
761
762 {
763 // All pointers are now adjusted, move objects accordingly
764 GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
765
766 compacter.phase4_compact();
767 }
768
769 // Delete metaspaces for unloaded class loaders and clean up CLDG.
770 ClassLoaderDataGraph::purge(true /* at_safepoint */);
771 DEBUG_ONLY(MetaspaceUtils::verify();)
772
  // Need to clear claim bits for the next full-gc (specifically phases 1 and 3).
774 ClassLoaderDataGraph::clear_claimed_marks();
775
776 restore_marks();
777
778 deallocate_stacks();
779
780 SerialFullGC::_string_dedup_requests->flush();
781
782 bool is_young_gen_empty = (gch->young_gen()->used() == 0);
783 gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);
784
785 gch->prune_scavengable_nmethods();
786
787 // Update heap occupancy information which is used as
788 // input to soft ref clearing policy at the next gc.
789 Universe::heap()->update_capacity_and_used_at_gc();
790
791 // Signal that we have completed a visit to all live objects.
792 Universe::heap()->record_whole_heap_examined_timestamp();
793
794 gch->trace_heap_after_gc(_gc_tracer);
795 }