/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialStringDedup.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

Stack<oop, mtGC> SerialFullGC::_marking_stack;
Stack<ObjArrayTask, mtGC> SerialFullGC::_objarray_stack;

PreservedMarksSet SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
size_t SerialFullGC::_preserved_count = 0;
size_t SerialFullGC::_preserved_count_max = 0;
PreservedMark* SerialFullGC::_preserved_marks = nullptr;
STWGCTimer* SerialFullGC::_gc_timer = nullptr;
SerialOldTracer* SerialFullGC::_gc_tracer = nullptr;

AlwaysTrueClosure SerialFullGC::_always_true_closure;
ReferenceProcessor* SerialFullGC::_ref_processor;

StringDedup::Requests* SerialFullGC::_string_dedup_requests = nullptr;

SerialFullGC::FollowRootClosure SerialFullGC::follow_root_closure;

MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  ContiguousSpace* _space;

public:
  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
                   ? MarkSweepDeadRatio : 0;
    _active = ratio > 0;

    if (_active) {
      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
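      // For example, assuming the default values MarkSweepDeadRatio=5 and
      // MarkSweepAlwaysCompactCount=4, every fourth full collection compacts
      // exhaustively, while the others tolerate up to 5% of the old
      // generation's capacity as filler-object dead space.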
      if ((SerialHeap::heap()->total_full_collections() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      // Note: the filler object is deliberately not marked. It always lies
      // below _first_dead, so the later phases treat it as an in-place live
      // object without consulting its mark.

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", %zub",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

// Implement the "compaction" part of the mark-compact GC algorithm.
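//
// A sketch of one space during compaction:
//
//   bottom           _first_dead                                      top
//     |  live chunk  |  dead range  |  live chunk  |  dead range  | ...
//
// _first_dead is the first word not covered by a live object or a filler
// object. The first word of each dead range at or after _first_dead is
// overwritten with the address of the next live object, so the adjust and
// compact phases can skip a whole dead range in a single step.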
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compaction. In other words, the old generation plus eden and from-space
  // must have enough capacity to hold all live objects.
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It is an optimization to
    // skip over the (possibly large) chunk of live objects at the beginning.
    HeapWord* _first_dead;

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The number of spaces to be compacted, i.e. those containing live objects.
  uint _num_spaces;

  uint _index;

  // Used for updating the block offset table (BOT) of the old generation.
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

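  // Allocate "words" words for a live object in the current target space,
  // spilling over into the next space when the current one is full. An object
  // that stays at its current address (old_obj == compaction_top) uses
  // old_size; an object that moves uses new_size, which may be larger (see
  // copy_size and initialize_hash_if_necessary below).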
  HeapWord* alloc(size_t old_size, size_t new_size, HeapWord* old_obj) {
    size_t words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // No room left in the current target space; spill into the next one.
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
      words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  static void forward_obj(oop obj, HeapWord* new_addr, bool after_first_dead) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      if (!after_first_dead) {
        // This obj stays in place and will not be visited during relocation;
        // fix its markword now.
        obj->init_mark();
      } else {
        FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
      }
    }
  }

  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  };

  static size_t relocate(HeapWord* addr) {
    // Prefetch the source; the destination is prefetched just before the copy.
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);

    size_t obj_size = obj->size();
    if (addr != new_addr) {
      prefetch_write_copy(new_addr);
      Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    }
    new_obj->init_mark();
    if (addr != new_addr) {
      new_obj->initialize_hash_if_necessary(obj);
    }

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that the heap is compacted towards the old generation.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        size_t new_size = obj->copy_size(obj_size, obj->mark());
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size, new_size, cur_addr);
          forward_obj(obj, new_addr, record_first_dead_done);
          assert(obj->size() == obj_size, "size must not change after forwarding");
          cur_addr += obj_size;
        } else {
          // Skip the current (unmarked, i.e. dead) object.
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Reserve space for the filler object.
            size_t size = pointer_delta(next_live_addr, cur_addr);
            alloc(size, size, cur_addr);
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
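            // Record the address of the next live object in the first word of
            // this dead range, so phases 3 and 4 can skip over it directly.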
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
          cur_addr += size;
        } else {
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }

  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
      if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
        // Skip over the leading chunk of consecutive in-place live objects.
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
          cur_addr = *(HeapWord**) cur_addr;
          continue;
        }
        cur_addr += relocate(cur_addr);
      }

      // Reset top and, if requested, mangle the now-unused memory.
      HeapWord* new_top = get_compaction_top(i);
      space->set_top(new_top);
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_unused_area(MemRegion(new_top, top));
      }
    }
  }
};

template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}

void SerialFullGC::push_objarray(oop obj, size_t index) {
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}

void SerialFullGC::follow_array(objArrayOop array) {
  mark_and_push_closure.do_klass(array->klass());
  // Don't push empty arrays to avoid unnecessary work.
  if (array->length() > 0) {
    SerialFullGC::push_objarray(array, 0);
  }
}

void SerialFullGC::follow_object(oop obj) {
  assert(obj->is_gc_marked(), "should be marked");
  if (obj->is_objArray()) {
    // Handle object arrays explicitly to allow them to
    // be split into chunks if needed.
    SerialFullGC::follow_array((objArrayOop)obj);
  } else {
    obj->oop_iterate(&mark_and_push_closure);
  }
}

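// Process one chunk of the given array, then push a continuation task if any
// elements remain. For example, assuming the default ObjArrayMarkingStride of
// 2048, a 5000-element array is processed as the chunks [0, 2048),
// [2048, 4096) and [4096, 5000).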
void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
  const int len = array->length();
  const int beg_index = index;
  assert(beg_index < len || len == 0, "index too large");

  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
  const int end_index = beg_index + stride;

  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);

  if (end_index < len) {
    SerialFullGC::push_objarray(array, end_index); // Push the continuation.
  }
}

void SerialFullGC::follow_stack() {
  do {
    while (!_marking_stack.is_empty()) {
      oop obj = _marking_stack.pop();
      assert(obj->is_gc_marked(), "obj must be marked");
      follow_object(obj);
    }
    // Process ObjArrays one at a time to avoid marking stack bloat.
    if (!_objarray_stack.is_empty()) {
      ObjArrayTask task = _objarray_stack.pop();
      follow_array_chunk(objArrayOop(task.obj()), task.index());
    }
  } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;

void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }

template <class T> void SerialFullGC::follow_root(T* p) {
  assert(!Universe::heap()->is_in(p),
         "roots shouldn't be things within the heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}

void SerialFullGC::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

// We preserve the mark that should be restored at the end of GC, and the
// location it will be restored to. Note that the object this markWord
// belongs to isn't at that address yet, but it will be there after phase4.
void SerialFullGC::preserve_mark(oop obj, markWord mark) {
  // We try to store preserved marks in the to-space of the young generation,
  // since this storage should be available. Most of the time this will be
  // sufficient space for the marks we need to preserve, but if it isn't we
  // fall back to an overflow stack.
  if (_preserved_count < _preserved_count_max) {
    _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
  } else {
    _preserved_overflow_stack_set.get()->push_always(obj, mark);
  }
}

void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

    CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
    MarkingNMethodClosure mark_code_closure(&follow_root_closure, !NMethodToOopClosure::FixRelocations, true);
    gch->process_roots(SerialHeap::SO_None,
                       &follow_root_closure,
                       &follow_cld_closure,
                       weak_cld_closure,
                       &mark_code_closure);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, nullptr, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release the unloaded nmethods' memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}

void SerialFullGC::allocate_stacks() {
  void* scratch = nullptr;
  size_t num_words;
  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
  young_gen->contribute_scratch(scratch, num_words);

  if (scratch != nullptr) {
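    // For example, on a 64-bit VM a PreservedMark is an (oop, markWord) pair
    // of 16 bytes, so 1 MB of scratch space can hold 65536 preserved marks.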
    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

  _preserved_overflow_stack_set.init(1);
}

void SerialFullGC::deallocate_stacks() {
  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Some marks may contain information we need to preserve (e.g. lock state
  // or an installed identity hash), so we store them away before overwriting
  // the mark. They are restored at the end of the serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(mark.set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>
void MarkAndPushClosure::do_oop_work(T* p)    { SerialFullGC::mark_and_push(p); }
void MarkAndPushClosure::do_oop(oop* p)       { do_oop_work(p); }
void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T> void SerialFullGC::adjust_pointer(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(Universe::heap()->is_in(obj), "should be in heap");

    if (FullGCForwarding::is_forwarded(obj)) {
      oop new_obj = FullGCForwarding::forwardee(obj);
      assert(is_object_aligned(new_obj), "oop must be aligned");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
}

template <typename T>
void AdjustPointerClosure::do_oop_work(T* p) { SerialFullGC::adjust_pointer(p); }
inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

AdjustPointerClosure SerialFullGC::adjust_pointer_closure;

void SerialFullGC::adjust_marks() {
  // adjust the oops we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
  }

  // deal with the overflow stack
  _preserved_overflow_stack_set.get()->adjust_during_full_gc();
}

void SerialFullGC::restore_marks() {
  log_trace(gc)("Restoring %zu marks", _preserved_count + _preserved_overflow_stack_set.get()->size());

  // restore the marks we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    _preserved_marks[i].set_mark();
  }

  // deal with the overflow stack
  _preserved_overflow_stack_set.restore(nullptr);
}

SerialFullGC::IsAliveClosure SerialFullGC::is_alive;

bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }

SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;

void SerialFullGC::KeepAliveClosure::do_oop(oop* p)       { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }

void SerialFullGC::initialize() {
  SerialFullGC::_gc_timer = new STWGCTimer();
  SerialFullGC::_gc_tracer = new SerialOldTracer();
  SerialFullGC::_string_dedup_requests = new StringDedup::Requests();

  // The Full GC operates on the entire heap so all objects should be subject
  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

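// The Serial full GC is a four-phase, sliding mark-compact collection:
//  1. mark all live objects (phase1_mark),
//  2. compute each live object's new address (Compacter::phase2_calculate_new_addr),
//  3. update all references to point to the new addresses (phase3_adjust_pointers),
//  4. move the objects to their new addresses (phase4_compact).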
void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture the old generation's used region in order to reestablish the
  // old-to-young invariant after the full GC.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

  FullGCForwarding::begin();

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    NMethodToOopClosure code_closure(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
    gch->process_roots(SerialHeap::SO_AllCodeCache,
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  restore_marks();

  FullGCForwarding::end();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information, which is used as input to the soft
  // reference clearing policy at the next GC.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}