/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialStringDedup.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

Stack<oop, mtGC> SerialFullGC::_marking_stack;
Stack<ObjArrayTask, mtGC> SerialFullGC::_objarray_stack;

PreservedMarksSet SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
size_t SerialFullGC::_preserved_count = 0;
size_t SerialFullGC::_preserved_count_max = 0;
PreservedMark* SerialFullGC::_preserved_marks = nullptr;
STWGCTimer* SerialFullGC::_gc_timer = nullptr;
SerialOldTracer* SerialFullGC::_gc_tracer = nullptr;

AlwaysTrueClosure SerialFullGC::_always_true_closure;
ReferenceProcessor* SerialFullGC::_ref_processor;

StringDedup::Requests* SerialFullGC::_string_dedup_requests = nullptr;

SerialFullGC::FollowRootClosure SerialFullGC::follow_root_closure;

MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);

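// Helper that decides whether a dead range at the bottom of the old-gen space
// may be kept as dead space (filled with a dummy object) instead of being
// compacted away, trading some heap waste for fewer object moves. The budget
// is controlled by MarkSweepDeadRatio; every MarkSweepAlwaysCompactCount-th
// full GC compacts unconditionally.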
class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  ContiguousSpace* _space;

public:
  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
                   ? MarkSweepDeadRatio : 0;
    _active = ratio > 0;

    if (_active) {
      // We allow some amount of garbage towards the bottom of the space, so
      // that we don't start compacting before there is a significant gain to
      // be made. Occasionally we force a full compaction; how often is
      // controlled by the MarkSweepAlwaysCompactCount parameter.
      if ((SerialHeap::heap()->total_full_collections() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting filler object into dead space: " PTR_FORMAT ", " PTR_FORMAT ", %zub",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

// Implement the "compaction" part of the mark-compact GC algorithm.
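// Phase numbering matches the full-GC driver in invoke_at_safepoint(), where
// phase 1 (marking) happens before the Compacter is used:
//   phase 2: compute the post-compaction address of every live object
//   phase 3: update all oop fields to point to the new addresses
//   phase 4: move the objects and reset the spaces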
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compaction, i.e. old-gen plus eden/from must have enough room for all live objs.
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. Recording it is an
    // optimization that lets later phases skip the large chunk of live
    // objects at the beginning of the space.
    HeapWord* _first_dead;

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The number of spaces to be compacted, i.e. spaces that may contain live objs.
  uint _num_spaces;

  uint _index;

  // Used for block-offset-table (BOT) updates in old-gen.
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

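  // Bump-pointer allocation in the current destination space; when it is
  // exhausted, overflow into the next space in compaction order.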
  HeapWord* alloc(size_t words) {
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // Out of room in the current destination space; advance to the next one.
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  static void forward_obj(oop obj, HeapWord* new_addr) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      // This obj will stay in place; reset its mark word.
      obj->init_mark();
    }
  }

  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  }

  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
    assert(addr != new_addr, "inv");
    prefetch_write_copy(new_addr);

    size_t obj_size = obj->size();
    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    new_obj->init_mark();

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order, so that the heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

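    // to-space is only non-empty here if the preceding young GC failed to
    // promote all live objects, leaving some of them stranded in to-space.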
    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size);
          forward_obj(obj, new_addr);
          cur_addr += obj_size;
        } else {
          // Skip over the current known-dead obj.
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            alloc(pointer_delta(next_live_addr, cur_addr));
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
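            // Store the address of the next live object in the first word of
            // this dead range, so phases 3 and 4 can skip over it directly.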
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

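      // Everything below first_dead is iterable (live objects plus any
      // DeadSpacer fillers); from first_dead on, each dead range holds the
      // address of the next live object in its first word (set in phase 2).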
      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
          cur_addr += size;
        } else {
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }

  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
      if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
        // Skip over the chunk of consecutive live objs that stay in place.
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
          cur_addr = *(HeapWord**) cur_addr;
          continue;
        }
        cur_addr += relocate(cur_addr);
      }

      // Reset top and, if requested, mangle the now-unused memory below it.
      HeapWord* new_top = get_compaction_top(i);
      space->set_top(new_top);
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_unused_area(MemRegion(new_top, top));
      }
    }
  }
};

template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}

void SerialFullGC::push_objarray(oop obj, size_t index) {
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}

void SerialFullGC::follow_array(objArrayOop array) {
  mark_and_push_closure.do_klass(array->klass());
  // Don't push empty arrays to avoid unnecessary work.
  if (array->length() > 0) {
    SerialFullGC::push_objarray(array, 0);
  }
}

void SerialFullGC::follow_object(oop obj) {
  assert(obj->is_gc_marked(), "should be marked");
  if (obj->is_objArray()) {
    // Handle object arrays explicitly to allow them to
    // be split into chunks if needed.
    SerialFullGC::follow_array((objArrayOop)obj);
  } else {
    obj->oop_iterate(&mark_and_push_closure);
  }
}

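// Mark up to ObjArrayMarkingStride elements starting at index, then push a
// continuation task, so large arrays are processed in bounded chunks and the
// marking stack stays shallow.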
void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
  const int len = array->length();
  const int beg_index = index;
  assert(beg_index < len || len == 0, "index too large");

  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
  const int end_index = beg_index + stride;

  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);

  if (end_index < len) {
    SerialFullGC::push_objarray(array, end_index); // Push the continuation.
  }
}

void SerialFullGC::follow_stack() {
  do {
    while (!_marking_stack.is_empty()) {
      oop obj = _marking_stack.pop();
      assert(obj->is_gc_marked(), "obj must be marked");
      follow_object(obj);
    }
    // Process ObjArrays one at a time to avoid marking stack bloat.
    if (!_objarray_stack.is_empty()) {
      ObjArrayTask task = _objarray_stack.pop();
      follow_array_chunk(objArrayOop(task.obj()), task.index());
    }
  } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;

void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }

template <class T> void SerialFullGC::follow_root(T* p) {
  assert(!Universe::heap()->is_in(p),
         "roots shouldn't be things within the heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}

void SerialFullGC::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

// We preserve the mark that must be restored at the end of GC, together with
// a reference to the object it belongs to. Note that the object is not yet at
// its destination address; it is moved there during phase 4.
void SerialFullGC::preserve_mark(oop obj, markWord mark) {
  // We try to store preserved marks in the to-space of the young generation,
  // since that storage should be available. Most of the time it provides
  // enough room for the marks we need to preserve, but if it does not, we
  // fall back to a stack to keep track of the overflow.
  if (_preserved_count < _preserved_count_max) {
    _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
  } else {
    _preserved_overflow_stack_set.get()->push_always(obj, mark);
  }
}

void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

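    // When class unloading is enabled, CLDs that are only weakly reachable
    // are not followed here, so dead class loaders can be unloaded further
    // below.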
    CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
    MarkingNMethodClosure mark_code_closure(&follow_root_closure, !NMethodToOopClosure::FixRelocations, true);
    gch->process_roots(SerialHeap::SO_None,
                       &follow_root_closure,
                       &follow_cld_closure,
                       weak_cld_closure,
                       &mark_code_closure);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, nullptr, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release the memory of unloaded nmethods.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}

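// Preserved marks are stored in the young generation's to-space, which
// DefNewGeneration hands out as scratch memory; any overflow goes to
// _preserved_overflow_stack_set (see preserve_mark()).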
void SerialFullGC::allocate_stacks() {
  void* scratch = nullptr;
  size_t num_words;
  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
  young_gen->contribute_scratch(scratch, num_words);

  if (scratch != nullptr) {
    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

  _preserved_overflow_stack_set.init(1);
}

void SerialFullGC::deallocate_stacks() {
  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Some marks may contain information we need to preserve, so we store them
  // away and overwrite the mark. The original marks are restored at the end
  // of the full GC.
  markWord mark = obj->mark();
  obj->set_mark(obj->prototype_mark().set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>
void MarkAndPushClosure::do_oop_work(T* p) { SerialFullGC::mark_and_push(p); }
void MarkAndPushClosure::do_oop(      oop* p) { do_oop_work(p); }
void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T> void SerialFullGC::adjust_pointer(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(Universe::heap()->is_in(obj), "should be in heap");

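    // Objects that stay at their current address were not forwarded in
    // phase 2 (their mark word was simply reset), so only forwarded objects
    // need the field updated.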
    if (FullGCForwarding::is_forwarded(obj)) {
      oop new_obj = FullGCForwarding::forwardee(obj);
      assert(is_object_aligned(new_obj), "oop must be aligned");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
}

template <typename T>
void AdjustPointerClosure::do_oop_work(T* p) { SerialFullGC::adjust_pointer(p); }
inline void AdjustPointerClosure::do_oop(oop* p) { do_oop_work(p); }
inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

AdjustPointerClosure SerialFullGC::adjust_pointer_closure;

void SerialFullGC::adjust_marks() {
  // adjust the oops we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
  }

  // deal with the overflow stack
  _preserved_overflow_stack_set.get()->adjust_during_full_gc();
}

void SerialFullGC::restore_marks() {
  log_trace(gc)("Restoring %zu marks", _preserved_count + _preserved_overflow_stack_set.get()->size());

  // restore the marks we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    _preserved_marks[i].set_mark();
  }

  // deal with the overflow
  _preserved_overflow_stack_set.restore(nullptr);
}

SerialFullGC::IsAliveClosure SerialFullGC::is_alive;

bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }

SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;

void SerialFullGC::KeepAliveClosure::do_oop(oop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }

void SerialFullGC::initialize() {
  SerialFullGC::_gc_timer = new STWGCTimer();
  SerialFullGC::_gc_tracer = new SerialOldTracer();
  SerialFullGC::_string_dedup_requests = new StringDedup::Requests();

  // The Full GC operates on the entire heap so all objects should be subject
  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

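  // Per-space compaction state (new addresses, first-dead words) shared by
  // phases 2-4.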
  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase 3.
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    NMethodToOopClosure code_closure(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
    gch->process_roots(SerialHeap::SO_AllCodeCache,
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  restore_marks();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information, which is used as input to the soft-ref
  // clearing policy at the next GC.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}
--- EOF ---