/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialStringDedup.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

Stack<oop, mtGC> SerialFullGC::_marking_stack;
Stack<ObjArrayTask, mtGC> SerialFullGC::_objarray_stack;

PreservedMarksSet SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
size_t SerialFullGC::_preserved_count = 0;
size_t SerialFullGC::_preserved_count_max = 0;
PreservedMark* SerialFullGC::_preserved_marks = nullptr;
STWGCTimer* SerialFullGC::_gc_timer = nullptr;
SerialOldTracer* SerialFullGC::_gc_tracer = nullptr;

AlwaysTrueClosure SerialFullGC::_always_true_closure;
ReferenceProcessor* SerialFullGC::_ref_processor;

StringDedup::Requests* SerialFullGC::_string_dedup_requests = nullptr;

SerialFullGC::FollowRootClosure SerialFullGC::follow_root_closure;

MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
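
// DeadSpacer tracks how much garbage may be left uncompacted (as filler
// objects) in the old generation, controlled by MarkSweepDeadRatio. Every
// MarkSweepAlwaysCompactCount-th full collection the allowance is disabled so
// a complete compaction is performed.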
class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  ContiguousSpace* _space;

public:
  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
                   ? MarkSweepDeadRatio : 0;
    _active = ratio > 0;

    if (_active) {
      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((SerialHeap::heap()->total_full_collections() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      // obj->set_mark(obj->mark().set_marked());

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", %zub",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

// Implement the "compaction" part of the mark-compact GC algorithm.
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compact. IOW, old and eden/from must be enough for all live objs
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It's an optimization to
    // skip large chunk of live objects at the beginning.
    HeapWord* _first_dead;

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The num of spaces to be compacted, i.e. containing live objs.
  uint _num_spaces;

  uint _index;

  // Used for BOT update
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }
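
  // Allocate 'words' words at the current compaction point of the current
  // destination space; when it does not fit, advance to the next space.
  // Allocations into the old generation (index 0) also update its block
  // offset table.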
  HeapWord* alloc(size_t words) {
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  static void forward_obj(oop obj, HeapWord* new_addr) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      // This obj will stay in-place. Fix the markword.
      obj->init_mark();
    }
  }

  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  };

  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = FullGCForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
    assert(addr != new_addr, "inv");
    prefetch_write_copy(new_addr);

    size_t obj_size = obj->size();
    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    new_obj->init_mark();

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that heap is compacted towards old-gen.
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }
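
  // Phase 2: walk every space and install forwarding pointers. Live objects
  // get their post-compaction addresses via alloc(); for each run of dead
  // objects, either a filler object is inserted (DeadSpacer) or the first word
  // of the run is overwritten with the address of the next live object so that
  // later phases can skip the run. The start of the first such run is recorded
  // in _first_dead.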
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size);
          forward_obj(obj, new_addr);
          cur_addr += obj_size;
        } else {
          // Skipping the current known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            alloc(pointer_delta(next_live_addr, cur_addr));
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }
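
  // Phase 3: walk the live objects of every space and update their references
  // to point at the forwardees computed in phase 2. Runs of dead objects are
  // skipped using _first_dead and the next-live-object links written in phase 2.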
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
          cur_addr += size;
        } else {
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }
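
  // Phase 4: copy each forwarded object to its new location via relocate(),
  // then reset each space's top to its compaction top. The leading run of
  // objects that stay in place (already re-initialized in phase 2) is jumped
  // over via _first_dead; dead runs are skipped via the links from phase 2.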
  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
      if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
        // Jump over consecutive (in-place) live-objs-chunk
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) {
          cur_addr = *(HeapWord**) cur_addr;
          continue;
        }
        cur_addr += relocate(cur_addr);
      }

      // Reset top and unused memory
      HeapWord* new_top = get_compaction_top(i);
      space->set_top(new_top);
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_unused_area(MemRegion(new_top, top));
      }
    }
  }
};

template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}

void SerialFullGC::push_objarray(oop obj, size_t index) {
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}

void SerialFullGC::follow_array(objArrayOop array) {
  mark_and_push_closure.do_klass(array->klass());
  // Don't push empty arrays to avoid unnecessary work.
  if (array->length() > 0) {
    SerialFullGC::push_objarray(array, 0);
  }
}

void SerialFullGC::follow_object(oop obj) {
  assert(obj->is_gc_marked(), "should be marked");
  if (obj->is_objArray()) {
    // Handle object arrays explicitly to allow them to
    // be split into chunks if needed.
    SerialFullGC::follow_array((objArrayOop)obj);
  } else {
    obj->oop_iterate(&mark_and_push_closure);
  }
}

void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
  const int len = array->length();
  const int beg_index = index;
  assert(beg_index < len || len == 0, "index too large");

  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
  const int end_index = beg_index + stride;

  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);

  if (end_index < len) {
    SerialFullGC::push_objarray(array, end_index); // Push the continuation.
  }
}

void SerialFullGC::follow_stack() {
  do {
    while (!_marking_stack.is_empty()) {
      oop obj = _marking_stack.pop();
      assert (obj->is_gc_marked(), "p must be marked");
      follow_object(obj);
    }
    // Process ObjArrays one at a time to avoid marking stack bloat.
    if (!_objarray_stack.is_empty()) {
      ObjArrayTask task = _objarray_stack.pop();
      follow_array_chunk(objArrayOop(task.obj()), task.index());
    }
  } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;

void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }

template <class T> void SerialFullGC::follow_root(T* p) {
  assert(!Universe::heap()->is_in(p),
         "roots shouldn't be things within the heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}

void SerialFullGC::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

// We preserve the mark which should be replaced at the end and the location
// that it will go. Note that the object that this markWord belongs to isn't
// currently at that address but it will be after phase4
void SerialFullGC::preserve_mark(oop obj, markWord mark) {
  // We try to store preserved marks in the to space of the new generation since
  // this is storage which should be available. Most of the time this should be
  // sufficient space for the marks we need to preserve but if it isn't we fall
  // back to using Stacks to keep track of the overflow.
  if (_preserved_count < _preserved_count_max) {
    _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
  } else {
    _preserved_overflow_stack_set.get()->push_always(obj, mark);
  }
}

void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

    MarkingNMethodClosure mark_code_closure(&follow_root_closure,
                                            !NMethodToOopClosure::FixRelocations,
                                            true);

    // Start tracing from roots, there are 3 kinds of roots in full-gc.
    //
    // 1. CLD. This method internally takes care of whether class loading is
    // enabled or not, applying the closure to both strong and weak or only
    // strong CLDs.
    ClassLoaderDataGraph::always_strong_cld_do(&follow_cld_closure);

    // 2. Threads stack frames and active nmethods in them.
    Threads::oops_do(&follow_root_closure, &mark_code_closure);

    // 3. VM internal roots.
    OopStorageSet::strong_oops_do(&follow_root_closure);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, nullptr, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}
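
// Preserved-mark storage is borrowed from the young generation's scratch area
// (the to-space); anything beyond that capacity spills into
// _preserved_overflow_stack_set (see preserve_mark()).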
void SerialFullGC::allocate_stacks() {
  void* scratch = nullptr;
  size_t num_words;
  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
  young_gen->contribute_scratch(scratch, num_words);

  if (scratch != nullptr) {
    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

  _preserved_overflow_stack_set.init(1);
}

void SerialFullGC::deallocate_stacks() {
  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // some marks may contain information we need to preserve so we store them away
  // and overwrite the mark. We'll restore it at the end of serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(obj->prototype_mark().set_marked());

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>
void MarkAndPushClosure::do_oop_work(T* p) { SerialFullGC::mark_and_push(p); }
void MarkAndPushClosure::do_oop(      oop* p) { do_oop_work(p); }
void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }
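
// Update the reference at p to point at the referent's forwardee computed in
// phase 2. References to objects that were not forwarded (they stay at their
// current address) are left untouched.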
template <class T> void SerialFullGC::adjust_pointer(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(Universe::heap()->is_in(obj), "should be in heap");

    if (FullGCForwarding::is_forwarded(obj)) {
      oop new_obj = FullGCForwarding::forwardee(obj);
      assert(is_object_aligned(new_obj), "oop must be aligned");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
}

template <typename T>
void AdjustPointerClosure::do_oop_work(T* p)           { SerialFullGC::adjust_pointer(p); }
inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

AdjustPointerClosure SerialFullGC::adjust_pointer_closure;

void SerialFullGC::adjust_marks() {
  // adjust the oops we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
  }

  // deal with the overflow stack
  _preserved_overflow_stack_set.get()->adjust_during_full_gc();
}

void SerialFullGC::restore_marks() {
  log_trace(gc)("Restoring %zu marks", _preserved_count + _preserved_overflow_stack_set.get()->size());

  // restore the marks we saved earlier
  for (size_t i = 0; i < _preserved_count; i++) {
    _preserved_marks[i].set_mark();
  }

  // deal with the overflow
  _preserved_overflow_stack_set.restore(nullptr);
}

SerialFullGC::IsAliveClosure SerialFullGC::is_alive;

bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }

SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;

void SerialFullGC::KeepAliveClosure::do_oop(oop* p)       { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }

void SerialFullGC::initialize() {
  SerialFullGC::_gc_timer = new STWGCTimer();
  SerialFullGC::_gc_tracer = new SerialOldTracer();
  SerialFullGC::_string_dedup_requests = new StringDedup::Requests();

  // The Full GC operates on the entire heap so all objects should be subject
  // to discovery, hence the _always_true_closure.
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  // Usually, all class unloading work occurs at the end of phase 1, but Serial
  // full-gc accesses dead-objs' klass to find out the start of next live-obj
  // during phase 2. This requires klasses of dead-objs to be kept loaded.
  // Therefore, we declare ClassUnloadingContext at the same level as
  // full-gc phases, and purge dead classes (invoking
  // ClassLoaderDataGraph::purge) after all phases of full-gc.
  ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                            false /* unregister_nmethods_during_purge */,
                            false /* lock_nmethod_free_separately */);

  phase1_mark(clear_all_softrefs);

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    // Remap strong and weak roots in adjust phase.
    // 1. All (strong and weak) CLDs.
    ClassLoaderDataGraph::cld_do(&adjust_cld_closure);

    // 2. Threads stack frames. No need to visit on-stack nmethods, because all
    // nmethods are visited in one go via CodeCache::nmethods_do.
    Threads::oops_do(&adjust_pointer_closure, nullptr);
    NMethodToOopClosure nmethod_cl(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
    CodeCache::nmethods_do(&nmethod_cl);

    // 3. VM internal roots
    OopStorageSet::strong_oops_do(&adjust_pointer_closure);

    // 4. VM internal weak roots
    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  // Delete metaspaces for unloaded class loaders and clean up CLDG.
  ClassLoaderDataGraph::purge(true /* at_safepoint */);
  DEBUG_ONLY(MetaspaceUtils::verify();)

  // Need to clear claim bits for the next full-gc (specifically phase 1 and 3).
  ClassLoaderDataGraph::clear_claimed_marks();

  restore_marks();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}