/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialStringDedup.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

Stack<oop, mtGC> SerialFullGC::_marking_stack;
Stack<ObjArrayTask, mtGC> SerialFullGC::_objarray_stack;

PreservedMarksSet SerialFullGC::_preserved_overflow_stack_set(false /* in_c_heap */);
size_t SerialFullGC::_preserved_count = 0;
size_t SerialFullGC::_preserved_count_max = 0;
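// Note: _preserved_marks points into scratch space borrowed from the young
// generation (see allocate_stacks()); marks that do not fit there spill into
// _preserved_overflow_stack_set.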
PreservedMark* SerialFullGC::_preserved_marks = nullptr;
STWGCTimer* SerialFullGC::_gc_timer = nullptr;
SerialOldTracer* SerialFullGC::_gc_tracer = nullptr;

AlwaysTrueClosure SerialFullGC::_always_true_closure;
ReferenceProcessor* SerialFullGC::_ref_processor;

StringDedup::Requests* SerialFullGC::_string_dedup_requests = nullptr;

SerialFullGC::FollowRootClosure SerialFullGC::follow_root_closure;

MarkAndPushClosure SerialFullGC::mark_and_push_closure(ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::follow_cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
CLDToOopClosure SerialFullGC::adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  ContiguousSpace* _space;

public:
  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = (_space == SerialHeap::heap()->old_gen()->space())
                   ? MarkSweepDeadRatio : 0;
    _active = ratio > 0;

    if (_active) {
      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((SerialHeap::heap()->total_full_collections() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      // obj->set_mark(obj->mark().set_marked());

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

// Implement the "compaction" part of the mark-compact GC algorithm.
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compaction. IOW, old and eden/from must be enough for all live objs.
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It's an optimization to
    // skip a large chunk of live objects at the beginning.
    HeapWord* _first_dead;
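    // (For dead gaps, starting at _first_dead, that are not absorbed into
    // filler objects, phase 2 stores the address of the next live object in
    // the first word of the gap, so later phases can skip the gap in one step.)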

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The num of spaces to be compacted, i.e. containing live objs.
  uint _num_spaces;

  uint _index;

  // Used for BOT update
  TenuredGeneration* _old_gen;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

  HeapWord* alloc(size_t words) {
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          _old_gen->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  static void forward_obj(oop obj, HeapWord* new_addr) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      SlidingForwarding::forward_to(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      // This obj will stay in-place. Fix the markword.
      obj->init_mark();
    }
  }

  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  };

  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = SlidingForwarding::forwardee(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
    assert(addr != new_addr, "inv");
    prefetch_write_copy(new_addr);

    size_t obj_size = obj->size();
    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    new_obj->init_mark();

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that heap is compacted towards old-gen.
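    // Live objects from every space are slid towards old-gen; eden and
    // from-space are used as compaction targets only if old-gen runs out of
    // room, and to-space only if a promotion failure left live objects in it.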
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = !heap->young_gen()->to()->is_empty();
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
    _old_gen = heap->old_gen();
  }

  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size);
          forward_obj(obj, new_addr);
          cur_addr += obj_size;
        } else {
          // Skipping the current known-unmarked obj
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            alloc(pointer_delta(next_live_addr, cur_addr));
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
          cur_addr += size;
        } else {
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }

  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
      if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
        // Jump over the chunk of consecutive live objects that stay in place.
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
          cur_addr = *(HeapWord**) cur_addr;
          continue;
        }
        cur_addr += relocate(cur_addr);
      }

      // Reset top and mangle unused memory.
      HeapWord* new_top = get_compaction_top(i);
      space->set_top(new_top);
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_unused_area(MemRegion(new_top, top));
      }
    }
  }
};

template <class T> void SerialFullGC::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(p);
}

void SerialFullGC::push_objarray(oop obj, size_t index) {
  ObjArrayTask task(obj, index);
  assert(task.is_valid(), "bad ObjArrayTask");
  _objarray_stack.push(task);
}

void SerialFullGC::follow_array(objArrayOop array) {
  mark_and_push_closure.do_klass(array->klass());
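  // The elements themselves are scanned in chunks of ObjArrayMarkingStride via
  // _objarray_stack (see follow_array_chunk()), which keeps the marking stack
  // small when marking large arrays.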
  // Don't push empty arrays to avoid unnecessary work.
  if (array->length() > 0) {
    SerialFullGC::push_objarray(array, 0);
  }
}

void SerialFullGC::follow_object(oop obj) {
  assert(obj->is_gc_marked(), "should be marked");
  if (obj->is_objArray()) {
    // Handle object arrays explicitly to allow them to
    // be split into chunks if needed.
    SerialFullGC::follow_array((objArrayOop)obj);
  } else {
    obj->oop_iterate(&mark_and_push_closure);
  }
}

void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
  const int len = array->length();
  const int beg_index = index;
  assert(beg_index < len || len == 0, "index too large");

  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
  const int end_index = beg_index + stride;

  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);

  if (end_index < len) {
    SerialFullGC::push_objarray(array, end_index); // Push the continuation.
  }
}

void SerialFullGC::follow_stack() {
  do {
    while (!_marking_stack.is_empty()) {
      oop obj = _marking_stack.pop();
      assert(obj->is_gc_marked(), "p must be marked");
      follow_object(obj);
    }
    // Process ObjArrays one at a time to avoid marking stack bloat.
    if (!_objarray_stack.is_empty()) {
      ObjArrayTask task = _objarray_stack.pop();
      follow_array_chunk(objArrayOop(task.obj()), task.index());
    }
  } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
}

SerialFullGC::FollowStackClosure SerialFullGC::follow_stack_closure;

void SerialFullGC::FollowStackClosure::do_void() { follow_stack(); }

template <class T> void SerialFullGC::follow_root(T* p) {
  assert(!Universe::heap()->is_in(p),
         "roots shouldn't be things within the heap");
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      follow_object(obj);
    }
  }
  follow_stack();
}

void SerialFullGC::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
void SerialFullGC::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }

// We preserve the mark which should be replaced at the end and the location
// that it will go to. Note that the object that this markWord belongs to isn't
// currently at that address, but it will be after phase 4.
void SerialFullGC::preserve_mark(oop obj, markWord mark) {
  // We try to store preserved marks in the to space of the new generation since
  // this is storage which should be available. Most of the time this should be
  // sufficient space for the marks we need to preserve, but if it isn't we fall
  // back to using Stacks to keep track of the overflow.
  if (_preserved_count < _preserved_count_max) {
    _preserved_marks[_preserved_count++] = PreservedMark(obj, mark);
  } else {
    _preserved_overflow_stack_set.get()->push_always(obj, mark);
  }
}

void SerialFullGC::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

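    // When class unloading is enabled, only strongly reachable CLDs are
    // followed here; classes whose loaders turn out to be unreachable are
    // unloaded after marking (see the "Class Unloading" block below).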
    CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
    MarkingNMethodClosure mark_code_closure(&follow_root_closure, !NMethodToOopClosure::FixRelocations, true);
    gch->process_roots(SerialHeap::SO_None,
                       &follow_root_closure,
                       &follow_cld_closure,
                       weak_cld_closure,
                       &mark_code_closure);
  }

  // Process reference objects found during marking.
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethods' memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}

void SerialFullGC::allocate_stacks() {
  void* scratch = nullptr;
  size_t num_words;
  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
  young_gen->contribute_scratch(scratch, num_words);

  if (scratch != nullptr) {
    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

  _preserved_overflow_stack_set.init(1);
}

void SerialFullGC::deallocate_stacks() {
  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void SerialFullGC::mark_object(oop obj) {
  if (StringDedup::is_enabled() &&
      java_lang_String::is_instance(obj) &&
      SerialStringDedup::is_candidate_from_mark(obj)) {
    _string_dedup_requests->add(obj);
  }

  // Do the transform while we still have the header intact,
  // which might include important class information.
  ContinuationGCSupport::transform_stack_chunk(obj);

  // Some marks may contain information we need to preserve, so we store them
  // away and overwrite the mark. We'll restore it at the end of serial full GC.
  markWord mark = obj->mark();
  obj->set_mark(obj->prototype_mark().set_marked());

  if (obj->mark_must_be_preserved(mark)) {
    preserve_mark(obj, mark);
  }
}

template <class T> void SerialFullGC::mark_and_push(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    if (!obj->mark().is_marked()) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
  }
}

template <typename T>
void MarkAndPushClosure::do_oop_work(T* p)    { SerialFullGC::mark_and_push(p); }
void MarkAndPushClosure::do_oop(oop* p)       { do_oop_work(p); }
void MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <class T> void SerialFullGC::adjust_pointer(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(heap_oop)) {
    oop obj = CompressedOops::decode_not_null(heap_oop);
    assert(Universe::heap()->is_in(obj), "should be in heap");

    if (SlidingForwarding::is_forwarded(obj)) {
      oop new_obj = SlidingForwarding::forwardee(obj);
      assert(is_object_aligned(new_obj), "oop must be aligned");
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
}

template <typename T>
void AdjustPointerClosure::do_oop_work(T* p)           { SerialFullGC::adjust_pointer(p); }
inline void AdjustPointerClosure::do_oop(oop* p)       { do_oop_work(p); }
inline void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_work(p); }

AdjustPointerClosure SerialFullGC::adjust_pointer_closure;

void SerialFullGC::adjust_marks() {
  // Adjust the oops we saved earlier.
  for (size_t i = 0; i < _preserved_count; i++) {
    PreservedMarks::adjust_preserved_mark(_preserved_marks + i);
  }

  // Deal with the overflow stack.
  _preserved_overflow_stack_set.get()->adjust_during_full_gc();
}

void SerialFullGC::restore_marks() {
  log_trace(gc)("Restoring " SIZE_FORMAT " marks", _preserved_count + _preserved_overflow_stack_set.get()->size());

  // Restore the marks we saved earlier.
  for (size_t i = 0; i < _preserved_count; i++) {
    _preserved_marks[i].set_mark();
  }

  // Deal with the overflow.
  _preserved_overflow_stack_set.restore(nullptr);
}

SerialFullGC::IsAliveClosure SerialFullGC::is_alive;

bool SerialFullGC::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }

SerialFullGC::KeepAliveClosure SerialFullGC::keep_alive;

void SerialFullGC::KeepAliveClosure::do_oop(oop* p)       { SerialFullGC::KeepAliveClosure::do_oop_work(p); }
void SerialFullGC::KeepAliveClosure::do_oop(narrowOop* p) { SerialFullGC::KeepAliveClosure::do_oop_work(p); }

void SerialFullGC::initialize() {
  SerialFullGC::_gc_timer = new STWGCTimer();
  SerialFullGC::_gc_tracer = new SerialOldTracer();
  SerialFullGC::_string_dedup_requests = new StringDedup::Requests();

  // The Full GC operates on the entire heap so all objects should be subject
  // to discovery, hence the _always_true_closure.
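  // Reference discovery is restarted for every full GC in phase1_mark(), and
  // the discovered references are processed there once the transitive closure
  // of the strong roots has been marked.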
  SerialFullGC::_ref_processor = new ReferenceProcessor(&_always_true_closure);
  mark_and_push_closure.set_ref_discoverer(_ref_processor);
}

void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();

  gch->trace_heap_before_gc(_gc_tracer);

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

  SlidingForwarding::begin();

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    NMethodToOopClosure code_closure(&adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
    gch->process_roots(SerialHeap::SO_AllCodeCache,
                       &adjust_pointer_closure,
                       &adjust_cld_closure,
                       &adjust_cld_closure,
                       &code_closure);

    WeakProcessor::oops_do(&adjust_pointer_closure);

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  restore_marks();

  SlidingForwarding::end();

  deallocate_stacks();

  SerialFullGC::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}