/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/generation.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/universe.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  ContiguousSpace* _space;

public:
  DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
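      // The budget below is a percentage of the space's capacity: with
      // allowed_dead_ratio() == 5, up to 5% of the space may be kept as
      // filler objects rather than compacted away.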
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      // obj->set_mark(obj->mark().set_marked());

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

// Implement the "compaction" part of the mark-compact GC algorithm.
class Compacter {
  // There are four spaces in total, but only the first three can be used after
  // compaction. IOW, old-gen and eden/from must be enough for all live objs.
  static constexpr uint max_num_spaces = 4;

  struct CompactionSpace {
    ContiguousSpace* _space;
    // Will be the new top after compaction is complete.
    HeapWord* _compaction_top;
    // The first dead word in this contiguous space. It's an optimization to
    // skip a large chunk of live objects at the beginning.
    HeapWord* _first_dead;

    void init(ContiguousSpace* space) {
      _space = space;
      _compaction_top = space->bottom();
      _first_dead = nullptr;
    }
  };

  CompactionSpace _spaces[max_num_spaces];
  // The number of spaces to be compacted, i.e. containing live objs.
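  // Spaces are old-gen, eden and from; plus to-space when compacting after a
  // promotion failure.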
  uint _num_spaces;

  uint _index;

  HeapWord* get_compaction_top(uint index) const {
    return _spaces[index]._compaction_top;
  }

  HeapWord* get_first_dead(uint index) const {
    return _spaces[index]._first_dead;
  }

  ContiguousSpace* get_space(uint index) const {
    return _spaces[index]._space;
  }

  void record_first_dead(uint index, HeapWord* first_dead) {
    assert(_spaces[index]._first_dead == nullptr, "should write only once");
    _spaces[index]._first_dead = first_dead;
  }

  HeapWord* alloc(size_t words) {
    while (true) {
      if (words <= pointer_delta(_spaces[_index]._space->end(),
                                 _spaces[_index]._compaction_top)) {
        HeapWord* result = _spaces[_index]._compaction_top;
        _spaces[_index]._compaction_top += words;
        if (_index == 0) {
          // old-gen requires BOT update
          static_cast<TenuredSpace*>(_spaces[0]._space)->update_for_block(result, result + words);
        }
        return result;
      }

      // out-of-memory in this space
      _index++;
      assert(_index < max_num_spaces - 1, "the last space should not be used");
    }
  }

  static void prefetch_read_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::read(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_scan(void* p) {
    if (PrefetchScanIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchScanIntervalInBytes);
    }
  }

  static void prefetch_write_copy(void* p) {
    if (PrefetchCopyIntervalInBytes >= 0) {
      Prefetch::write(p, PrefetchCopyIntervalInBytes);
    }
  }

  template <bool ALT_FWD>
  static void forward_obj(oop obj, HeapWord* new_addr) {
    prefetch_write_scan(obj);
    if (cast_from_oop<HeapWord*>(obj) != new_addr) {
      SlidingForwarding::forward_to<ALT_FWD>(obj, cast_to_oop(new_addr));
    } else {
      assert(obj->is_gc_marked(), "inv");
      // This obj will stay in-place. Fix the markword.
      obj->init_mark();
    }
  }

  static HeapWord* find_next_live_addr(HeapWord* start, HeapWord* end) {
    for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
      prefetch_read_scan(i_addr);
      oop obj = cast_to_oop(i_addr);
      if (obj->is_gc_marked()) {
        return i_addr;
      }
      i_addr += obj->size();
    }
    return end;
  }

  template <bool ALT_FWD>
  static size_t relocate(HeapWord* addr) {
    // Prefetch source and destination
    prefetch_read_scan(addr);

    oop obj = cast_to_oop(addr);
    oop new_obj = SlidingForwarding::forwardee<ALT_FWD>(obj);
    HeapWord* new_addr = cast_from_oop<HeapWord*>(new_obj);
    assert(addr != new_addr, "inv");
    prefetch_write_copy(new_addr);

    size_t obj_size = obj->size();
    Copy::aligned_conjoint_words(addr, new_addr, obj_size);
    new_obj->init_mark();

    return obj_size;
  }

public:
  explicit Compacter(SerialHeap* heap) {
    // In this order so that the heap is compacted towards old-gen.
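    // Live objects slide into old-gen first; the young-gen spaces only
    // receive objects once the preceding space runs out of room (see alloc()).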
    _spaces[0].init(heap->old_gen()->space());
    _spaces[1].init(heap->young_gen()->eden());
    _spaces[2].init(heap->young_gen()->from());

    bool is_promotion_failed = (heap->young_gen()->from()->next_compaction_space() != nullptr);
    if (is_promotion_failed) {
      _spaces[3].init(heap->young_gen()->to());
      _num_spaces = 4;
    } else {
      _num_spaces = 3;
    }
    _index = 0;
  }

  template <bool ALT_FWD>
  void phase2_calculate_new_addr() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      bool record_first_dead_done = false;

      DeadSpacer dead_spacer(space);

      while (cur_addr < top) {
        oop obj = cast_to_oop(cur_addr);
        size_t obj_size = obj->size();
        if (obj->is_gc_marked()) {
          HeapWord* new_addr = alloc(obj_size);
          forward_obj<ALT_FWD>(obj, new_addr);
          cur_addr += obj_size;
        } else {
          // Skip the current known-unmarked obj and scan for the next live one.
          HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
          if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
            // Register space for the filler obj
            alloc(pointer_delta(next_live_addr, cur_addr));
          } else {
            if (!record_first_dead_done) {
              record_first_dead(i, cur_addr);
              record_first_dead_done = true;
            }
            // Store the address of the next live object in the first word of
            // the dead range, so that phases 3 and 4 can skip over it quickly.
            *(HeapWord**)cur_addr = next_live_addr;
          }
          cur_addr = next_live_addr;
        }
      }

      if (!record_first_dead_done) {
        record_first_dead(i, top);
      }
    }
  }

  void phase2_calculate_new_addr() {
    if (UseAltGCForwarding) {
      phase2_calculate_new_addr<true>();
    } else {
      phase2_calculate_new_addr<false>();
    }
  }

  template <bool ALT_FWD>
  void phase3_adjust_pointers() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* const top = space->top();
      HeapWord* const first_dead = get_first_dead(i);

      while (cur_addr < top) {
        prefetch_write_scan(cur_addr);
        if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
          size_t size = MarkSweep::adjust_pointers<ALT_FWD>(cast_to_oop(cur_addr));
          cur_addr += size;
        } else {
          assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
          cur_addr = *(HeapWord**)cur_addr;
        }
      }
    }
  }

  void phase3_adjust_pointers() {
    if (UseAltGCForwarding) {
      phase3_adjust_pointers<true>();
    } else {
      phase3_adjust_pointers<false>();
    }
  }

  template <bool ALT_FWD>
  void phase4_compact() {
    for (uint i = 0; i < _num_spaces; ++i) {
      ContiguousSpace* space = get_space(i);
      HeapWord* cur_addr = space->bottom();
      HeapWord* top = space->top();

      // Check if the first obj inside this space is forwarded.
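      // Objects that stay in place are not forwarded in phase 2 (only their
      // markword is reset), so a non-forwarded first object implies a prefix
      // of in-place live objects ending at _first_dead.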
      if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
        // Jump over the chunk of consecutive in-place live objs.
        cur_addr = get_first_dead(i);
      }

      while (cur_addr < top) {
        if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) {
          cur_addr = *(HeapWord**)cur_addr;
          continue;
        }
        cur_addr += relocate<ALT_FWD>(cur_addr);
      }

      // Reset top and mangle the now-unused memory.
      space->set_top(get_compaction_top(i));
      if (ZapUnusedHeapArea) {
        space->mangle_unused_area();
      }
    }
  }

  void phase4_compact() {
    if (UseAltGCForwarding) {
      phase4_compact<true>();
    } else {
      phase4_compact<false>();
    }
  }
};

void GenMarkSweep::phase1_mark(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  SerialHeap* gch = SerialHeap::heap();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);

  ref_processor()->start_discovery(clear_all_softrefs);

  {
    StrongRootsScope srs(0);

    CLDClosure* weak_cld_closure = ClassUnloading ? nullptr : &follow_cld_closure;
    MarkingCodeBlobClosure mark_code_closure(&follow_root_closure, !CodeBlobToOopClosure::FixRelocations, true);
    gch->process_roots(SerialHeap::SO_None,
                       &follow_root_closure,
                       &follow_cld_closure,
                       weak_cld_closure,
                       &mark_code_closure);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, follow_stack_closure);
    const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references(task, pt);
    pt.print_all_references();
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
    WeakProcessor::weak_oops_do(&is_alive, &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(&is_alive);

      // Unload classes and purge the SystemDictionary.
      unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethods' memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer());
      gch->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_code_blobs();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
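    // (JVMCI_ONLY expands to nothing when JVMCI is not included in the build.)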
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
  }
}

void GenMarkSweep::invoke_at_safepoint(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SerialHeap* gch = SerialHeap::heap();
#ifdef ASSERT
  if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  gch->trace_heap_before_gc(_gc_tracer);

  // Increment the invocation count
  _total_invocations++;

  // Capture used regions for old-gen to reestablish old-to-young invariant
  // after full-gc.
  gch->old_gen()->save_used_region();

  allocate_stacks();

  phase1_mark(clear_all_softrefs);

  SlidingForwarding::begin();

  Compacter compacter{gch};

  {
    // Now all live objects are marked, compute the new object addresses.
    GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

    compacter.phase2_calculate_new_addr();
  }

  // Don't add any more derived pointers during phase3
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_active(), "Sanity");
  DerivedPointerTable::set_active(false);
#endif

  {
    // Adjust the pointers to reflect the new locations
    GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);

    if (UseAltGCForwarding) {
      AdjustPointerClosure<true> adjust_pointer_closure;
      CLDToOopClosure adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
      CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
      gch->process_roots(SerialHeap::SO_AllCodeCache,
                         &adjust_pointer_closure,
                         &adjust_cld_closure,
                         &adjust_cld_closure,
                         &code_closure);

      WeakProcessor::oops_do(&adjust_pointer_closure);
    } else {
      AdjustPointerClosure<false> adjust_pointer_closure;
      CLDToOopClosure adjust_cld_closure(&adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
      CodeBlobToOopClosure code_closure(&adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
      gch->process_roots(SerialHeap::SO_AllCodeCache,
                         &adjust_pointer_closure,
                         &adjust_cld_closure,
                         &adjust_cld_closure,
                         &code_closure);

      WeakProcessor::oops_do(&adjust_pointer_closure);
    }

    adjust_marks();
    compacter.phase3_adjust_pointers();
  }

  {
    // All pointers are now adjusted, move objects accordingly
    GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

    compacter.phase4_compact();
  }

  SlidingForwarding::end();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  MarkSweep::_string_dedup_requests->flush();

  bool is_young_gen_empty = (gch->young_gen()->used() == 0);
  gch->rem_set()->maintain_old_to_young_invariant(gch->old_gen(), is_young_gen_empty);

  gch->prune_scavengable_nmethods();

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
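  // (The policy scales how long softly reachable objects survive with the
  // amount of free heap; see SoftRefLRUPolicyMSPerMB.)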
  Universe::heap()->update_capacity_and_used_at_gc();

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();

  gch->trace_heap_after_gc(_gc_tracer);
}

void GenMarkSweep::allocate_stacks() {
  void* scratch = nullptr;
  size_t num_words;
  DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
  young_gen->contribute_scratch(scratch, num_words);

  if (scratch != nullptr) {
    _preserved_count_max = num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

  _preserved_overflow_stack_set.init(1);
}

void GenMarkSweep::deallocate_stacks() {
  if (_preserved_count_max != 0) {
    DefNewGeneration* young_gen = (DefNewGeneration*)SerialHeap::heap()->young_gen();
    young_gen->reset_scratch();
  }

  _preserved_overflow_stack_set.reclaim();
  _marking_stack.clear();
  _objarray_stack.clear(true);
}