/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialMemoryPools.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

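// Accessor for the single SerialHeap instance; named_heap<>() checks that the
// active collector is in fact the Serial GC.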
SerialHeap* SerialHeap::heap() {
  return named_heap<SerialHeap>(CollectedHeap::Serial);
}

SerialHeap::SerialHeap() :
    CollectedHeap(),
    _young_gen(nullptr),
    _old_gen(nullptr),
    _rem_set(nullptr),
    _gc_policy_counters(new GCPolicyCounters("Copy:MSC", 2, 2)),
    _incremental_collection_failed(false),
    _young_manager(nullptr),
    _old_manager(nullptr),
    _eden_pool(nullptr),
    _survivor_pool(nullptr),
    _old_pool(nullptr) {
  _young_manager = new GCMemoryManager("Copy");
  _old_manager = new GCMemoryManager("MarkSweepCompact");
}

void SerialHeap::initialize_serviceability() {
  DefNewGeneration* young = young_gen();

  // Add a memory pool for each space. The young-gen pools don't support
  // low memory detection, as they are expected to get filled up.
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Eden Space",
                                       young->max_eden_size(),
                                       false /* support_usage_threshold */);
  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Survivor Space",
                                                   young->max_survivor_size(),
                                                   false /* support_usage_threshold */);
  TenuredGeneration* old = old_gen();
  _old_pool = new TenuredGenerationPool(old, "Tenured Gen", true);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
  young->set_gc_manager(_young_manager);

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);
  old->set_gc_manager(_old_manager);
}

GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

void SerialHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void SerialHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

HeapWord* SerialHeap::allocate_loaded_archive_space(size_t word_size) {
  MutexLocker ml(Heap_lock);
  return old_gen()->allocate(word_size, false /* is_tlab */);
}

void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(old_gen()->used_region().contains(archive_space), "Archive space not contained in old gen");
  old_gen()->complete_loaded_archive_space(archive_space);
}

void SerialHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void SerialHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}

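// Reserves the heap, wires up the card-table remembered set and barrier set,
// and creates the two generations: DefNewGeneration (young) at the low end of
// the reservation and TenuredGeneration (old) above it.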
jint SerialHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize);
  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize);

  _rem_set = new CardTableRS(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = new DefNewGeneration(young_rs, NewSize, MinNewSize, MaxNewSize);
  _old_gen = new TenuredGeneration(old_rs, OldSize, MinOldSize, MaxOldSize, rem_set());

  GCInitLogger::print();

  SlidingForwarding::initialize(_reserved, SpaceAlignment / HeapWordSize);

  return JNI_OK;
}

ReservedHeapSpace SerialHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = MaxNewSize + MaxOldSize;
  if (total_reserved < MaxNewSize) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return SerialHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void SerialHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  SerialFullGC::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues SerialHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

size_t SerialHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t SerialHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

size_t SerialHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool SerialHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return (word_size > heap_word_size(young_capacity))
      || GCLocker::is_active_and_needs_gc()
      || incremental_collection_failed();
}

HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

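// Slow-path allocation shared by mem_allocate() and TLAB refills: try a
// lock-free allocation in the young generation, then a locked allocation
// (also in the old generation for requests the young gen can't satisfy), and
// finally fall back to a VM_SerialCollectForAllocation safepoint operation.
// Loops until the request is satisfied or known to be unsatisfiable.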
HeapWord* SerialHeap::mem_allocate_work(size_t size,
                                        bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    DefNewGeneration *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("SerialHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_SerialCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == nullptr, "must be null if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* SerialHeap::attempt_allocation(size_t size,
                                         bool is_tlab,
                                         bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* SerialHeap::mem_allocate(size_t size,
                                   bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool SerialHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

bool SerialHeap::is_young_gc_safe() const {
  if (!_young_gen->to()->is_empty()) {
    return false;
  }
  return _old_gen->promotion_attempt_is_safe(_young_gen->used());
}

bool SerialHeap::do_young_collection(bool clear_soft_refs) {
  if (!is_young_gc_safe()) {
    return false;
  }
  IsSTWGCActiveMark gc_active_mark;
  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  GCTraceCPUTime tcpu(_young_gen->gc_tracer());
  GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);
  TraceCollectorStats tcs(_young_gen->counters());
  TraceMemoryManagerStats tmms(_young_gen->gc_manager(), gc_cause(), "end of minor GC");
  print_heap_before_gc();
  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  increment_total_collections(false);
  const bool should_verify = total_collections() >= VerifyGCStartAt;
  if (should_verify && VerifyBeforeGC) {
    prepare_for_verify();
    Universe::verify("Before GC");
  }
  gc_prologue(false);
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  save_marks();

  bool result = _young_gen->collect(clear_soft_refs);

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  // Only update stats for successful young-gc
  if (result) {
    _old_gen->update_promote_stats();
  }

  if (should_verify && VerifyAfterGC) {
    Universe::verify("After GC");
  }

  _young_gen->compute_new_size();

  print_heap_change(pre_gc_values);

  // Track memory usage and detect low memory after GC finishes
  MemoryService::track_memory_usage();

  gc_epilogue(false);

  print_heap_after_gc();

  return result;
}

void SerialHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void SerialHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void SerialHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void SerialHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void SerialHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  GCLocker::check_active_before_gc();
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  }

  // If young-gen can handle this allocation, attempt a young-gc first.
  bool should_run_young_gc = _young_gen->should_allocate(size, is_tlab);
  collect_at_safepoint(!should_run_young_gc);

  result = attempt_allocation(size, is_tlab, false /*first_only*/);
  if (result != nullptr) {
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
    const bool clear_all_soft_refs = true;
    do_full_collection_no_gc_locker(clear_all_soft_refs);
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    return result;
  }
  // The previous full-gc can shrink the heap, so re-expand it.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

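// Visits the strong roots (class loader data, thread stacks, strong
// OopStorages) and, depending on the ScanningOption, either the set of
// scavengable nmethods or the entire code cache.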
void SerialHeap::process_roots(ScanningOption so,
                               OopClosure* strong_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               NMethodToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  NMethodToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::nmethods_do(code_roots);
  }
}

template <typename OopClosureType>
static void oop_iterate_from(OopClosureType* blk, ContiguousSpace* space, HeapWord** from) {
  assert(*from != nullptr, "precondition");
  HeapWord* t;
  HeapWord* p = *from;

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = space->top();
    while (p < t) {
      Prefetch::write(p, interval);
      p += cast_to_oop(p)->oop_iterate_size(blk);
    }
  } while (t < space->top());

  *from = space->top();
}

void SerialHeap::scan_evacuated_objs(YoungGenScanClosure* young_cl,
                                     OldGenScanClosure* old_cl) {
  ContiguousSpace* to_space = young_gen()->to();
  do {
    oop_iterate_from(young_cl, to_space, &_young_gen_saved_top);
    oop_iterate_from(old_cl, old_gen()->space(), &_old_gen_saved_top);
    // Recheck to-space only, because postcondition of oop_iterate_from is no
    // unscanned objs
  } while (_young_gen_saved_top != to_space->top());
  guarantee(young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}

void SerialHeap::try_collect_at_safepoint(bool full) {
  assert(SafepointSynchronize::is_at_safepoint(), "precondition");
  if (GCLocker::check_active_before_gc()) {
    return;
  }
  collect_at_safepoint(full);
}

void SerialHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = must_clear_all_soft_refs();

  if (!full) {
    bool success = do_young_collection(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to Full-GC if young-gc fails
  }
  do_full_collection_no_gc_locker(clear_soft_refs);
}

// public collection interfaces
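// Entry point for collections requested outside a safepoint (e.g. explicit
// System.gc(), whitebox-forced GCs, GC-locker-induced GCs): snapshots the
// collection counts under the Heap_lock, submits a VM_SerialGCCollect
// operation, and for explicit full GCs retries (stalling on the GC locker if
// necessary) until a full collection has actually run.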
void SerialHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc = (cause == GCCause::_wb_young_gc)
                          || (cause == GCCause::_gc_locker)
               DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  while (true) {
    VM_SerialGCCollect op(!should_run_young_gc,
                          gc_count_before,
                          full_gc_count_before,
                          cause);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
  if (GCLocker::check_active_before_gc()) {
    return;
  }
  do_full_collection_no_gc_locker(clear_all_soft_refs);
}

void SerialHeap::do_full_collection_no_gc_locker(bool clear_all_soft_refs) {
  IsSTWGCActiveMark gc_active_mark;
  SvcGCMarker sgcm(SvcGCMarker::FULL);
  GCIdMark gc_id_mark;
  GCTraceCPUTime tcpu(SerialFullGC::gc_tracer());
  GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);
  TraceCollectorStats tcs(_old_gen->counters());
  TraceMemoryManagerStats tmms(_old_gen->gc_manager(), gc_cause(), "end of major GC");
  const PreGenGCValues pre_gc_values = get_pre_gc_values();
  print_heap_before_gc();

  increment_total_collections(true);
  const bool should_verify = total_collections() >= VerifyGCStartAt;
  if (should_verify && VerifyBeforeGC) {
    prepare_for_verify();
    Universe::verify("Before GC");
  }

  gc_prologue(true);
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
  CodeCache::on_gc_marking_cycle_start();
  ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                            false /* unregister_nmethods_during_purge */,
                            false /* lock_nmethod_free_separately */);

  STWGCTimer* gc_timer = SerialFullGC::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = SerialFullGC::gc_tracer();
  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());

  pre_full_gc_dump(gc_timer);

  SerialFullGC::invoke_at_safepoint(clear_all_soft_refs);

  post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  // Adjust generation sizes.
  _old_gen->compute_new_size();
  _young_gen->compute_new_size();

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge(/*at_safepoint*/true);
  DEBUG_ONLY(MetaspaceUtils::verify();)

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  _old_gen->update_promote_stats();

  // Resize the metaspace capacity after full collections
  MetaspaceGC::compute_new_size();

  print_heap_change(pre_gc_values);

  // Track memory usage and detect low memory after GC finishes
  MemoryService::track_memory_usage();

  // Need to tell the epilogue code we are done with Full GC, regardless what was
  // the initial value for "complete" flag.
  gc_epilogue(true);

  print_heap_after_gc();

  if (should_verify && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

bool SerialHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool SerialHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool SerialHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

void SerialHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

HeapWord* SerialHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool SerialHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");

  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->eden()->is_in(addr)
        || _young_gen->from()->is_in(addr)
        || _young_gen->to()  ->is_in(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "must be in old-gen");
  return addr < _old_gen->space()->top();
}

size_t SerialHeap::tlab_capacity(Thread* thr) const {
  // Only young-gen supports tlab allocation.
  return _young_gen->tlab_capacity();
}

size_t SerialHeap::tlab_used(Thread* thr) const {
  return _young_gen->tlab_used();
}

size_t SerialHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* SerialHeap::allocate_new_tlab(size_t min_size,
                                        size_t requested_size,
                                        size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void SerialHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

bool SerialHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return _old_gen->is_maximal_no_gc();
}

void SerialHeap::save_marks() {
  _young_gen_saved_top = _young_gen->to()->top();
  _old_gen_saved_top = _old_gen->space()->top();
}

void SerialHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void SerialHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void SerialHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool SerialHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<SerialHeap>::print_location(st, addr);
}

void SerialHeap::print_tracing_info() const {
  // Does nothing
}

void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void SerialHeap::gc_prologue(bool full) {
  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  _old_gen->gc_prologue();
};

void SerialHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  _young_gen->gc_epilogue(full);
  _old_gen->gc_epilogue();

  MetaspaceCounters::update_performance_counters();
};