/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen* ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Layout the reserved space for the generations.
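  // The old generation occupies the lower part of the reservation and the
  // young generation sits directly above it, so the two generations meet at a
  // single boundary (see the "Boundaries must meet" assert below).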
  ReservedSpace old_rs = heap_rs.first_part(MaxOldSize);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  PSCardTable* card_table = new PSCardTable(heap_rs.region());
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize,
      "old", 1);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             GCTimeRatio
                             );

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (!PSParallelCompact::initialize_aux_data()) {
    return JNI_ENOMEM;
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}

void ParallelScavengeHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ParallelScavengeHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return old_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
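//
// Returns null if the request cannot be satisfied; *gc_overhead_limit_was_exceeded
// is set when the failure is attributed to the GC overhead limit (see the
// checks further down).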
HeapWord* ParallelScavengeHeap::mem_allocate(
                     size_t size,
                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it to
  // false here and reset it to true only if the gc time limit is being
  // exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == nullptr) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != nullptr) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != nullptr) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return nullptr;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }
    }

    if (result == nullptr) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == nullptr, "must be null if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is null) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (return null, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the next
        // collection starts with a clean slate (i.e., forgets about previous
        // overhead excesses). Fill op.result() with a filler object so that
        // the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return null because gc_overhead_limit_exceeded is set");
          if (op.result() != nullptr) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return nullptr;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == nullptr) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != nullptr) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != nullptr) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return allocate_old_gen_and_record(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
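  // Once the counter (incremented below) reaches 64, it is reset and null is
  // returned so that the caller falls through to a GC.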
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return allocate_old_gen_and_record(size);
    } else {
      _death_march_count = 0;
    }
  }
  return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // The do_full_collection() parameter clear_all_soft_refs
  // is interpreted here as maximum_compaction which will
  // cause SoftRefs to be cleared.
  bool maximum_compaction = clear_all_soft_refs;
  PSParallelCompact::invoke(maximum_compaction);
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method applies policy for allocation
// flow, NOT collection policy. We do not check here whether GC time has
// exceeded its limit; that is the responsibility of the heap-specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but applies no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_stw_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == nullptr && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == nullptr) {
    result = allocate_old_gen_and_record(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == nullptr) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == nullptr) {
    result = allocate_old_gen_and_record(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = total_collections();
    full_gc_count = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count)) {
    return;
  }

  while (true) {
    VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause) || op.full_gc_succeeded()) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      if (full_gc_count != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
// The eden and survivor spaces are treated as single blocks as it is hard to divide
// these spaces.
// The old space is divided into fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the block and get the block index.
  size_t claim_and_get_block() {
    size_t block_index;
    block_index = Atomic::fetch_then_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ?
           block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space,
                       from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  PSParallelCompact::print_on_error(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();

    log_debug(gc, verify)("CardTable");
    card_table()->verify_all_young_refs_imprecise();
  }
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         rs.base(),
                         rs.size(),
                         page_size);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
  return _old_gen->allocate(size);
}

void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(_old_gen->object_space()->used_region().contains(archive_space),
         "Archive space not contained in old gen");
  _old_gen->complete_loaded_archive_space(archive_space);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}

void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
    return;
  }

  // Ensure ThreadTotalCPUTimeClosure destructor is called before publishing gc
  // time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads in GCTaskManager never terminate, so it
    // is safe for VMThread to read their CPU times. If upstream changes this
    // behavior, we should rethink if it is still safe.
    gc_threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}