/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor* PSScavenge::_ref_processor = nullptr;
PSCardTable* PSScavenge::_card_table = nullptr;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = nullptr;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
CollectorCounters* PSScavenge::_counters = nullptr;
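
// Scavenge the objects directly reachable from one class of strong roots
// (class-loader data or the code cache), then drain this worker's local
// promotion-manager stacks.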
static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSPromoteRootsClosure roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::class_loader_data:
      {
        PSScavengeCLDClosure cld_closure(pm);
        ClassLoaderDataGraph::cld_do(&cld_closure);
      }
      break;

    case ParallelRootType::code_cache:
      {
        MarkingNMethodClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);
        ScavengableNMethods::nmethods_do(&code_closure);
      }
      break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

static void steal_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    ScannerTask task;
    if (PSPromotionManager::steal_depth(worker_id, task)) {
      pm->process_popped_location_depth(task, true);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Define before use
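// Liveness predicate used during reference processing and weak-root handling:
// an object outside the young generation is treated as live; a young object is
// live only if it has already been forwarded (copied) by this scavenge.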
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != nullptr, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
#ifdef ASSERT
    // Referent must be non-null and in from-space
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    assert(oopDesc::is_oop(obj), "referent must be an oop");
    assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
    assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space");
#endif

    _promotion_manager->copy_and_push_safe_barrier</*promote_immediately=*/false>(p);
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
  TaskTerminator* _terminator;
  uint _worker_id;

public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm, TaskTerminator* terminator, uint worker_id)
    : _promotion_manager(pm), _terminator(terminator), _worker_id(worker_id) {}

  virtual void do_void() {
    assert(_promotion_manager != nullptr, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");

    if (_terminator != nullptr) {
      steal_work(*_terminator, _worker_id);
    }
  }
};

class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelScavengeRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelScavengeRefProcProxyTask", max_workers),
      _terminator(max_workers, ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

class PSThreadRootsTaskClosure : public ThreadClosure {
  uint _worker_id;
public:
  PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
    PSScavengeRootsClosure roots_closure(pm);
    MarkingNMethodClosure roots_in_nmethods(&roots_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);

    thread->oops_do(&roots_closure, &roots_in_nmethods);

    // Do the real work
    pm->drain_stacks(false);
  }
};

class ScavengeRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_strong_par_state;
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_old_gen_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    uint active_workers) :
      WorkerTask("ScavengeRootsTask"),
      _strong_roots_scope(active_workers),
      _subtasks(ParallelRootType::sentinel),
      _old_gen(old_gen),
      _gen_top(old_gen->object_space()->top()),
      _active_workers(active_workers),
      _is_old_gen_empty(old_gen->object_space()->is_empty()),
      _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
    if (!_is_old_gen_empty) {
      PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
      card_table->pre_scavenge(active_workers);
    }
  }
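
  // Each worker: (1) scans dirty cards in the old gen for old-to-young pointers,
  // (2) claims and processes the enumerated root types, (3) scans Java thread
  // stacks, (4) scans the strong OopStorages, then (5) steals work until all
  // workers agree to terminate.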
  virtual void work(uint worker_id) {
    assert(worker_id < _active_workers, "Sanity");
    ResourceMark rm;

    if (!_is_old_gen_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.
      {
        PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        // The top of the old gen changes during scavenge when objects are promoted.
        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space()->bottom(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }

    PSThreadRootsTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(true /* is_par */, &closure);

    // Scavenge OopStorages
    {
      PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
      PSScavengeRootsClosure closure(pm);
      _oop_storage_strong_par_state.oops_do(&closure);
      // Do the real work
      pm->drain_stacks(false);
    }

    // If active_workers can exceed 1, add a steal_work().
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and expects a steal_work() to complete the draining if
    // ParallelGCThreads is > 1.

    if (_active_workers > 1) {
      steal_work(_terminator, worker_id);
    }
  }
};

bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(),
         "Attempt to scavenge with live objects in to_space");

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }
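
  // Main scavenge scope: the parallel root scavenge and evacuation, reference
  // processing, weak-root processing and post-scavenge bookkeeping below are
  // all covered by the "Pause Young" timing and tracing scopes.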
  {
    ResourceMark rm;

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->start_discovery(clear_soft_refs);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, active_workers);
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->set_active_mt_degree(active_workers);
      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());

      ParallelScavengeRefProcProxyTask task(reference_processor()->max_num_queues());
      stats = reference_processor()->process_discovered_references(task, pt);

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      PSAdjustWeakRootsClosure root_closure;
      WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(), &_is_alive_closure, &root_closure, 1);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);
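
    // On a successful scavenge all live young objects have been copied into
    // to-space or promoted, so eden and from-space can be cleared and the
    // survivor spaces swapped: the old to-space becomes the new from-space.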
    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: %zu young_gen_capacity: %zu",
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_gen_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio,
                                young_gen->max_gen_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
          // Calculate optimal free space amounts
          assert(young_gen->max_gen_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
                                 young_gen->from_space()->capacity_in_bytes() -
                                 young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of eden. With NUMA-eden, CPU hotplug or offlining
      // can change the heap layout; make sure eden is reshaped if that is the
      // case. update() will also cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}
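
// Promotion failure leaves forwarding pointers installed in the mark words of
// objects that could not be moved; restore the original mark words that were
// preserved during the scavenge.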
void PSScavenge::clean_up_failed_promotion() {
  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!young_gen->to_space()->is_empty()) {
    // To-space is not empty; should run full-gc instead.
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen = old_gen->max_gen_size() - old_gen->used_in_bytes();
  bool result = promotion_estimate < free_in_old_gen;

  log_trace(ergo)("%s scavenge: average_promoted %zu padded_average_promoted %zu free in old gen %zu",
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  free_in_old_gen);

  return result;
}

// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode(cast_to_oop(v));
  }
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // header provides liveness info

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}