/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psCompactionManagerNew.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = nullptr;
PSCardTable*                  PSScavenge::_card_table = nullptr;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = nullptr;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = nullptr;

static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSPromoteRootsClosure roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::class_loader_data:
    {
      PSScavengeCLDClosure cld_closure(pm);
      ClassLoaderDataGraph::cld_do(&cld_closure);
    }
    break;

    case ParallelRootType::code_cache:
    {
      MarkingNMethodClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);
      ScavengableNMethods::nmethods_do(&code_closure);
    }
    break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

static void steal_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    ScannerTask task;
    if (PSPromotionManager::steal_depth(worker_id, task)) {
      pm->process_popped_location_depth(task, true);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != nullptr, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
#ifdef ASSERT
    // Referent must be non-null and in from-space
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    assert(oopDesc::is_oop(obj), "referent must be an oop");
    assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
    assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space");
#endif

    _promotion_manager->copy_and_push_safe_barrier</*promote_immediately=*/false>(p);
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
  TaskTerminator* _terminator;
  uint _worker_id;

public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm, TaskTerminator* terminator, uint worker_id)
    : _promotion_manager(pm), _terminator(terminator), _worker_id(worker_id) {}

  virtual void do_void() {
    assert(_promotion_manager != nullptr, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");

    if (_terminator != nullptr) {
      steal_work(*_terminator, _worker_id);
    }
  }
};

class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelScavengeRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelScavengeRefProcProxyTask", max_workers),
      _terminator(max_workers, UseCompactObjectHeaders ? ParCompactionManagerNew::marking_stacks() : ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

class PSThreadRootsTaskClosure : public ThreadClosure {
  uint _worker_id;
public:
  PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
    PSScavengeRootsClosure roots_closure(pm);
    MarkingNMethodClosure roots_in_nmethods(&roots_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);

    thread->oops_do(&roots_closure, &roots_in_nmethods);

    // Do the real work
    pm->drain_stacks(false);
  }
};

class ScavengeRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_strong_par_state;
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_old_gen_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    uint active_workers) :
    WorkerTask("ScavengeRootsTask"),
    _strong_roots_scope(active_workers),
    _subtasks(ParallelRootType::sentinel),
    _old_gen(old_gen),
    _gen_top(old_gen->object_space()->top()),
    _active_workers(active_workers),
    _is_old_gen_empty(old_gen->object_space()->is_empty()),
    _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
    if (!_is_old_gen_empty) {
      PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
      card_table->pre_scavenge(active_workers);
    }
  }

  virtual void work(uint worker_id) {
    assert(worker_id < _active_workers, "Sanity");
    ResourceMark rm;

    if (!_is_old_gen_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.
      {
        PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        // The top of the old gen changes during scavenge when objects are promoted.
        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space()->bottom(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }

    PSThreadRootsTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);

    // Scavenge OopStorages
    {
      PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
      PSScavengeRootsClosure closure(pm);
      _oop_storage_strong_par_state.oops_do(&closure);
      // Do the real work
      pm->drain_stacks(false);
    }

    // If active_workers can exceed 1, add a steal_work().
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and expects a steal_work() to complete the draining if
    // ParallelGCThreads is > 1.

    if (_active_workers > 1) {
      steal_work(_terminator, worker_id);
    }
  }
};

bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(),
         "Attempt to scavenge with live objects in to_space");

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->start_discovery(clear_soft_refs);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, active_workers);
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());

      ParallelScavengeRefProcProxyTask task(reference_processor()->max_num_queues());
      stats = reference_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      PSAdjustWeakRootsClosure root_closure;
      WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(), &_is_alive_closure, &root_closure, 1);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: %zu young_gen_capacity: %zu",
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_gen_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have
        // it follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio,
                                young_gen->max_gen_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
          // Calculate optimal free space amounts
          assert(young_gen->max_gen_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
                                 young_gen->from_space()->capacity_in_bytes() -
                                 young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of eden. With NUMA-eden, CPU hotplugging or offlining
      // can change the heap layout, so make sure eden is reshaped if that is the case.
      // update() will also cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

void PSScavenge::clean_up_failed_promotion() {
  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!young_gen->to_space()->is_empty()) {
    // To-space is not empty; should run full-gc instead.
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen = old_gen->max_gen_size() - old_gen->used_in_bytes();
  bool result = promotion_estimate < free_in_old_gen;

  log_trace(ergo)("%s scavenge: average_promoted %zu padded_average_promoted %zu free in old gen %zu",
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  free_in_old_gen);

  return result;
}

// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode(cast_to_oop(v));
  }
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,          // mt processing degree
                           ParallelGCThreads,          // mt discovery degree
                           false,                      // concurrent_discovery
                           &_is_alive_closure);        // header provides liveness info

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}