/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRegulatorThread.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "utilities/events.hpp"


class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
public:
  static void print() {
    ShenandoahGenerationalInitLogger logger;
    logger.print_all();
  }

  void print_heap() override {
    ShenandoahInitLogger::print_heap();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();

    ShenandoahYoungGeneration* young = heap->young_generation();
    log_info(gc, init)("Young Generation Soft Size: " EXACTFMT, EXACTFMTARGS(young->soft_max_capacity()));
    log_info(gc, init)("Young Generation Max: " EXACTFMT, EXACTFMTARGS(young->max_capacity()));

    ShenandoahOldGeneration* old = heap->old_generation();
    log_info(gc, init)("Old Generation Soft Size: " EXACTFMT, EXACTFMTARGS(old->soft_max_capacity()));
    log_info(gc, init)("Old Generation Max: " EXACTFMT, EXACTFMTARGS(old->max_capacity()));
  }

protected:
  void print_gc_specific() override {
    ShenandoahInitLogger::print_gc_specific();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
    log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
  }
};

size_t ShenandoahGenerationalHeap::calculate_min_plab() {
  return align_up(PLAB::min_size(), CardTable::card_size_in_words());
}

size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
  return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
}

// Returns size in bytes
size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
}

ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  ShenandoahHeap(policy),
  _age_census(nullptr),
  _evac_tracker(new ShenandoahEvacuationTracker()),
  _min_plab_size(calculate_min_plab()),
  _max_plab_size(calculate_max_plab()),
  _regulator_thread(nullptr),
  _young_gen_memory_pool(nullptr),
  _old_gen_memory_pool(nullptr) {
  assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
}

void ShenandoahGenerationalHeap::post_initialize() {
  ShenandoahHeap::post_initialize();
  _age_census = new ShenandoahAgeCensus();
}

void ShenandoahGenerationalHeap::print_init_logger() const {
  ShenandoahGenerationalInitLogger logger;
  logger.print_all();
}

void ShenandoahGenerationalHeap::print_tracing_info() const {
  ShenandoahHeap::print_tracing_info();

  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.cr();
    ls.cr();
    evac_tracker()->print_global_on(&ls);
  }
}

void ShenandoahGenerationalHeap::initialize_heuristics() {
  // Initialize global generation and heuristics even in generational mode.
  ShenandoahHeap::initialize_heuristics();

  // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
  // for old would be total heap - minimum capacity of young. This means the sum of the maximum
  // allowed for old and young could exceed the total heap size. It remains the case that the
  // _actual_ capacity of young + old = total.
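  // Illustrative example (hypothetical numbers, not produced by the sizer): on a 1 GB heap whose
  // young generation may range from 256 MB to 768 MB, old could be allowed up to 1 GB - 256 MB = 768 MB.
  // The two allowed maxima then sum to more than the heap, yet the capacities actually in effect
  // at any point in time still sum to exactly the total heap size.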
  _generation_sizer.heap_size_changed(max_capacity());
  size_t initial_capacity_young = _generation_sizer.max_young_size();
  size_t max_capacity_young = _generation_sizer.max_young_size();
  size_t initial_capacity_old = max_capacity() - max_capacity_young;
  size_t max_capacity_old = max_capacity() - initial_capacity_young;

  _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young, initial_capacity_young);
  _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old, initial_capacity_old);
  _young_generation->initialize_heuristics(mode());
  _old_generation->initialize_heuristics(mode());
}

void ShenandoahGenerationalHeap::initialize_serviceability() {
  assert(mode()->is_generational(), "Only for the generational mode");
  _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
  _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
  cycle_memory_manager()->add_pool(_young_gen_memory_pool);
  cycle_memory_manager()->add_pool(_old_gen_memory_pool);
  stw_memory_manager()->add_pool(_young_gen_memory_pool);
  stw_memory_manager()->add_pool(_old_gen_memory_pool);
}

GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
  assert(mode()->is_generational(), "Only for the generational mode");
  GrowableArray<MemoryPool*> memory_pools(2);
  memory_pools.append(_young_gen_memory_pool);
  memory_pools.append(_old_gen_memory_pool);
  return memory_pools;
}

void ShenandoahGenerationalHeap::initialize_controller() {
  auto control_thread = new ShenandoahGenerationalControlThread();
  _control_thread = control_thread;
  _regulator_thread = new ShenandoahRegulatorThread(control_thread);
}

void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (!shenandoah_policy()->is_at_shutdown()) {
    ShenandoahHeap::gc_threads_do(tcl);
    tcl->do_thread(regulator_thread());
  }
}

void ShenandoahGenerationalHeap::stop() {
  ShenandoahHeap::stop();
  regulator_thread()->stop();
}

bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) {
    return false;
  }

  if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
    // We are marking young, this object is in young, and it is below the TAMS
    return true;
  }

  if (is_in_old(obj)) {
    // Card marking barriers are required for objects in the old generation
    return true;
  }

  if (has_forwarded_objects()) {
    // Object may have pointers that need to be updated
    return true;
  }

  return false;
}

void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);
  workers()->run_task(&task);
}

void ShenandoahGenerationalHeap::promote_regions_in_place(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, true /* only promote regions */);
  workers()->run_task(&task);
}

oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate anymore.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahAffiliation target_gen = r->affiliation();
  // gc_generation() can change asynchronously and should not be used here.
  assert(active_generation() != nullptr, "Error");
  if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }

    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
      if (result != nullptr) {
        return result;
      }
      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
    }
  }
  return try_evacuate_object(p, thread, r, target_gen);
}

// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                                    ShenandoahAffiliation target_gen) {
  bool alloc_from_lab = true;
  bool has_plab = false;
  HeapWord* copy = nullptr;
  size_t size = p->size();
  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      switch (target_gen) {
        case YOUNG_GENERATION: {
          copy = allocate_from_gclab(thread, size);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve. Try resetting
            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
            copy = allocate_from_gclab(thread, size);
            // If we still get nullptr, we'll try a shared allocation below.
          }
          break;
        }
        case OLD_GENERATION: {
          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
          if (plab != nullptr) {
            has_plab = true;
            copy = allocate_from_plab(thread, size, is_promotion);
            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
                ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
              // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the
              // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
              if (plab->words_remaining() < plab_min_size()) {
                ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
                copy = allocate_from_plab(thread, size, is_promotion);
                // If we still get nullptr, we'll try a shared allocation below.
                if (copy == nullptr) {
                  // If retry fails, don't continue to retry until we have success (probably in next GC pass)
                  ShenandoahThreadLocalData::disable_plab_retries(thread);
                }
              }
              // else, copy still equals nullptr. this causes shared allocation below, preserving this plab for future needs.
            }
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
      // costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
      // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    if (target_gen == OLD_GENERATION) {
      if (from_region->is_young()) {
        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
        old_generation()->handle_failed_promotion(thread, size);
        return nullptr;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        old_generation()->handle_failed_evacuation();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);

    oom_evac_handler()->handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  NOT_PRODUCT(evac_tracker()->begin_evacuation(thread, size * HeapWordSize));
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
  oop copy_val = cast_to_oop(copy);

  // Update the age of the evacuated object
  if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!

    // This is necessary for virtual thread support. This uses the mark word without
    // considering that it may now be a forwarding pointer (and could therefore crash).
    // Secondarily, we do not want to spend cycles relativizing stack chunks for oops
    // that lost the evacuation race (and will therefore not become visible). It is
    // safe to do this on the public copy (this is also done during concurrent mark).
    ContinuationGCSupport::relativize_stack_chunk(copy_val);

    // Record that the evacuation succeeded
    NOT_PRODUCT(evac_tracker()->end_evacuation(thread, size * HeapWordSize));

    if (target_gen == OLD_GENERATION) {
      old_generation()->handle_evacuation(copy, size, from_region->is_young());
    } else {
      // When copying to the old generation above, we don't care
      // about recording object age in the census stats.
      assert(target_gen == YOUNG_GENERATION, "Error");
      // We record this census only when simulating pre-adaptive tenuring behavior, or
      // when we have been asked to record the census at evacuation rather than at mark
      if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
        evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
      }
    }
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      switch (target_gen) {
        case YOUNG_GENERATION: {
          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
          break;
        }
        case OLD_GENERATION: {
          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
          if (is_promotion) {
            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  HeapWord* obj;

  if (plab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
    // No PLABs in this thread, fallback to shared allocation
    return nullptr;
  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
    return nullptr;
  }
  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
  obj = plab->allocate(size);
  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
    obj = allocate_from_plab_slow(thread, size, is_promotion);
  }
  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
  if (obj == nullptr) {
    return nullptr;
  }

  if (is_promotion) {
    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
  }
  return obj;
}

// Establish a new PLAB and allocate size HeapWords within it.
HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
  // New object should fit the PLAB size

  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
  const size_t plab_min_size = this->plab_min_size();
  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
  // allocations in other PLABs.
  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;

  // Figure out size of new PLAB, using value determined at last refill.
  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
  if (cur_size == 0) {
    cur_size = plab_min_size;
  }

  // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
  size_t future_size = MIN2(cur_size * 2, plab_max_size());
  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
  // are card multiples.)
  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: " SIZE_FORMAT
         ", card_size: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them. Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
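  // Illustrative (hypothetical sizes): if the last refill used 512 words and plab_max_size() is 4096 words,
  // we record 1024 words here; later refills in this epoch would request 2048 and then 4096, and every value
  // stays card-aligned because both the floor and the ceiling are card-aligned.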
  log_debug(gc, free)("Set new PLAB size: " SIZE_FORMAT, future_size);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
    log_debug(gc, free)("Current PLAB size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, cur_size, size);
    return nullptr;
  }

  // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  if (plab->words_remaining() < plab_min_size) {
    // Retire current PLAB. This takes care of any PLAB book-keeping.
    // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
    // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
    retire_plab(plab, thread);

    size_t actual_size = 0;
    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
    if (plab_buf == nullptr) {
      if (min_size == plab_min_size) {
        // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
        // to fail faster on subsequent promotion attempts.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
      }
      return nullptr;
    } else {
      ShenandoahThreadLocalData::enable_plab_retries(thread);
    }
    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
    if (ZeroTLAB) {
      // ... and clear it.
      Copy::zero_to_words(plab_buf, actual_size);
    } else {
      // ...and zap just allocated object.
#ifdef ASSERT
      // Skip mangling the space corresponding to the object header to
      // ensure that the returned space is not considered parsable by
      // any concurrent GC thread.
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
    }
    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
    plab->set_buf(plab_buf, actual_size);
    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
      return nullptr;
    }
    return plab->allocate(size);
  } else {
    // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble
    // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
    return nullptr;
  }
}

HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
  // Align requested sizes to card-sized multiples. Align down so that we don't violate max size of TLAB.
  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
  assert(word_size >= min_size, "Requested PLAB is too small");

  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
  // if we are at risk of infringing on the old-gen evacuation budget.
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
  return res;
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
  // We don't enforce limits on plab evacuations. We let it consume all available old-gen memory in order to reduce
  // probability of an evacuation failure. We do enforce limits on promotion, to make sure that excessive promotion
  // does not result in an old-gen evacuation failure. Note that a failed promotion is relatively harmless. Any
  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.

  // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
  // promotions. Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
  //  1. Some of the plab may have been dedicated to evacuations.
  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
  size_t not_promoted =
          ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
  ShenandoahThreadLocalData::reset_plab_promoted(thread);
  ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
  if (not_promoted > 0) {
    old_generation()->unexpend_promoted(not_promoted);
  }
  const size_t original_waste = plab->waste();
  HeapWord* const top = plab->top();

  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
  // It adds the size of this unused memory, in words, to plab->waste().
  plab->retire();
  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
    // safely walk the region backing the plab.
    log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
                  plab->waste() - original_waste, p2i(top));
    // No lock is necessary because the PLAB memory is aligned on card boundaries.
    old_generation()->card_scan()->register_object_without_lock(top);
  }
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
  Thread* thread = Thread::current();
  retire_plab(plab, thread);
}

ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
  shenandoah_assert_heaplocked_or_safepoint();

  ShenandoahOldGeneration* old_gen = old_generation();
  const ssize_t old_region_balance = old_gen->get_region_balance();
  old_gen->set_region_balance(0);

  if (old_region_balance > 0) {
    const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
    const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
    return TransferResult {
      success, old_region_surplus, "young"
    };
  }

  if (old_region_balance < 0) {
    const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
    const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
    if (!success) {
      old_gen->handle_failed_transfer();
    }
    return TransferResult {
      success, old_region_deficit, "old"
    };
  }

  return TransferResult {true, 0, "none"};
}

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
// xfer_limit is the maximum we're able to transfer from young to old.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {

  // We can limit the old reserve to the size of anticipated promotions:
  // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)   // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>              OE = YE*SOEP/(100-SOEP)

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation()->available();
  // The free set will reserve this amount of memory to hold young evacuations
  const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

  // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.

  const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)?
          bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
                                     bound_on_old_reserve);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // Decide how much old space we should reserve for a mixed collection
  double reserve_for_mixed = 0;
  if (old_generation()->has_unprocessed_collection_candidates()) {
    // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
    const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
    assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must be less than total available");
    const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
    reserve_for_mixed = max_evac_need + old_fragmented_available;
    if (reserve_for_mixed > max_old_reserve) {
      reserve_for_mixed = max_old_reserve;
    }
  }

  // Decide how much space we should reserve for promotions from young
  size_t reserve_for_promo = 0;
  const size_t promo_load = old_generation()->get_promotion_potential();
  const bool doing_promotions = promo_load > 0;
  if (doing_promotions) {
    // We're promoting and have a bound on the maximum amount that can be promoted
    assert(max_old_reserve >= reserve_for_mixed, "Sanity");
    const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
    reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
  }

  // This is the total old we want to ideally reserve
  const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
  assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");

  // We now check if the old generation is running a surplus or a deficit.
  const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
  if (max_old_available >= old_reserve) {
    // We are running a surplus, so the old region surplus can go to young
    const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
    const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
    const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
    old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
  } else {
    // We are running a deficit which we'd like to fill from young.
    // Ignore that this will directly impact young_generation()->max_capacity(),
    // indirectly impacting young_reserve and old_reserve. These computations are conservative.
    // Note that deficit is rounded up by one region.
    const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
    const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;

    // Round down the regions we can transfer from young to old. If we're running short
    // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
    // curtailed if the budget is restricted.
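    // Worked example (illustrative): if old_reserve exceeds max_old_available by 2.1 regions' worth
    // of bytes, old_need rounds up to 3 regions; if old_xfer_limit only covers 2 regions, the deficit
    // recorded below is capped at 2.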
    const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
  }
}

void ShenandoahGenerationalHeap::reset_generation_reserves() {
  young_generation()->set_evacuation_reserve(0);
  old_generation()->set_evacuation_reserve(0);
  old_generation()->set_promoted_reserve(0);
}

void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* const young_gen = heap->young_generation();
  ShenandoahOldGeneration* const old_gen = heap->old_generation();
  const size_t young_available = young_gen->available();
  const size_t old_available = old_gen->available();
  ss->print_cr("After %s, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
               PROPERFMT ", young_available: " PROPERFMT,
               when,
               success? "successfully transferred": "failed to transfer", region_count, region_destination,
               PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
}

void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
  class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
  private:
    ShenandoahPhaseTimings::Phase _phase;
    ShenandoahRegionIterator _regions;
  public:
    explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
      WorkerTask("Shenandoah Global Coalesce"),
      _phase(phase) {}

    void work(uint worker_id) override {
      ShenandoahWorkerTimingsTracker timer(_phase,
                                           ShenandoahPhaseTimings::ScanClusters,
                                           worker_id, true);
      ShenandoahHeapRegion* region;
      while ((region = _regions.next()) != nullptr) {
        // old region is not in the collection set and was not immediately trashed
        if (region->is_old() && region->is_active() && !region->is_humongous()) {
          // Reset the coalesce and fill boundary because this is a global collect
          // and cannot be preempted by young collects. We want to be sure the entire
          // region is coalesced here and does not resume from a previously interrupted
          // or completed coalescing.
          region->begin_preemptible_coalesce_and_fill();
          region->oop_coalesce_and_fill(false);
        }
      }
    }
  };

  ShenandoahPhaseTimings::Phase phase = concurrent ?
          ShenandoahPhaseTimings::conc_coalesce_and_fill :
          ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;

  // This is not cancellable
  ShenandoahGlobalCoalesceAndFill coalesce(phase);
  workers()->run_task(&coalesce);
  old_generation()->set_parsable(true);
}

template<bool CONCURRENT>
class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahGenerationalHeap* _heap;
  ShenandoahRegionIterator* _regions;
  ShenandoahRegionChunkIterator* _work_chunks;

public:
  explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
                                                    ShenandoahRegionChunkIterator* work_chunks) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahGenerationalHeap::heap()),
    _regions(regions),
    _work_chunks(work_chunks)
  {
    bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
    log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;

    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
      // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
      // next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set

    ShenandoahHeapRegion* r = _regions->next();
    // We update references for global, old, and young collections.
    ShenandoahGeneration* const gc_generation = _heap->gc_generation();
    shenandoah_assert_generations_reconciled();
    assert(gc_generation->is_mark_complete(), "Expected complete marking");
    ShenandoahMarkingContext* const ctx = _heap->marking_context();
    bool is_mixed = _heap->collection_set()->has_old_regions();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");

      log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region " SIZE_FORMAT, worker_id, r->index());
      bool region_progress = false;
      if (r->is_active() && !r->is_cset()) {
        if (r->is_young()) {
          _heap->marked_object_oop_iterate(r, &cl, update_watermark);
          region_progress = true;
        } else if (r->is_old()) {
          if (gc_generation->is_global()) {
            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
            region_progress = true;
          }
          // Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
          // Don't bother to report pacing progress in this case.
        } else {
          // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
          // to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
          // active status may propagate at a different speed than the changing of the region's affiliation.

          // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
          // by this thread before the region's affiliation() is seen by this thread.

          // It's ok for this race to occur because the newly transformed region does not have any references to be
          // updated.

          assert(r->get_update_watermark() == r->bottom(),
                 "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
                 r->affiliation_name(), r->index());
        }
      }

      if (region_progress && ShenandoahPacing) {
        _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
      }

      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }

      r = _regions->next();
    }

    if (!gc_generation->is_global()) {
      // Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
      // set processing if not in generational mode or if GLOBAL mode.

      // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
      // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
      // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
      update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
    }
  }

  template<class T>
  void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {

    struct ShenandoahRegionChunk assignment;
    ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();

    while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
      // Keep grabbing next work chunk to process until finished, or asked to yield
      ShenandoahHeapRegion* r = assignment._r;
      if (r->is_active() && !r->is_cset() && r->is_old()) {
        HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
        HeapWord* end_of_range = r->get_update_watermark();
        if (end_of_range > start_of_range + assignment._chunk_size) {
          end_of_range = start_of_range + assignment._chunk_size;
        }

        if (start_of_range >= end_of_range) {
          continue;
        }

        // Old region in a young cycle or mixed cycle.
        if (is_mixed) {
          if (r->is_humongous()) {
            // Need to examine both dirty and clean cards during mixed evac.
            r->oop_iterate_humongous_slice_all(&cl, start_of_range, assignment._chunk_size);
          } else {
            // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
            // and filled. This will use mark bits to find objects that need to be updated.
            update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
          }
        } else {
          // This is a young evacuation
          size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
          size_t clusters = assignment._chunk_size / cluster_size;
          assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
          scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
        }

        if (ShenandoahPacing) {
          _heap->pacer()->report_update_refs(pointer_delta(end_of_range, start_of_range));
        }
      }
    }
  }

  template<class T>
  void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
                                       const ShenandoahHeapRegion* r, HeapWord* start_of_range,
                                       HeapWord* end_of_range) const {
    // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
    ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());

    // Any object that begins in a previous range is part of a different scanning assignment. Any object that
    // starts after end_of_range is also not my responsibility. (Either allocated during evacuation, so does
    // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)

    // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
    // when no live object is found in the range.
    HeapWord* tams = ctx->top_at_mark_start(r);
    HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);

    while (p < end_of_range) {
      // p is known to point to the beginning of marked object obj
      oop obj = cast_to_oop(p);
      objs.do_object(obj);
      HeapWord* prev_p = p;
      p += obj->size();
      if (p < tams) {
        p = ctx->get_next_marked_addr(p, tams);
        // If there are no more marked objects before tams, this returns tams. Note that tams is
        // either >= end_of_range, or tams is the start of an object that is marked.
      }
      assert(p != prev_p, "Lack of forward progress");
    }
  }

  HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
                                        HeapWord* start_of_range, HeapWord* end_of_range) const {
    HeapWord* p = start_of_range;

    if (p >= tams) {
      // We cannot use ctx->is_marked(obj) to test whether an object begins at this address. Instead,
      // we need to use the remembered set crossing map to advance p to the first object that starts
      // within the enclosing card.
      size_t card_index = scanner->card_index_for_addr(start_of_range);
      while (true) {
        HeapWord* first_object = scanner->first_object_in_card(card_index);
        if (first_object != nullptr) {
          p = first_object;
          break;
        } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
          card_index++;
        } else {
          // Signal that no object was found in range
          p = end_of_range;
          break;
        }
      }
    } else if (!ctx->is_marked(cast_to_oop(p))) {
      p = ctx->get_next_marked_addr(p, tams);
      // If there are no more marked objects before tams, this returns tams.
      // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
    }
    return p;
  }
};

void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  const uint nworkers = workers()->active_workers();
  ShenandoahRegionChunkIterator work_list(nworkers);
  if (concurrent) {
    ShenandoahGenerationalUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  } else {
    ShenandoahGenerationalUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  }

  if (ShenandoahEnableCardStats) {
    // Only do this if we are collecting card stats
    ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
    assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
    card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
  }
}

struct ShenandoahCompositeRegionClosure {
  template<typename C1, typename C2>
  class Closure : public ShenandoahHeapRegionClosure {
  private:
    C1 &_c1;
    C2 &_c2;

  public:
    Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}

    void heap_region_do(ShenandoahHeapRegion* r) override {
      _c1.heap_region_do(r);
      _c2.heap_region_do(r);
    }

    bool is_thread_safe() override {
      return _c1.is_thread_safe() && _c2.is_thread_safe();
    }
  };

  template<typename C1, typename C2>
  static Closure<C1, C2> of(C1 &c1, C2 &c2) {
    return Closure<C1, C2>(c1, c2);
  }
};

class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* _ctx;

public:
  explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    // Maintenance of region age must follow evacuation in order to account for
    // evacuation allocations within survivor regions. We consult region age during
    // the subsequent evacuation to determine whether certain objects need to
    // be promoted.
    if (r->is_young() && r->is_active()) {
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();

      // Allocations move the watermark when top moves. However, compacting
      // objects will sometimes lower top beneath the watermark, after which,
      // attempts to read the watermark will assert out (watermark should not be
      // higher than top).
      if (top > tams) {
        // There have been allocations in this region since the start of the cycle.
        // Any objects new to this region must not assimilate elevated age.
        r->reset_age();
      } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
        r->increment_age();
      }
    }
  }

  bool is_thread_safe() override {
    return true;
  }
};

void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates pins;
  ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
  auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
  shenandoah_assert_heaplocked_or_safepoint();
  if (is_concurrent_old_mark_in_progress()) {
    // This is still necessary for degenerated cycles because the degeneration point may occur
    // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_update_refs for
    // a more detailed explanation.
    old_generation()->transfer_pointers_from_satb();
  }

  // We defer generation resizing actions until after cset regions have been recycled.
  TransferResult result = balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Degenerated GC", &ls);
  }

  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
  // transient state. Otherwise, these actions have no effect.
  reset_generation_reserves();

  if (!old_generation()->is_parsable()) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
    coalesce_and_fill_old_regions(false);
  }
}

void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
  if (!old_generation()->is_parsable()) {
    // Class unloading may render the card offsets unusable, so we must rebuild them before
    // the next remembered set scan. We _could_ let the control thread do this sometime after
    // the global cycle has completed and before the next young collection, but under memory
    // pressure the control thread may not have the time (that is, because it's running back
    // to back GCs). In that scenario, we would have to make the old regions parsable before
    // we could start a young collection. This could delay the start of the young cycle and
    // throw off the heuristics.
    entry_global_coalesce_and_fill();
  }

  TransferResult result;
  {
    ShenandoahHeapLocker locker(lock());

    result = balance_generations();
    reset_generation_reserves();
  }

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Concurrent GC", &ls);
  }
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
  const char* msg = "Coalescing and filling old regions";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  coalesce_and_fill_old_regions(true);
}

void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
  ShenandoahUpdateRegionAges cl(ctx);
  parallel_heap_region_iterate(&cl);
}