/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRegulatorThread.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "utilities/events.hpp"


class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
public:
  static void print() {
    ShenandoahGenerationalInitLogger logger;
    logger.print_all();
  }

  void print_heap() override {
    ShenandoahInitLogger::print_heap();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();

    ShenandoahYoungGeneration* young = heap->young_generation();
    log_info(gc, init)("Young Generation Soft Size: " EXACTFMT, EXACTFMTARGS(young->soft_max_capacity()));
    log_info(gc, init)("Young Generation Max: " EXACTFMT, EXACTFMTARGS(young->max_capacity()));

    ShenandoahOldGeneration* old = heap->old_generation();
    log_info(gc, init)("Old Generation Soft Size: " EXACTFMT, EXACTFMTARGS(old->soft_max_capacity()));
    log_info(gc, init)("Old Generation Max: " EXACTFMT, EXACTFMTARGS(old->max_capacity()));
  }

protected:
  void print_gc_specific() override {
    ShenandoahInitLogger::print_gc_specific();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
    log_info(gc, init)("Old Heuristics: %s",
                       heap->old_generation()->heuristics()->name());
  }
};

size_t ShenandoahGenerationalHeap::calculate_min_plab() {
  return align_up(PLAB::min_size(), CardTable::card_size_in_words());
}

size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
  return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
}

// Returns size in bytes
size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
}

ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  ShenandoahHeap(policy),
  _age_census(nullptr),
  _evac_tracker(new ShenandoahEvacuationTracker()),
  _min_plab_size(calculate_min_plab()),
  _max_plab_size(calculate_max_plab()),
  _regulator_thread(nullptr),
  _young_gen_memory_pool(nullptr),
  _old_gen_memory_pool(nullptr) {
  assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
}

void ShenandoahGenerationalHeap::post_initialize() {
  ShenandoahHeap::post_initialize();
  _age_census = new ShenandoahAgeCensus();
}

void ShenandoahGenerationalHeap::print_init_logger() const {
  ShenandoahGenerationalInitLogger logger;
  logger.print_all();
}

void ShenandoahGenerationalHeap::print_tracing_info() const {
  ShenandoahHeap::print_tracing_info();

  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.cr();
    ls.cr();
    evac_tracker()->print_global_on(&ls);
  }
}

void ShenandoahGenerationalHeap::initialize_heuristics() {
  // Initialize global generation and heuristics even in generational mode.
  ShenandoahHeap::initialize_heuristics();

  // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
  // for old would be total heap - minimum capacity of young. This means the sum of the maximum
  // allowed for old and young could exceed the total heap size. It remains the case that the
  // _actual_ capacity of young + old = total.
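  // Hypothetical illustration (numbers are not defaults): with a 1024 MB heap and a 256 MB minimum
  // young size, the maximum allowed old capacity is 768 MB, while young itself may be allowed well
  // more than 256 MB, so the two maxima can sum to more than 1024 MB even though the actual
  // capacities in use always sum to exactly 1024 MB.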
  _generation_sizer.heap_size_changed(max_capacity());
  size_t initial_capacity_young = _generation_sizer.max_young_size();
  size_t max_capacity_young = _generation_sizer.max_young_size();
  size_t initial_capacity_old = max_capacity() - max_capacity_young;
  size_t max_capacity_old = max_capacity() - initial_capacity_young;

  _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young, initial_capacity_young);
  _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old, initial_capacity_old);
  _young_generation->initialize_heuristics(mode());
  _old_generation->initialize_heuristics(mode());
}

void ShenandoahGenerationalHeap::initialize_serviceability() {
  assert(mode()->is_generational(), "Only for the generational mode");
  _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
  _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
  cycle_memory_manager()->add_pool(_young_gen_memory_pool);
  cycle_memory_manager()->add_pool(_old_gen_memory_pool);
  stw_memory_manager()->add_pool(_young_gen_memory_pool);
  stw_memory_manager()->add_pool(_old_gen_memory_pool);
}

GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
  assert(mode()->is_generational(), "Only for the generational mode");
  GrowableArray<MemoryPool*> memory_pools(2);
  memory_pools.append(_young_gen_memory_pool);
  memory_pools.append(_old_gen_memory_pool);
  return memory_pools;
}

void ShenandoahGenerationalHeap::initialize_controller() {
  auto control_thread = new ShenandoahGenerationalControlThread();
  _control_thread = control_thread;
  _regulator_thread = new ShenandoahRegulatorThread(control_thread);
}

void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (!shenandoah_policy()->is_at_shutdown()) {
    ShenandoahHeap::gc_threads_do(tcl);
    tcl->do_thread(regulator_thread());
  }
}

void ShenandoahGenerationalHeap::stop() {
  regulator_thread()->stop();
  ShenandoahHeap::stop();
}

void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);
  workers()->run_task(&task);
}

void ShenandoahGenerationalHeap::promote_regions_in_place(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, true /* only promote regions */);
  workers()->run_task(&task);
}

oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate anymore.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahAffiliation target_gen = r->affiliation();
  // gc_generation() can change asynchronously and should not be used here.
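  // The tenuring check below adds the region's age to the object's mark-word age and compares the
  // sum against the adaptive census threshold. As a hypothetical example, with a threshold of 7,
  // an object of mark age 4 residing in a region of age 3 becomes eligible for promotion to old.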
  assert(active_generation() != nullptr, "Error");
  if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }

    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
      if (result != nullptr) {
        return result;
      }
      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
    }
  }
  return try_evacuate_object(p, thread, r, target_gen);
}

// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                                    ShenandoahAffiliation target_gen) {
  bool alloc_from_lab = true;
  bool has_plab = false;
  HeapWord* copy = nullptr;

  markWord mark = p->mark();
  if (ShenandoahForwarding::is_forwarded(mark)) {
    return ShenandoahForwarding::get_forwardee(p);
  }
  size_t old_size = ShenandoahForwarding::size(p);
  size_t size = p->copy_size(old_size, mark);

  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      switch (target_gen) {
        case YOUNG_GENERATION: {
          copy = allocate_from_gclab(thread, size);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve. Try resetting
            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
            copy = allocate_from_gclab(thread, size);
            // If we still get nullptr, we'll try a shared allocation below.
          }
          break;
        }
        case OLD_GENERATION: {
          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
          if (plab != nullptr) {
            has_plab = true;
            copy = allocate_from_plab(thread, size, is_promotion);
            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
                ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
              // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the
              // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
              if (plab->words_remaining() < plab_min_size()) {
                ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
                copy = allocate_from_plab(thread, size, is_promotion);
                // If we still get nullptr, we'll try a shared allocation below.
                if (copy == nullptr) {
                  // If retry fails, don't continue to retry until we have success (probably in next GC pass)
                  ShenandoahThreadLocalData::disable_plab_retries(thread);
                }
              }
              // else, copy still equals nullptr. this causes shared allocation below, preserving this plab for future needs.
            }
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
      // costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
      // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    if (target_gen == OLD_GENERATION) {
      if (from_region->is_young()) {
        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
        old_generation()->handle_failed_promotion(thread, size);
        return nullptr;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        old_generation()->handle_failed_evacuation();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);

    oom_evac_handler()->handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  NOT_PRODUCT(evac_tracker()->begin_evacuation(thread, size * HeapWordSize));
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
  oop copy_val = cast_to_oop(copy);

  // Update the age of the evacuated object
  if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!

    // This is necessary for virtual thread support. This uses the mark word without
    // considering that it may now be a forwarding pointer (and could therefore crash).
    // Secondarily, we do not want to spend cycles relativizing stack chunks for oops
    // that lost the evacuation race (and will therefore not become visible). It is
    // safe to do this on the public copy (this is also done during concurrent mark).
    copy_val->initialize_hash_if_necessary(p);
    ContinuationGCSupport::relativize_stack_chunk(copy_val);

    // Record that the evacuation succeeded
    NOT_PRODUCT(evac_tracker()->end_evacuation(thread, size * HeapWordSize));

    if (target_gen == OLD_GENERATION) {
      old_generation()->handle_evacuation(copy, size, from_region->is_young());
    } else {
      // When copying to the old generation above, we don't care
      // about recording object age in the census stats.
      assert(target_gen == YOUNG_GENERATION, "Error");
      // We record this census only when simulating pre-adaptive tenuring behavior, or
      // when we have been asked to record the census at evacuation rather than at mark
      if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
        evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
      }
    }
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      switch (target_gen) {
        case YOUNG_GENERATION: {
          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
          break;
        }
        case OLD_GENERATION: {
          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
          if (is_promotion) {
            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  HeapWord* obj;

  if (plab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
    // No PLABs in this thread, fallback to shared allocation
    return nullptr;
  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
    return nullptr;
  }
  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
  obj = plab->allocate(size);
  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
    obj = allocate_from_plab_slow(thread, size, is_promotion);
  }
  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
  if (obj == nullptr) {
    return nullptr;
  }

  if (is_promotion) {
    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
  }
  return obj;
}

// Establish a new PLAB and allocate size HeapWords within it.
HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
  // New object should fit the PLAB size

  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
  const size_t plab_min_size = this->plab_min_size();
  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
  // allocations in other PLABs.
  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;

  // Figure out size of new PLAB, using value determined at last refill.
  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
  if (cur_size == 0) {
    cur_size = plab_min_size;
  }

  // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
  size_t future_size = MIN2(cur_size * 2, plab_max_size());
  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
  // are card multiples.)
  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: " SIZE_FORMAT
         ", card_size: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them. Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
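  // For illustration (sizes hypothetical): a thread's preferred PLAB size starts at plab_min_size
  // and doubles at each refill (plab_min_size, 2x, 4x, ...) until it reaches plab_max_size; both
  // bounds are card-aligned, so every intermediate doubled value stays card-aligned as well.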
  log_debug(gc, free)("Set new PLAB size: " SIZE_FORMAT, future_size);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
    log_debug(gc, free)("Current PLAB size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, cur_size, size);
    return nullptr;
  }

  // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  if (plab->words_remaining() < plab_min_size) {
    // Retire current PLAB. This takes care of any PLAB book-keeping.
    // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
    // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
    retire_plab(plab, thread);

    size_t actual_size = 0;
    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
    if (plab_buf == nullptr) {
      if (min_size == plab_min_size) {
        // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
        // to fail faster on subsequent promotion attempts.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
      }
      return nullptr;
    } else {
      ShenandoahThreadLocalData::enable_plab_retries(thread);
    }
    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
    if (ZeroTLAB) {
      // ... and clear it.
      Copy::zero_to_words(plab_buf, actual_size);
    } else {
      // ...and zap just allocated object.
#ifdef ASSERT
      // Skip mangling the space corresponding to the object header to
      // ensure that the returned space is not considered parsable by
      // any concurrent GC thread.
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
    }
    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
    plab->set_buf(plab_buf, actual_size);
    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
      return nullptr;
    }
    return plab->allocate(size);
  } else {
    // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble
    // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
    return nullptr;
  }
}

HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
  // Align requested sizes to card-sized multiples. Align down so that we don't violate max size of TLAB.
  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
  assert(word_size >= min_size, "Requested PLAB is too small");

  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
  // if we are at risk of infringing on the old-gen evacuation budget.
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
  return res;
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
  // We don't enforce limits on plab evacuations. We let it consume all available old-gen memory in order to reduce
  // probability of an evacuation failure. We do enforce limits on promotion, to make sure that excessive promotion
  // does not result in an old-gen evacuation failure. Note that a failed promotion is relatively harmless. Any
  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.

  // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
  // promotions. Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
  //  1. Some of the plab may have been dedicated to evacuations.
  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
  size_t not_promoted =
    ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
  ShenandoahThreadLocalData::reset_plab_promoted(thread);
  ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
  if (not_promoted > 0) {
    old_generation()->unexpend_promoted(not_promoted);
  }
  const size_t original_waste = plab->waste();
  HeapWord* const top = plab->top();

  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
  // It adds the size of this unused memory, in words, to plab->waste().
  plab->retire();
  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
    // safely walk the region backing the plab.
    log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
                  plab->waste() - original_waste, p2i(top));
    // No lock is necessary because the PLAB memory is aligned on card boundaries.
    old_generation()->card_scan()->register_object_without_lock(top);
  }
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
  Thread* thread = Thread::current();
  retire_plab(plab, thread);
}

ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
  shenandoah_assert_heaplocked_or_safepoint();

  ShenandoahOldGeneration* old_gen = old_generation();
  const ssize_t old_region_balance = old_gen->get_region_balance();
  old_gen->set_region_balance(0);

  if (old_region_balance > 0) {
    const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
    const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
    return TransferResult {
      success, old_region_surplus, "young"
    };
  }

  if (old_region_balance < 0) {
    const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
    const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
    if (!success) {
      old_gen->handle_failed_transfer();
    }
    return TransferResult {
      success, old_region_deficit, "old"
    };
  }

  return TransferResult {true, 0, "none"};
}

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
// xfer_limit is the maximum we're able to transfer from young to old.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {

  // We can limit the old reserve to the size of anticipated promotions:
  // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //    SOEP/100 = OE/TE
  //             = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)    // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  => OE = YE*SOEP/(100-SOEP)

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation()->available();
  // The free set will reserve this amount of memory to hold young evacuations
  const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

  // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.

  const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)?
     bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
                                bound_on_old_reserve);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // Decide how much old space we should reserve for a mixed collection
  double reserve_for_mixed = 0;
  if (old_generation()->has_unprocessed_collection_candidates()) {
    // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
    const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
    assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must be less than total available");
    const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
    reserve_for_mixed = max_evac_need + old_fragmented_available;
    if (reserve_for_mixed > max_old_reserve) {
      reserve_for_mixed = max_old_reserve;
    }
  }

  // Decide how much space we should reserve for promotions from young
  size_t reserve_for_promo = 0;
  const size_t promo_load = old_generation()->get_promotion_potential();
  const bool doing_promotions = promo_load > 0;
  if (doing_promotions) {
    // We're promoting and have a bound on the maximum amount that can be promoted
    assert(max_old_reserve >= reserve_for_mixed, "Sanity");
    const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
    reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
  }

  // This is the total old we want to ideally reserve
  const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
  assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");

  // We now check if the old generation is running a surplus or a deficit.
  const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
  if (max_old_available >= old_reserve) {
    // We are running a surplus, so the old region surplus can go to young
    const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
    const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
    const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
    old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
  } else {
    // We are running a deficit which we'd like to fill from young.
    // Ignore that this will directly impact young_generation()->max_capacity(),
    // indirectly impacting young_reserve and old_reserve. These computations are conservative.
    // Note that deficit is rounded up by one region.
    const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
    const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;

    // Round down the regions we can transfer from young to old. If we're running short
    // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
    // curtailed if the budget is restricted.
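    // Worked example with hypothetical numbers: for 4 MB regions, old_reserve = 50 MB and
    // max_old_available = 41 MB, old_need rounds the 9 MB shortfall up to 3 regions; the actual
    // transfer below is then further capped by max_old_region_xfer.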
    const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
  }
}

void ShenandoahGenerationalHeap::reset_generation_reserves() {
  young_generation()->set_evacuation_reserve(0);
  old_generation()->set_evacuation_reserve(0);
  old_generation()->set_promoted_reserve(0);
}

void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* const young_gen = heap->young_generation();
  ShenandoahOldGeneration* const old_gen = heap->old_generation();
  const size_t young_available = young_gen->available();
  const size_t old_available = old_gen->available();
  ss->print_cr("After %s, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
               PROPERFMT ", young_available: " PROPERFMT,
               when,
               success? "successfully transferred": "failed to transfer", region_count, region_destination,
               PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
}

void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
  class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
  private:
    ShenandoahPhaseTimings::Phase _phase;
    ShenandoahRegionIterator _regions;
  public:
    explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
      WorkerTask("Shenandoah Global Coalesce"),
      _phase(phase) {}

    void work(uint worker_id) override {
      ShenandoahWorkerTimingsTracker timer(_phase,
                                           ShenandoahPhaseTimings::ScanClusters,
                                           worker_id, true);
      ShenandoahHeapRegion* region;
      while ((region = _regions.next()) != nullptr) {
        // old region is not in the collection set and was not immediately trashed
        if (region->is_old() && region->is_active() && !region->is_humongous()) {
          // Reset the coalesce and fill boundary because this is a global collect
          // and cannot be preempted by young collects. We want to be sure the entire
          // region is coalesced here and does not resume from a previously interrupted
          // or completed coalescing.
          region->begin_preemptible_coalesce_and_fill();
          region->oop_coalesce_and_fill(false);
        }
      }
    }
  };

  ShenandoahPhaseTimings::Phase phase = concurrent ?
                                        ShenandoahPhaseTimings::conc_coalesce_and_fill :
                                        ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;

  // This is not cancellable
  ShenandoahGlobalCoalesceAndFill coalesce(phase);
  workers()->run_task(&coalesce);
  old_generation()->set_parsable(true);
}

template<bool CONCURRENT>
class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahGenerationalHeap* _heap;
  ShenandoahRegionIterator* _regions;
  ShenandoahRegionChunkIterator* _work_chunks;

public:
  explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
                                                    ShenandoahRegionChunkIterator* work_chunks) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahGenerationalHeap::heap()),
    _regions(regions),
    _work_chunks(work_chunks)
  {
    bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
    log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;

    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
      // to the mutator free set. At the end of GC, we will have cset_regions newly evacuated fully empty regions from
      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
      // next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set

    ShenandoahHeapRegion* r = _regions->next();
    // We update references for global, old, and young collections.
    ShenandoahGeneration* const gc_generation = _heap->gc_generation();
    shenandoah_assert_generations_reconciled();
    assert(gc_generation->is_mark_complete(), "Expected complete marking");
    ShenandoahMarkingContext* const ctx = _heap->marking_context();
    bool is_mixed = _heap->collection_set()->has_old_regions();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");

      log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region " SIZE_FORMAT, worker_id, r->index());
      bool region_progress = false;
      if (r->is_active() && !r->is_cset()) {
        if (r->is_young()) {
          _heap->marked_object_oop_iterate(r, &cl, update_watermark);
          region_progress = true;
        } else if (r->is_old()) {
          if (gc_generation->is_global()) {
            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
            region_progress = true;
          }
          // Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
          // Don't bother to report pacing progress in this case.
        } else {
          // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
          // to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
          // active status may propagate at a different speed than the changing of the region's affiliation.

          // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
          // by this thread before the region's affiliation() is seen by this thread.

          // It's ok for this race to occur because the newly transformed region does not have any references to be
          // updated.

          assert(r->get_update_watermark() == r->bottom(),
                 "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
                 r->affiliation_name(), r->index());
        }
      }

      if (region_progress && ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
      }

      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }

      r = _regions->next();
    }

    if (!gc_generation->is_global()) {
      // Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
      // set processing if not in generational mode or if GLOBAL mode.

      // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
      // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
      // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
      update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
    }
  }

  template<class T>
  void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {

    struct ShenandoahRegionChunk assignment;
    ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();

    while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
      // Keep grabbing next work chunk to process until finished, or asked to yield
      ShenandoahHeapRegion* r = assignment._r;
      if (r->is_active() && !r->is_cset() && r->is_old()) {
        HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
        HeapWord* end_of_range = r->get_update_watermark();
        if (end_of_range > start_of_range + assignment._chunk_size) {
          end_of_range = start_of_range + assignment._chunk_size;
        }

        if (start_of_range >= end_of_range) {
          continue;
        }

        // Old region in a young cycle or mixed cycle.
        if (is_mixed) {
          if (r->is_humongous()) {
            // Need to examine both dirty and clean cards during mixed evac.
            r->oop_iterate_humongous_slice_all(&cl, start_of_range, assignment._chunk_size);
          } else {
            // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
            // and filled. This will use mark bits to find objects that need to be updated.
            update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
          }
        } else {
          // This is a young evacuation
          size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
          size_t clusters = assignment._chunk_size / cluster_size;
          assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
          scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
        }

        if (ShenandoahPacing) {
          _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
        }
      }
    }
  }

  template<class T>
  void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
                                       const ShenandoahHeapRegion* r, HeapWord* start_of_range,
                                       HeapWord* end_of_range) const {
    // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
    ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());

    // Any object that begins in a previous range is part of a different scanning assignment. Any object that
    // starts after end_of_range is also not my responsibility. (Either allocated during evacuation, so does
    // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)

    // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
    // when no live object is found in the range.
    HeapWord* tams = ctx->top_at_mark_start(r);
    HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);

    while (p < end_of_range) {
      // p is known to point to the beginning of marked object obj
      oop obj = cast_to_oop(p);
      objs.do_object(obj);
      HeapWord* prev_p = p;
      p += obj->size();
      if (p < tams) {
        p = ctx->get_next_marked_addr(p, tams);
        // If there are no more marked objects before tams, this returns tams. Note that tams is
        // either >= end_of_range, or tams is the start of an object that is marked.
      }
      assert(p != prev_p, "Lack of forward progress");
    }
  }

  HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
                                        HeapWord* start_of_range, HeapWord* end_of_range) const {
    HeapWord* p = start_of_range;

    if (p >= tams) {
      // We cannot use ctx->is_marked(obj) to test whether an object begins at this address. Instead,
      // we need to use the remembered set crossing map to advance p to the first object that starts
      // within the enclosing card.
      size_t card_index = scanner->card_index_for_addr(start_of_range);
      while (true) {
        HeapWord* first_object = scanner->first_object_in_card(card_index);
        if (first_object != nullptr) {
          p = first_object;
          break;
        } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
          card_index++;
        } else {
          // Signal that no object was found in range
          p = end_of_range;
          break;
        }
      }
    } else if (!ctx->is_marked(cast_to_oop(p))) {
      p = ctx->get_next_marked_addr(p, tams);
      // If there are no more marked objects before tams, this returns tams.
      // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
    }
    return p;
  }
};

void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  const uint nworkers = workers()->active_workers();
  ShenandoahRegionChunkIterator work_list(nworkers);
  if (concurrent) {
    ShenandoahGenerationalUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  } else {
    ShenandoahGenerationalUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  }

  if (ShenandoahEnableCardStats) {
    // Only do this if we are collecting card stats
    ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
    assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
    card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
  }
}

struct ShenandoahCompositeRegionClosure {
  template<typename C1, typename C2>
  class Closure : public ShenandoahHeapRegionClosure {
  private:
    C1 &_c1;
    C2 &_c2;

  public:
    Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}

    void heap_region_do(ShenandoahHeapRegion* r) override {
      _c1.heap_region_do(r);
      _c2.heap_region_do(r);
    }

    bool is_thread_safe() override {
      return _c1.is_thread_safe() && _c2.is_thread_safe();
    }
  };

  template<typename C1, typename C2>
  static Closure<C1, C2> of(C1 &c1, C2 &c2) {
    return Closure<C1, C2>(c1, c2);
  }
};

class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* _ctx;

public:
  explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    // Maintenance of region age must follow evacuation in order to account for
    // evacuation allocations within survivor regions. We consult region age during
    // the subsequent evacuation to determine whether certain objects need to
    // be promoted.
    if (r->is_young() && r->is_active()) {
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();

      // Allocations move the watermark when top moves. However, compacting
      // objects will sometimes lower top beneath the watermark, after which,
      // attempts to read the watermark will assert out (watermark should not be
      // higher than top).
      if (top > tams) {
        // There have been allocations in this region since the start of the cycle.
        // Any objects new to this region must not assimilate elevated age.
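        // Resetting (rather than incrementing) the age keeps these newly allocated survivors from
        // inheriting the elevated age of the region and being promoted prematurely.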
        r->reset_age();
      } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
        r->increment_age();
      }
    }
  }

  bool is_thread_safe() override {
    return true;
  }
};

void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates pins;
  ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
  auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
  shenandoah_assert_heaplocked_or_safepoint();
  if (is_concurrent_old_mark_in_progress()) {
    // This is still necessary for degenerated cycles because the degeneration point may occur
    // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_updaterefs for
    // a more detailed explanation.
    old_generation()->transfer_pointers_from_satb();
  }

  // We defer generation resizing actions until after cset regions have been recycled.
  TransferResult result = balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Degenerated GC", &ls);
  }

  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
  // transient state. Otherwise, these actions have no effect.
  reset_generation_reserves();

  if (!old_generation()->is_parsable()) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
    coalesce_and_fill_old_regions(false);
  }
}

void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
  if (!old_generation()->is_parsable()) {
    // Class unloading may render the card offsets unusable, so we must rebuild them before
    // the next remembered set scan. We _could_ let the control thread do this sometime after
    // the global cycle has completed and before the next young collection, but under memory
    // pressure the control thread may not have the time (that is, because it's running back
    // to back GCs). In that scenario, we would have to make the old regions parsable before
    // we could start a young collection. This could delay the start of the young cycle and
    // throw off the heuristics.
    entry_global_coalesce_and_fill();
  }

  TransferResult result;
  {
    ShenandoahHeapLocker locker(lock());

    result = balance_generations();
    reset_generation_reserves();
  }

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Concurrent GC", &ls);
  }
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
  const char* msg = "Coalescing and filling old regions";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  coalesce_and_fill_old_regions(true);
}

void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
  ShenandoahUpdateRegionAges cl(ctx);
  parallel_heap_region_iterate(&cl);
}