/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

class ShenandoahFlushAllSATB : public ThreadClosure {
private:
  SATBMarkQueueSet& _satb_qset;

public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset) {}

  void do_thread(Thread* thread) override {
    // Transfer any partial buffer to the qset for completed buffer processing.
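    // Completed buffers are already owned by the qset; only the thread-local,
    // partially filled buffer needs this explicit flush.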
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
  }
};

class ShenandoahProcessOldSATB : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue*       _queue;
  ShenandoahHeap*                 _heap;
  ShenandoahMarkingContext* const _mark_context;
  size_t                          _trashed_oops;

public:
  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) override {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        _trashed_oops++;
      }
    }
  }

  size_t trashed_oops() const {
    return _trashed_oops;
  }
};

class ShenandoahPurgeSATBTask : public WorkerTask {
private:
  ShenandoahObjToScanQueueSet* _mark_queues;
  volatile size_t              _trashed_oops;

public:
  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_debug(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers", _trashed_oops);
    }
  }

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::possibly_parallel_threads_do(true /* is_par */, &flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor.trashed_oops());
  }
};

class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
private:
  uint                   _nworkers;
  ShenandoahHeapRegion** _coalesce_and_fill_region_array;
  uint                   _coalesce_and_fill_region_count;
  volatile bool          _is_preempted;

public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers,
                                          ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

  void work(uint worker_id) override {
    ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::conc_coalesce_and_fill, ShenandoahPhaseTimings::ScanClusters, worker_id);
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage,
        // so no need to coalesce or fill.
        continue;
      }

      if (!r->oop_coalesce_and_fill(true)) {
        // Coalesce and fill has been preempted
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }

  // Value returned from is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _old_heuristics(nullptr),
    _region_balance(0),
    _promoted_reserve(0),
    _promoted_expended(0),
    _promotion_potential(0),
    _pad_for_promote_in_place(0),
    _promotable_humongous_regions(0),
    _promotable_regular_regions(0),
    _is_parsable(true),
    _card_scan(nullptr),
    _state(WAITING_FOR_BOOTSTRAP),
    _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION),
    _min_growth_before_compaction((ShenandoahMinOldGenGrowthPercent * FRACTIONAL_DENOMINATOR) / 100)
{
  _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
  // Always clear references for old generation
  ref_processor()->set_soft_reference_policy(true);

  if (ShenandoahCardBarrier) {
    ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
    size_t card_count = card_table->cards_required(ShenandoahHeap::heap()->reserved_region().word_size());
    auto rs = new ShenandoahDirectCardMarkRememberedSet(card_table, card_count);
    _card_scan = new ShenandoahScanRemembered(rs);
  }
}

void ShenandoahOldGeneration::set_promoted_reserve(size_t new_val) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve = new_val;
}

size_t ShenandoahOldGeneration::get_promoted_reserve() const {
  return _promoted_reserve;
}

void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve += increment;
}

void ShenandoahOldGeneration::reset_promoted_expended() {
  shenandoah_assert_heaplocked_or_safepoint();
  Atomic::store(&_promoted_expended, (size_t) 0);
}

size_t ShenandoahOldGeneration::expend_promoted(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(get_promoted_expended() + increment <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
  return Atomic::add(&_promoted_expended, increment);
}

size_t ShenandoahOldGeneration::unexpend_promoted(size_t decrement) {
  return Atomic::sub(&_promoted_expended, decrement);
}

size_t ShenandoahOldGeneration::get_promoted_expended() const {
  return Atomic::load(&_promoted_expended);
}

bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) const {
  assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");

  const size_t requested_bytes = req.size() * HeapWordSize;
  // The promotion reserve may also be used for evacuations. If we can promote this object,
  // then we can also evacuate it.
  if (can_promote(requested_bytes)) {
    // The promotion reserve should be able to accommodate this request. The request
    // might still fail if alignment with the card table increases the size. The request
    // may also fail if the heap is badly fragmented and the free set cannot find room for it.
    return true;
  }

  if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
    // The promotion reserve cannot accommodate this plab request. Check if we still have room for
    // evacuations. Note that we cannot really know how much of the plab will be used for evacuations,
    // so here we only check that some evacuation reserve still exists.
    return get_evacuation_reserve() > 0;
  }

  // This is a shared allocation request. We've already checked that it can't be promoted, so if
  // it is a promotion, we return false. Otherwise, it is a shared evacuation request, and we allow
  // the allocation to proceed.
  return !req.is_promotion();
}

void
ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAllocRequest &req) {
  // Note: Even when a mutator is performing a promotion outside a LAB, we use a 'shared_gc' request.
  if (req.is_gc_alloc()) {
    const size_t actual_size = req.actual_size() * HeapWordSize;
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      // We've created a new plab. Now we configure whether it will be used for promotions
      // and evacuations, or just evacuations.
      Thread* thread = Thread::current();
      ShenandoahThreadLocalData::reset_plab_promoted(thread);

      // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
      // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
      if (can_promote(actual_size)) {
        // Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreaching.
        // When we retire this plab, we'll unexpend what we don't really use.
        expend_promoted(actual_size);
        ShenandoahThreadLocalData::enable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
      } else {
        // Disable promotions in this thread because the entirety of this PLAB must be available to hold old-gen evacuations.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
      }
    } else if (req.is_promotion()) {
      // Shared promotion.
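      // Charge the entire actual size of this shared (non-LAB) promotion against
      // the promotion reserve. There is no PLAB to retire here, so nothing is
      // unexpended later.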
      expend_promoted(actual_size);
    }
  }
}

size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const {
  return _live_bytes_after_last_mark;
}

void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) {
  if (bytes == 0) {
    // Restart the search for the best old-gen size from the initial state
    _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
    _growth_before_compaction = INITIAL_GROWTH_BEFORE_COMPACTION;
  } else {
    _live_bytes_after_last_mark = bytes;
    _growth_before_compaction /= 2;
    if (_growth_before_compaction < _min_growth_before_compaction) {
      _growth_before_compaction = _min_growth_before_compaction;
    }
  }
}

void ShenandoahOldGeneration::handle_failed_transfer() {
  _old_heuristics->trigger_cannot_expand();
}

size_t ShenandoahOldGeneration::usage_trigger_threshold() const {
  size_t result = _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR;
  return result;
}

bool ShenandoahOldGeneration::contains(ShenandoahAffiliation affiliation) const {
  return affiliation == OLD_GENERATION;
}

bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  return region->is_old();
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_debug(gc)("Abandon SATB buffers");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::cancel_gc() {
  shenandoah_assert_safepoint();
  if (is_idle()) {
#ifdef ASSERT
    validate_waiting_for_bootstrap();
#endif
  } else {
    log_info(gc)("Terminating old gc cycle.");
    // Stop marking
    cancel_marking();
    // Stop tracking old regions
    abandon_collection_candidates();
    // Remove old generation access to young generation mark queues
    ShenandoahHeap::heap()->young_generation()->set_old_gen_task_queues(nullptr);
    // Transition to IDLE now.
    transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

void ShenandoahOldGeneration::prepare_gc() {
  // Now that we have made the old generation parsable, it is safe to reset the mark bitmap.
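  // Coalesce-and-fill depends on the complete mark bitmap to find dead objects,
  // so the bitmap must not be reset while the old generation is still FILLING.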
  assert(state() != FILLING, "Cannot reset old without making it parsable");

  ShenandoahGeneration::prepare_gc();
}

bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  static const char* msg = "Coalescing and filling (Old)";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              msg);

  return coalesce_and_fill();
}

// Make the old generation regions parsable, so they can be safely
// scanned when looking for objects in memory indicated by dirty cards.
bool ShenandoahOldGeneration::coalesce_and_fill() {
  transition_to(FILLING);

  // This code will see the same set of regions to fill on each resumption as it did
  // on the initial run. That's okay because each region keeps track of its own coalesce
  // and fill state. Regions that were filled on a prior attempt will not try to fill again.
  uint coalesce_and_fill_regions_count = _old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= ShenandoahHeap::heap()->num_regions(), "Sanity");
  if (coalesce_and_fill_regions_count == 0) {
    // No regions need to be filled.
    abandon_collection_candidates();
    return true;
  }

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  log_debug(gc)("Starting (or resuming) coalesce-and-fill of " UINT32_FORMAT " old heap regions", coalesce_and_fill_regions_count);
  workers->run_task(&task);
  if (task.is_completed()) {
    // We no longer need to track regions that need to be coalesced and filled.
    abandon_collection_candidates();
    return true;
  } else {
    // Coalesce-and-fill has been preempted. We'll finish that effort in the future. Do not invoke
    // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done because it resets the mark bitmap
    // and invokes set_mark_incomplete(). Coalesce-and-fill depends on the mark bitmap.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

void ShenandoahOldGeneration::transfer_pointers_from_satb() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_safepoint();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_debug(gc)("Transfer SATB buffers");
  uint nworkers = heap->workers()->active_workers();
  StrongRootsScope scope(nworkers);

  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::choose_cset :
                            ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    _old_heuristics->prepare_for_old_collections();
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t cset_young_regions, cset_old_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
    // This is just old-gen completion. No future budgeting required here. The only reason to rebuild
    // the free set here is in case there was any immediate old garbage identified.
    heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old);
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case WAITING_FOR_BOOTSTRAP:   return "Waiting for Bootstrap";
    case FILLING:                 return "Coalescing";
    case BOOTSTRAPPING:           return "Bootstrapping";
    case MARKING:                 return "Marking";
    case EVACUATING:              return "Evacuating";
    case EVACUATING_AFTER_GLOBAL: return "Evacuating (G)";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}

void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_debug(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    EventMark event("Old was %s, now is %s", state_name(_state), state_name(new_state));
    validate_transition(new_state);
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object which
// has become dead on previous old cycles will result in crashes. To avoid visiting
// such objects, the remembered set scan will use the old generation mark bitmap when
// possible. It is _not_ possible to use the old generation bitmap when old marking
// is active (bitmap is not complete). For this reason, the old regions are made
// parsable _before_ the old generation bitmap is reset. The diagram does not depict
// cancellation of old collections by global or full collections.
//
// When a global collection supersedes an old collection, the global mark still
// "completes" the old mark bitmap. Subsequent remembered set scans may use the
// old generation mark bitmap, but any uncollected old regions must still be made parsable
// before the next old generation cycle begins. For this reason, a global collection may
// create mixed collection candidates and coalesce-and-fill candidates and will put
// the old generation in the respective states (EVACUATING or FILLING). After a Full GC,
// the mark bitmaps are all reset, all regions are parsable and the mark context will
// not be "complete". After a Full GC, remembered set scans will _not_ use the mark bitmap
// and we expect the old generation to be waiting for bootstrap.
//
//                                +-----------------+
//                 +------------> |     FILLING     | <---+
//                 |   +--------> |                 |     |
//                 |   |          +-----------------+     |
//                 |   |            |                     |
//                 |   |            | Filling Complete    | <-> A global collection may
//                 |   |            v                     |     move the old generation
//                 |   |          +-----------------+     |     directly from waiting for
//             +-- |-- |--------> |     WAITING     |     |     bootstrap to filling or
//             |   |   |    +---- |  FOR BOOTSTRAP  | ----+     evacuating. It may also
//             |   |   |    |     +-----------------+           move from filling to waiting
//             |   |   |    |       |                           for bootstrap.
//             |   |   |    |       | Reset Bitmap
//             |   |   |    |       v
//             |   |   |    |     +-----------------+     +----------------------+
//             |   |   |    |     |    BOOTSTRAP    | <-> |       YOUNG GC       |
//             |   |   |    |     |                 |     | (RSet Parses Region) |
//             |   |   |    |     +-----------------+     +----------------------+
//             |   |   |    |       |
//             |   |   |    |       | Old Marking
//             |   |   |    |       v
//             |   |   |    |     +-----------------+     +----------------------+
//             |   |   |    |     |     MARKING     | <-> |       YOUNG GC       |
//             |   |   +--------- |                 |     | (RSet Parses Region) |
//             |   |        |     +-----------------+     +----------------------+
//             |   |        |       |
//             |   |        |       | Has Evacuation Candidates
//             |   |        |       v
//             |   |        |     +-----------------+     +--------------------+
//             |   |        +---> |    EVACUATING   | <-> |      YOUNG GC      |
//             |   +------------- |                 |     | (RSet Uses Bitmap) |
//             |                  +-----------------+     +--------------------+
//             |                    |
//             |                    | Global Cycle Coalesces and Fills Old Regions
//             |                    v
//             |                  +-----------------+     +--------------------+
//             +----------------- |    EVACUATING   | <-> |      YOUNG GC      |
//                                |  AFTER GLOBAL   |     | (RSet Uses Bitmap) |
//                                +-----------------+     +--------------------+
//
//
void ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  switch (new_state) {
    case FILLING:
      assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping");
      assert(is_mark_complete(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill.");
      break;
    case WAITING_FOR_BOOTSTRAP:
      // GC cancellation can send us back here from any state.
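      // The invariants of the bootstrap-ready state (no old marking, no old mark
      // queues, no candidates) are asserted by validate_waiting_for_bootstrap() below.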
      validate_waiting_for_bootstrap();
      break;
    case BOOTSTRAPPING:
      assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable.");
      break;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state));
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      break;
    case EVACUATING_AFTER_GLOBAL:
      assert(_state == EVACUATING, "Must have been evacuating, state is '%s'", state_name(_state));
      break;
    case EVACUATING:
      assert(_state == WAITING_FOR_BOOTSTRAP || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      break;
    default:
      fatal("Unknown new state");
  }
}

bool ShenandoahOldGeneration::validate_waiting_for_bootstrap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
  assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping.");
  assert(!is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
  assert(!heap->young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
  assert(!_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
  assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot have mixed collection candidates in IDLE");
  return true;
}
#endif

ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _old_heuristics = new ShenandoahOldHeuristics(this, ShenandoahGenerationalHeap::heap());
  _old_heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _heuristics = _old_heuristics;
  return _heuristics;
}

void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}

void ShenandoahOldGeneration::handle_failed_evacuation() {
  if (_failed_evacuation.try_set()) {
    log_debug(gc)("Old gen evac failure.");
  }
}

void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t size) {
  // We squelch excessive reports to reduce noise in logs.
  const size_t MaxReportsPerEpoch = 4;
  static size_t last_report_epoch = 0;
  static size_t epoch_report_count = 0;
  auto heap = ShenandoahGenerationalHeap::heap();

  size_t promotion_reserve;
  size_t promotion_expended;

  const size_t gc_id = heap->control_thread()->get_gc_id();

  if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
    {
      // Promotion failures should be very rare. Invest in providing useful diagnostic info.
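      // Read the reserve and expended counters under the heap lock so the
      // logged values are mutually consistent.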
      ShenandoahHeapLocker locker(heap->lock());
      promotion_reserve = get_promoted_reserve();
      promotion_expended = get_promoted_expended();
    }
    PLAB* const plab = ShenandoahThreadLocalData::plab(thread);
    const size_t words_remaining = (plab == nullptr) ? 0 : plab->words_remaining();
    const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread) ? "enabled" : "disabled";

    log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
                       ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT
                       ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT,
                       size * HeapWordSize, plab == nullptr ? "no" : "yes",
                       words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended,
                       max_capacity(), used(), free_unaffiliated_regions());

    if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
      log_debug(gc, ergo)("Squelching additional promotion failure reports for current epoch");
    } else if (gc_id != last_report_epoch) {
      last_report_epoch = gc_id;
      epoch_report_count = 1;
    }
  }
}

void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, bool promotion) {
  // Only register the copy of the object that won the evacuation race.
  _card_scan->register_object_without_lock(obj);

  // Mark the entire range of the evacuated object as dirty. At the next remembered set scan,
  // we will clear dirty bits that do not hold interesting pointers. It's more efficient to
  // do this in batch, in a background GC thread, than to try to carefully dirty only cards
  // that hold interesting pointers right now.
  _card_scan->mark_range_as_dirty(obj, words);

  if (promotion) {
    // This evacuation was a promotion, so track it as allocation against old gen.
    increase_allocated(words * HeapWordSize);
  }
}

bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() {
  return _old_heuristics->unprocessed_old_collection_candidates() > 0;
}

size_t ShenandoahOldGeneration::unprocessed_collection_candidates_live_memory() {
  return _old_heuristics->unprocessed_old_collection_candidates_live_memory();
}

void ShenandoahOldGeneration::abandon_collection_candidates() {
  _old_heuristics->abandon_collection_candidates();
}

void ShenandoahOldGeneration::prepare_for_mixed_collections_after_global_gc() {
  assert(is_mark_complete(), "Expected old generation mark to be complete after global cycle.");
  _old_heuristics->prepare_for_old_collections();
  log_info(gc, ergo)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT,
                     _old_heuristics->unprocessed_old_collection_candidates(),
                     _old_heuristics->coalesce_and_fill_candidates_count());
}

void ShenandoahOldGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
  // Iterate over old and free regions (exclude young).
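  // Excluding YOUNG_GENERATION leaves both OLD and FREE (unaffiliated) regions
  // visible to the closure.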
  ShenandoahExcludeRegionClosure<YOUNG_GENERATION> exclude_cl(cl);
  ShenandoahGeneration::parallel_heap_region_iterate_free(&exclude_cl);
}

void ShenandoahOldGeneration::set_parsable(bool parsable) {
  _is_parsable = parsable;
  if (_is_parsable) {
    // The current state would have been chosen during final mark of the global
    // collection, _before_ any decisions about class unloading have been made.
    //
    // After unloading classes, we have made the old generation regions parsable.
    // We can skip filling or transition to a state that knows everything has
    // already been filled.
    switch (state()) {
      case ShenandoahOldGeneration::EVACUATING:
        transition_to(ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL);
        break;
      case ShenandoahOldGeneration::FILLING:
        assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Expected no mixed collection candidates");
        assert(_old_heuristics->coalesce_and_fill_candidates_count() > 0, "Expected coalesce and fill candidates");
        // When the heuristic put the old generation in this state, it didn't know
        // that we would unload classes and make everything parsable. But we know
        // that now, so we can override this state.
        abandon_collection_candidates();
        transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
        break;
      default:
        // We can get here during a full GC. The full GC will cancel anything
        // happening in the old generation and return it to the waiting for bootstrap
        // state. The full GC will then record that the old regions are parsable
        // after rebuilding the remembered set.
        assert(is_idle(), "Unexpected state %s at end of global GC", state_name());
        break;
    }
  }
}

void ShenandoahOldGeneration::complete_mixed_evacuations() {
  assert(is_doing_mixed_evacuations(), "Mixed evacuations should be in progress");
  if (!_old_heuristics->has_coalesce_and_fill_candidates()) {
    // No candidate regions to coalesce and fill
    transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
    return;
  }

  if (state() == ShenandoahOldGeneration::EVACUATING) {
    transition_to(ShenandoahOldGeneration::FILLING);
    return;
  }

  // Here, we have no more candidates for mixed collections. The candidates for coalescing
  // and filling have already been processed during the global cycle, so there is nothing
  // more to do.
  assert(state() == ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL, "Should be evacuating after a global cycle");
  abandon_collection_candidates();
  transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
}

void ShenandoahOldGeneration::abandon_mixed_evacuations() {
  switch (state()) {
    case ShenandoahOldGeneration::EVACUATING:
      transition_to(ShenandoahOldGeneration::FILLING);
      break;
    case ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL:
      abandon_collection_candidates();
      transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      break;
    default:
      log_warning(gc)("Abandon mixed evacuations in unexpected state: %s", state_name(state()));
      ShouldNotReachHere();
      break;
  }
}

void ShenandoahOldGeneration::clear_cards_for(ShenandoahHeapRegion* region) {
  _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
}

void ShenandoahOldGeneration::mark_card_as_dirty(void* location) {
  _card_scan->mark_card_as_dirty((HeapWord*)location);
}