/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

// After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
// registering all objects between bottom() and top(), and setting remembered set cards to
// DIRTY if they hold interesting pointers.
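//
// A purely illustrative example (card boundaries and addresses are hypothetical, not the actual
// card-table constants): if an old-gen object spans the words covered by cards C1..C3 and one of
// its fields, lying on card C2, refers to a young-gen object, then C2 is marked DIRTY. Later
// remembered set scans only need to examine the dirty cards instead of the whole old region.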
class ShenandoahReconstructRememberedSetTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahReconstructRememberedSetTask() :
    WorkerTask("Shenandoah Reconstruct Remembered Set") { }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* r = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    RememberedScanner* scanner = heap->card_scan();
    ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers;

    while (r != nullptr) {
      if (r->is_old() && r->is_active()) {
        HeapWord* obj_addr = r->bottom();
        if (r->is_humongous_start()) {
          // Determine the size of the humongous object, so we know how many regions it spans
          oop obj = cast_to_oop(obj_addr);
          size_t size = obj->size();
          HeapWord* end_object = r->bottom() + size;

          // First, clear the remembered set for all spanned humongous regions
          size_t num_regions = (size + ShenandoahHeapRegion::region_size_words() - 1) / ShenandoahHeapRegion::region_size_words();
          size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words();
          scanner->reset_remset(r->bottom(), region_span);
          size_t region_index = r->index();
          ShenandoahHeapRegion* humongous_region = heap->get_region(region_index);
          while (num_regions-- != 0) {
            scanner->reset_object_range(humongous_region->bottom(), humongous_region->end());
            region_index++;
            humongous_region = heap->get_region(region_index);
          }

          // Then register the humongous object and DIRTY relevant remembered set cards
          scanner->register_object_wo_lock(obj_addr);
          obj->oop_iterate(&dirty_cards_for_interesting_pointers);
        } else if (!r->is_humongous()) {
          // First, clear the remembered set
          scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words());
          scanner->reset_object_range(r->bottom(), r->end());

          // Then iterate over all objects, registering object and DIRTYing relevant remembered set cards
          HeapWord* t = r->top();
          while (obj_addr < t) {
            oop obj = cast_to_oop(obj_addr);
            size_t size = obj->size();
            scanner->register_object_wo_lock(obj_addr);
            obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers);
          }
        } // else, ignore humongous continuation region
      }
      // else, this region is FREE or YOUNG or inactive and we can ignore it.
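      // (The remembered set only tracks pointers from old-gen into young-gen, so FREE and YOUNG
      // regions have nothing to register or dirty here.)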
      r = _regions.next();
    }
  }
};

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();
  if (heap->mode()->is_generational()) {
    heap->log_heap_status("At end of Full GC");

    // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
    // made valid by the time Full GC completes.
    assert(heap->old_generation()->used_regions_size() <= heap->old_generation()->adjusted_capacity(),
           "Old generation affiliated regions must not exceed capacity");
    assert(heap->young_generation()->used_regions_size() <= heap->young_generation()->adjusted_capacity(),
           "Young generation affiliated regions must not exceed capacity");
  }
  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());

  if (heap->mode()->is_generational()) {
    // Defer unadjust_available() invocations until after Full GC finishes its efforts because Full GC makes use
    // of young-gen memory that may have been loaned from old-gen.

    // No need to call old_gen->increase_used(): that was done when plabs were allocated, accounting for both old evacs and promotions.

    heap->set_alloc_supplement_reserve(0);
    heap->set_young_evac_reserve(0);
    heap->set_old_evac_reserve(0);
    heap->reset_old_evac_expended();
    heap->set_promoted_reserve(0);

    // Full GC supersedes any marking or coalescing in old generation.
    heap->cancel_old_gc();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->global_generation()->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->global_generation()->is_mark_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // TODO: Do we need to explicitly retire PLABs?
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();

    if (heap->mode()->is_generational()) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
      ShenandoahReconstructRememberedSetTask task;
      heap->workers()->run_task(&task);
    }
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  heap->adjust_generation_sizes();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    if (heap->mode()->is_generational()) {
      heap->verifier()->verify_after_generational_fullgc();
    } else {
      heap->verifier()->verify_after_fullgc();
    }
  }

  // Having reclaimed all dead memory, it is now safe to restore capacities to original values.
  heap->young_generation()->unadjust_available();
  heap->old_generation()->unadjust_available();

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    if (r->affiliation() != FREE) {
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  size_t const _num_workers;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices,
                                     size_t num_workers);

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // moves are special-cased here, because they are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id);
};

class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahPrepareForCompactionTask* _compactor;
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;

  // _empty_regions is a thread-local list of heap regions that have been completely emptied by this worker thread's
  // compaction efforts. The worker thread that drives these efforts adds compacted regions to this list if the
  // region has not been compacted onto itself.
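  // Emptied regions are later handed out in order (tracked by _empty_regions_pos) whenever a new to-region is
  // needed, either for promoting an aged object or when the current to-region fills up; see do_object() below.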
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _old_to_region;
  ShenandoahHeapRegion* _young_to_region;
  ShenandoahHeapRegion* _from_region;
  ShenandoahRegionAffiliation _from_affiliation;
  HeapWord* _old_compact_point;
  HeapWord* _young_compact_point;
  uint _worker_id;

public:
  ShenandoahPrepareForGenerationalCompactionObjectClosure(ShenandoahPrepareForCompactionTask* compactor,
                                                          PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* old_to_region,
                                                          ShenandoahHeapRegion* young_to_region, uint worker_id) :
    _compactor(compactor),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _old_to_region(old_to_region),
    _young_to_region(young_to_region),
    _from_region(nullptr),
    _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr),
    _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr),
    _worker_id(worker_id) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
    _from_affiliation = from_region->affiliation();
    if (_from_region->has_live()) {
      if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
        if (_old_to_region == nullptr) {
          _old_to_region = from_region;
          _old_compact_point = from_region->bottom();
        }
      } else {
        assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
        if (_young_to_region == nullptr) {
          _young_to_region = from_region;
          _young_compact_point = from_region->bottom();
        }
      }
    } // else, we won't iterate over this _from_region so we don't need to set up a to-region to hold copies
  }

  void finish() {
    finish_old_region();
    finish_young_region();
  }

  void finish_old_region() {
    if (_old_to_region != nullptr) {
      log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
                    _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
      _old_to_region->set_new_top(_old_compact_point);
      _old_to_region = nullptr;
    }
  }

  void finish_young_region() {
    if (_young_to_region != nullptr) {
      log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
                    _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
      _young_to_region->set_new_top(_young_compact_point);
      _young_to_region = nullptr;
    }
  }

  bool is_compact_same_region() {
    return (_from_region == _old_to_region) || (_from_region == _young_to_region);
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
           "Object must reside in _from_region");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    uint from_region_age = _from_region->age();
    uint object_age = p->age();

    bool promote_object = false;
    if ((_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) &&
        (from_region_age + object_age >= InitialTenuringThreshold)) {
      if ((_old_to_region != nullptr) && (_old_compact_point + obj_size > _old_to_region->end())) {
        finish_old_region();
        _old_to_region = nullptr;
      }
      if (_old_to_region == nullptr) {
        if (_empty_regions_pos < _empty_regions.length()) {
          ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(OLD_GENERATION);
          _old_to_region = new_to_region;
          _old_compact_point = _old_to_region->bottom();
          promote_object = true;
        }
        // Else this worker thread does not yet have any empty regions into which this aged object can be promoted so
        // we leave promote_object as false, deferring the promotion.
      } else {
        promote_object = true;
      }
    }

    if (promote_object || (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION)) {
      assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
      if (_old_compact_point + obj_size > _old_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                      p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

        // Object does not fit. Get a new _old_to_region.
        finish_old_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(OLD_GENERATION);
        } else {
          // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
          // from _from_region. That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
          new_to_region = _from_region;
        }

        assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
        assert(new_to_region != nullptr, "must not be nullptr");
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(nullptr, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_old_compact_point));
      _old_compact_point += obj_size;
    } else {
      assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION,
             "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
      assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

      // After full gc compaction, all regions have age 0. Embed the region's age into the object's age in order to preserve
      // tenuring progress.
      if (_heap->is_aging_cycle()) {
        _heap->increase_object_age(p, from_region_age + 1);
      } else {
        _heap->increase_object_age(p, from_region_age);
      }

      if (_young_compact_point + obj_size > _young_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                      p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

        // Object does not fit. Get a new _young_to_region.
        finish_young_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(YOUNG_GENERATION);
        } else {
          // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
          // from _from_region. That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
          new_to_region = _from_region;
        }

        assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
        assert(new_to_region != nullptr, "must not be nullptr");
        _young_to_region = new_to_region;
        _young_compact_point = _young_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(nullptr, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_young_compact_point));
      _young_compact_point += obj_size;
    }
  }
};


class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != nullptr, "should not happen");
    assert(!_heap->mode()->is_generational(), "Generational GC should use different Closure");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};


ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
                                                                       ShenandoahHeapRegionSet **worker_slices,
                                                                       size_t num_workers) :
  WorkerTask("Shenandoah Prepare For Compaction"),
  _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()),
  _worker_slices(worker_slices), _num_workers(num_workers) { }


void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr;
    ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr;
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(this, _preserved_marks->get(worker_id), empty_regions,
                                                               old_to_region, young_to_region, worker_id);
    while (from_region != nullptr) {
      assert(is_candidate_region(from_region), "Sanity");
      log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
                    worker_id, affiliation_name(from_region->affiliation()),
                    from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }
      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    while (from_region != nullptr) {
      assert(is_candidate_region(from_region), "Sanity");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    bool is_generational = _heap->mode()->is_generational();
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      // Leave affiliation unchanged.
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates which regions are empty (and therefore free)
    // to the rest of the Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->affiliation() != FREE) {
      if (r->is_humongous_start()) {
        oop humongous_obj = cast_to_oop(r->bottom());
        if (!_ctx->is_marked(humongous_obj)) {
          assert(!r->has_live(),
                 "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live",
                 affiliation_name(r->affiliation()), r->index());
          log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because it is not marked", r->index());
          _heap->trash_humongous_region_at(r);
        } else {
          assert(r->has_live(),
                 "Humongous Start %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()), r->index());
        }
      } else if (r->is_humongous_continuation()) {
        // If we hit continuation, the non-live humongous starts should have been trashed already
        assert(r->humongous_start_region()->has_live(),
               "Humongous Continuation %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()), r->index());
      } else if (r->is_regular()) {
        if (!r->has_live()) {
          log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because it has no live data", r->index());
          r->make_trash_immediate();
        }
      }
    }
    // else, ignore this FREE region.
    // TODO: change iterators so they do not process FREE regions.
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live data,
  // we need to only take what the worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left. Which means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data there is. This would approximate the size of dense prefix
  // we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // About to figure out which regions can be compacted, so make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  if (heap->mode()->is_generational()) {
    heap->young_generation()->clear_used();
    heap->old_generation()->clear_used();
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    size_t num_workers = heap->max_workers();

    ResourceMark rm;
    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
        // Pinned regions are not compacted so they may still hold unmarked objects with
        // references to reclaimed memory. Remembered set scanning will crash if it attempts
        // to iterate the oops in these objects.
        r->begin_preemptible_coalesce_and_fill();
        r->oop_fill_and_coalesce_wo_cancel();
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
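  // Note: constructing this closure clears the free set; heap_region_do() then re-derives each region's state
  // after compaction (resetting TAMS for unpinned regions, recycling trash, and accumulating per-generation usage).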
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");
    bool is_generational = _heap->mode()->is_generational();

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      if (!is_generational) {
        r->make_young_maybe();
      }
      // else, generational mode compaction has already established affiliation.
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    // Update final usage for generations
    if (is_generational && live != 0) {
      if (r->is_young()) {
        _heap->young_generation()->increase_used(live);
      } else if (r->is_old()) {
        _heap->old_generation()->increase_used(live);
      }
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.
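  //
  // In outline: regions are scanned from the top of the heap downward; for each humongous start region whose
  // object was forwarded by calculate_target_humongous_objects(), the object's words are copied to the target
  // regions, the old regions are turned back into regular regions, and the target regions are re-flagged as
  // humongous start/continuation while keeping the original affiliation.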

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(heap->get_region(old_start)->bottom()));
      log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT,
                    old_start, new_start);

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahRegionAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // Leave humongous region affiliation unchanged.
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
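// (The TAMS side of this is handled in ShenandoahPostCompactClosure above, which likewise skips
// reset_top_at_mark_start() for pinned regions.)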
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    if (heap->mode()->is_generational()) {
      heap->young_generation()->clear_used();
      heap->old_generation()->clear_used();
    }

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());
    if (heap->mode()->is_generational()) {
      log_info(gc)("FullGC done: GLOBAL usage: " SIZE_FORMAT ", young usage: " SIZE_FORMAT ", old usage: " SIZE_FORMAT,
                   post_compact.get_live(), heap->young_generation()->used(), heap->old_generation()->used());
    }

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc(true /* clear oom handler */);
}