/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

// After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
// registering all objects between bottom() and top(), and setting remembered set cards to
// DIRTY if they hold interesting pointers.
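//
// In the generational mode, the "interesting pointers" are references that point from
// old-generation objects into the young generation: dirtying their cards lets subsequent
// young collections re-scan only those cards instead of the entire old generation.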
class ShenandoahReconstructRememberedSetTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahReconstructRememberedSetTask() :
    WorkerTask("Shenandoah Reconstruct Remembered Set") { }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* r = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    RememberedScanner* scanner = heap->card_scan();
    ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers;

    while (r != NULL) {
      if (r->is_old() && r->is_active()) {
        HeapWord* obj_addr = r->bottom();
        if (r->is_humongous_start()) {
          // Determine the size of the humongous object so we know how many regions it spans
          oop obj = cast_to_oop(obj_addr);
          size_t size = obj->size();
          HeapWord* end_object = r->bottom() + size;

          // First, clear the remembered set for all spanned humongous regions
          size_t num_regions = (size + ShenandoahHeapRegion::region_size_words() - 1) / ShenandoahHeapRegion::region_size_words();
          size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words();
          scanner->reset_remset(r->bottom(), region_span);
          size_t region_index = r->index();
          ShenandoahHeapRegion* humongous_region = heap->get_region(region_index);
          while (num_regions-- != 0) {
            scanner->reset_object_range(humongous_region->bottom(), humongous_region->end());
            region_index++;
            humongous_region = heap->get_region(region_index);
          }

          // Then register the humongous object and DIRTY relevant remembered set cards
          scanner->register_object_wo_lock(obj_addr);
          obj->oop_iterate(&dirty_cards_for_interesting_pointers);
        } else if (!r->is_humongous()) {
          // First, clear the remembered set
          scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words());
          scanner->reset_object_range(r->bottom(), r->end());

          // Then iterate over all objects, registering each object and DIRTYing relevant remembered set cards
          HeapWord* t = r->top();
          while (obj_addr < t) {
            oop obj = cast_to_oop(obj_addr);
            size_t size = obj->size();
            scanner->register_object_wo_lock(obj_addr);
            obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers);
          }
        } // else, ignore humongous continuation region
      }
      // else, this region is FREE or YOUNG or inactive and we can ignore it.
      r = _regions.next();
    }
  }
};

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Since we may arrive here from a degenerated GC failure of either young or old, establish the generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());

  // There will be no concurrent allocations during full GC, so reset these coordination variables.
  heap->young_generation()->unadjust_available();
  heap->old_generation()->unadjust_available();
  // No need to old_gen->increase_used(). That was done when plabs were allocated,
  // accounting for both old evacs and promotions.

  heap->set_alloc_supplement_reserve(0);
  heap->set_young_evac_reserve(0);
  heap->set_old_evac_reserve(0);
  heap->reset_old_evac_expended();
  heap->set_promotion_reserve(0);

  if (heap->mode()->is_generational()) {
    // Full GC supersedes any marking or coalescing in old generation.
    heap->cancel_old_gc();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset these flags before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->global_generation()->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->global_generation()->is_mark_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // TODO: Do we need to explicitly retire PLABs?
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of code performs region moves, where region status is undefined
    // until all phases run together.
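    // Holding the heap lock across phases 2-4 keeps allocators and region state
    // transitions out of the way while regions are in this transient state.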
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();

    if (heap->mode()->is_generational()) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
      ShenandoahReconstructRememberedSetTask task;
      heap->workers()->run_task(&task);
    }
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    if (heap->mode()->is_generational()) {
      heap->verifier()->verify_after_generational_fullgc();
    } else {
      heap->verifier()->verify_after_fullgc();
    }
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    if (r->affiliation() != FREE) {
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  size_t const _num_workers;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices,
                                     size_t num_workers);

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // Can move the region, and this is not a humongous region. Humongous
    // moves are handled separately.
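    // (Humongous regions are planned by calculate_target_humongous_objects() and moved
    // by compact_humongous_objects() below.)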
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id);
};

class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahPrepareForCompactionTask* _compactor;
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;

  // _empty_regions is a thread-local list of heap regions that have been completely emptied by this worker thread's
  // compaction efforts. The worker thread that drives these efforts adds compacted regions to this list if the
  // region has not been compacted onto itself.
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _old_to_region;
  ShenandoahHeapRegion* _young_to_region;
  ShenandoahHeapRegion* _from_region;
  ShenandoahRegionAffiliation _from_affiliation;
  HeapWord* _old_compact_point;
  HeapWord* _young_compact_point;
  uint _worker_id;

public:
  ShenandoahPrepareForGenerationalCompactionObjectClosure(ShenandoahPrepareForCompactionTask* compactor,
                                                          PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* old_to_region,
                                                          ShenandoahHeapRegion* young_to_region, uint worker_id) :
    _compactor(compactor),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _old_to_region(old_to_region),
    _young_to_region(young_to_region),
    _from_region(NULL),
    _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr),
    _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr),
    _worker_id(worker_id) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
    _from_affiliation = from_region->affiliation();
    if (_from_region->has_live()) {
      if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
        if (_old_to_region == nullptr) {
          _old_to_region = from_region;
          _old_compact_point = from_region->bottom();
        }
      } else {
        assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
        if (_young_to_region == nullptr) {
          _young_to_region = from_region;
          _young_compact_point = from_region->bottom();
        }
      }
    } // else, we won't iterate over this _from_region so we don't need to set up a to-region to hold copies
  }

  void finish() {
    finish_old_region();
    finish_young_region();
  }

  void finish_old_region() {
    if (_old_to_region != nullptr) {
      log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
                    _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
      _old_to_region->set_new_top(_old_compact_point);
      _old_to_region = nullptr;
    }
  }

  void finish_young_region() {
    if (_young_to_region != nullptr) {
      log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
                    _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
      _young_to_region->set_new_top(_young_compact_point);
      _young_to_region = nullptr;
    }
  }

  bool is_compact_same_region() {
    return (_from_region == _old_to_region) || (_from_region == _young_to_region);
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
           "Object must reside in _from_region");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    uint from_region_age = _from_region->age();
    uint object_age = p->age();

    bool promote_object = false;
    if ((_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) &&
        (from_region_age + object_age > InitialTenuringThreshold)) {
      if ((_old_to_region != nullptr) && (_old_compact_point + obj_size > _old_to_region->end())) {
        finish_old_region();
        _old_to_region = nullptr;
      }
      if (_old_to_region == nullptr) {
        if (_empty_regions_pos < _empty_regions.length()) {
          ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(OLD_GENERATION);
          _old_to_region = new_to_region;
          _old_compact_point = _old_to_region->bottom();
          promote_object = true;
        }
        // Else this worker thread does not yet have any empty regions into which this aged object can be promoted, so
        // we leave promote_object as false, deferring the promotion.
      } else {
        promote_object = true;
      }
    }

    if (promote_object || (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION)) {
      assert(_old_to_region != nullptr, "_old_to_region should not be NULL when evacuating to OLD region");
      if (_old_compact_point + obj_size > _old_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                      p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

        // Object does not fit. Get a new _old_to_region.
        finish_old_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(OLD_GENERATION);
        } else {
          // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
          // from _from_region. That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
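          // (This fallback is only reached when _from_region is itself OLD: a promoted object
          //  either fits in the freshly selected empty region above, or its promotion was deferred.)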
          new_to_region = _from_region;
        }

        assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
        assert(new_to_region != NULL, "must not be NULL");
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(NULL, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_old_compact_point));
      _old_compact_point += obj_size;
    } else {
      assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION,
             "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
      assert(_young_to_region != nullptr, "_young_to_region should not be NULL when compacting YOUNG _from_region");

      // After full gc compaction, all regions have age 0. Embed the region's age into the object's age in order to preserve
      // tenuring progress.
      _heap->increase_object_age(p, from_region_age + 1);

      if (_young_compact_point + obj_size > _young_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                      p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

        // Object does not fit. Get a new _young_to_region.
        finish_young_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(YOUNG_GENERATION);
        } else {
          // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
          // from _from_region. That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
          new_to_region = _from_region;
        }

        assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
        assert(new_to_region != NULL, "must not be NULL");
        _young_to_region = new_to_region;
        _young_compact_point = _young_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(NULL, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_young_compact_point));
      _young_compact_point += obj_size;
    }
  }
};


class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    assert(!_heap->mode()->is_generational(), "Generational GC should use different Closure");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
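        // (Sliding compaction never grows a region's live data, so _from_region can always
        //  receive its own compacted contents.)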
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};


ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
                                                                       ShenandoahHeapRegionSet **worker_slices,
                                                                       size_t num_workers) :
  WorkerTask("Shenandoah Prepare For Compaction"),
  _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()),
  _worker_slices(worker_slices), _num_workers(num_workers) { }


void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == NULL) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr;
    ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr;
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(this, _preserved_marks->get(worker_id), empty_regions,
                                                               old_to_region, young_to_region, worker_id);
    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");
      log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
                    worker_id, affiliation_name(from_region->affiliation()),
                    from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
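      // Emptied from-regions are appended so that later from-regions in this slice can be
      // compacted into them.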
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of Full GC code
    // which regions are empty and therefore free.
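    // (Later phases treat new_top() == bottom() as "fully free"; see the compaction-window
    //  scan in calculate_target_humongous_objects().)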
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->affiliation() != FREE) {
      if (r->is_humongous_start()) {
        oop humongous_obj = cast_to_oop(r->bottom());
        if (!_ctx->is_marked(humongous_obj)) {
          assert(!r->has_live(),
                 "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live",
                 affiliation_name(r->affiliation()), r->index());
          log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because it is not marked", r->index());
          _heap->trash_humongous_region_at(r);
        } else {
          assert(r->has_live(),
                 "Humongous Start %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()), r->index());
        }
      } else if (r->is_humongous_continuation()) {
        // If we hit continuation, the non-live humongous starts should have been trashed already
        assert(r->humongous_start_region()->has_live(),
               "Humongous Continuation %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()), r->index());
      } else if (r->is_regular()) {
        if (!r->has_live()) {
          log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because it has no live data", r->index());
          r->make_trash_immediate();
        }
      }
    }
    // else, ignore this FREE region.
    // TODO: change iterators so they do not process FREE regions.
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need live data,
  // we need to only take what the worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left. Which means we want to have all workers have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  // AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  // (.....dense-prefix.....) (.....................tail...................)
  // [all regions fully live] [left-most regions are fuller than right-most]
  //

  // Compute how much live data is there. This would approximate the size of the dense prefix
  // we target to create.
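  // For example, with 4 workers and roughly 400 regions' worth of live data, each worker
  // is assigned ~100 fully-live prefix regions, and the remaining partially-live regions
  // are then dealt out round-robin to workers whose live budget is not yet exhausted.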
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
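        // The region is still assigned to the current worker; raising the limit by one
        // region's worth of words lets subsequent tail regions spread out again.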
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure the pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  if (heap->mode()->is_generational()) {
    heap->young_generation()->clear_used();
    heap->old_generation()->clear_used();
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    size_t num_workers = heap->max_workers();

    ResourceMark rm;
    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
        // Pinned regions are not compacted so they may still hold unmarked objects with
        // references to reclaimed memory. Remembered set scanning will crash if it attempts
        // to iterate the oops in these objects.
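        // Coalescing-and-filling overwrites those dead ranges with filler objects, which keeps
        // the region parsable for card-based scanning.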
        r->begin_preemptible_coalesce_and_fill();
        r->oop_fill_and_coalesce_wo_cancel();
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
                                   _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    // Update final usage for generations
    if (_heap->mode()->is_generational() && live != 0) {
      if (r->is_young()) {
        _heap->young_generation()->increase_used(live);
      } else if (r->is_old()) {
        _heap->old_generation()->increase_used(live);
      }
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.
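  //
  // Regions are visited from the highest index downward, mirroring the backward scan in
  // calculate_target_humongous_objects(), which slides humongous objects toward the end
  // of the heap.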

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT,
                    old_start, new_start);

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahRegionAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    if (heap->mode()->is_generational()) {
      heap->young_generation()->clear_used();
      heap->old_generation()->clear_used();
    }

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());
    if (heap->mode()->is_generational()) {
      log_info(gc)("FullGC done: GLOBAL usage: " SIZE_FORMAT ", young usage: " SIZE_FORMAT ", old usage: " SIZE_FORMAT,
                   post_compact.get_live(), heap->young_generation()->used(), heap->old_generation()->used());
    }

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc(true /* clear oom handler */);
}